code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import time
from w1thermsensor import W1ThermSensor
from datetime import datetime
from datetime import timedelta
#création d'une string au format : jj/mm/aaaa hh:mm:ss
# que l'on renvoie
def recupDate():
    """Return the current local time formatted as dd/mm/yyyy HH:MM:SS."""
    maintenant = datetime.now()
    horodatage = maintenant.strftime("%d/%m/%Y %H:%M:%S")
    print(horodatage)
    return horodatage
# Récupération de la température renvoyé par le thermomètre
# et renvoie de celle-ci
def recupTemp():
    """Read and return the current temperature from the 1-Wire sensor."""
    capteur = W1ThermSensor()
    valeur = capteur.get_temperature()
    print(" %s" % valeur + "\n")
    return valeur
# Écrit la date et la température dans le fichier
# prédéfini, après la dernière ligne du fichier
# ( il créé une nouvelle ligne à chaque fois )
def ecritFichier(strDate, temperature):
    """
    Append one "<date> <temperature>" line to temperatures.txt.

    Parameters:
        strDate (str): timestamp formatted as dd/mm/yyyy HH:MM:SS.
        temperature: value written verbatim after the date.
    """
    # Context manager guarantees the handle is closed even if write() raises
    # (the original leaked the handle on error).
    with open("temperatures.txt", "a+") as fichier:
        fichier.write(strDate + " %s" % temperature + '\n')
    print("date écrite %s" % strDate)
# Au lancement du programme, verifie le fichier texte pour comparer la
# différence entre la dernière date écrite et la date actuelle afin de completer
# les dates manquantes
# Récupère le numéro de la dernière ligne, et cette dernière ligne est placée dans un string
def verifNbDate():
    """
    Read temperatures.txt, find the last non-empty line, and if one exists
    hand it to correctionFichier() so missing samples can be backfilled.
    """
    numLigne = 0
    # Bug fix: start from "" (was "test") so an empty file is detected below
    # and correctionFichier is not called with a bogus non-date line.
    derniereLigne = ""
    # Context manager guarantees the handle is closed; read-only mode is
    # sufficient since nothing is written here (original opened "r+").
    with open("temperatures.txt", "r") as fichier:
        for ligne in fichier:
            if ligne != "":
                derniereLigne = ligne
                numLigne += 1
    print("test : %s" % derniereLigne)
    if derniereLigne != "":
        correctionFichier(derniereLigne, numLigne)
# Verifie qu'il manque des dates dans le fichier
# pour toutes les dates qu'il manque rajoute
# cette date et la température -300.0
def correctionFichier(strDate, numLigne):
    """
    Backfill missing samples between the last recorded date and now.

    For every missing 10-second interval, appends a line with the interpolated
    timestamp and the sentinel temperature -300.0 (marks an invalid reading).

    Parameters:
        strDate (str): last recorded line, "dd/mm/yyyy HH:MM:SS <temp>".
        numLigne (int): line count from verifNbDate (currently unused here).
    """
    # First two space-separated fields are the date and the time
    splitted = strDate.split(" ")
    print(splitted[0] + " " + splitted[1])
    derniereDate = datetime.strptime(splitted[0] + " " + splitted[1], '%d/%m/%Y %H:%M:%S')
    now = datetime.now()
    # 11-second guard: only backfill when at least one full 10 s slot is missing
    if derniereDate + timedelta(seconds=11) < now:
        temperatureInvalide = -300.0
        while derniereDate + timedelta(seconds=10) < now:
            derniereDate = derniereDate + timedelta(seconds=10)
            derniereDatestr = derniereDate.strftime("%d/%m/%Y %H:%M:%S")
            # append the missing sample with the invalid-temperature sentinel
            ecritFichier(derniereDatestr, temperatureInvalide)
# lance les différentes méthodes
verifNbDate()
while True:
strDate = recupDate()
temperature = recupTemp()
ecritFichier(strDate, temperature)
time.sleep(10) | Communication/Projet/src/projet/getTemperature.py | import time
from w1thermsensor import W1ThermSensor
from datetime import datetime
from datetime import timedelta
#création d'une string au format : jj/mm/aaaa hh:mm:ss
# que l'on renvoie
def recupDate() :
date = datetime.now()
strDate = date.strftime("%d/%m/%Y %H:%M:%S")
print(strDate)
return strDate
# Récupération de la température renvoyé par le thermomètre
# et renvoie de celle-ci
def recupTemp() :
thermometre = W1ThermSensor()
temperature = thermometre.get_temperature()
print(" %s" % temperature + "\n")
return temperature
# Écrit la date et la température dans le fichier
# prédéfini, après la dernière ligne du fichier
# ( il créé une nouvelle ligne à chaque fois )
def ecritFichier (strDate, temperature) :
fichier = open("temperatures.txt", "a+")
fichier.write(strDate + " %s" % temperature + '\n')
print("date écrite %s"% strDate)
fichier.close()
# Au lancement du programme, verifie le fichier texte pour comparer la
# différence entre la dernière date écrite et la date actuelle afin de completer
# les dates manquantes
# Récupère le numéro de la dernière ligne, et cette dernière ligne est placée dans un string
def verifNbDate() :
numLigne = 0
derniereLigne = "test"
fichier = open("temperatures.txt", "r+")
for ligne in fichier :
if ligne != "":
derniereLigne = ligne
numLigne+=1
fichier.close()
print("test : %s" % derniereLigne)
if derniereLigne != "" :
correctionFichier(derniereLigne,numLigne)
# Verifie qu'il manque des dates dans le fichier
# pour toutes les dates qu'il manque rajoute
# cette date et la température -300.0
def correctionFichier(strDate, numLigne) :
splitted = strDate.split(" ")
print(splitted[0] + " " + splitted[1])
derniereDate = datetime.strptime(splitted[0] + " " + splitted[1],'%d/%m/%Y %H:%M:%S')
now = datetime.now()
if derniereDate + timedelta(seconds=11) < now :
#la date est inférieur à la date actuel de plus de 10 secondes
temperatureInvalide = -300.0
while derniereDate + timedelta(seconds=10) < now :
derniereDate = derniereDate + timedelta(seconds=10)
derniereDatestr = derniereDate.strftime("%d/%m/%Y %H:%M:%S")
#ajout des dates manquantes
ecritFichier(derniereDatestr, temperatureInvalide)
# lance les différentes méthodes
verifNbDate()
while True:
strDate = recupDate()
temperature = recupTemp()
ecritFichier(strDate, temperature)
time.sleep(10) | 0.226099 | 0.247589 |
import sys
from types import ModuleType, new_class
import importlib
from importlib.abc import MetaPathFinder, Loader
import qsharp
from typing import Optional, Any, Dict
import logging
logger = logging.getLogger(__name__)
class QSharpModuleFinder(MetaPathFinder):
    """Meta-path finder that exposes Q# namespaces as importable packages."""

    def find_module(self, full_name : str, path : Optional[str] = None) -> Loader:
        """
        Return a QSharpModuleLoader when full_name names (or is a prefix of)
        a known Q# namespace; otherwise return None so the next finder on
        sys.meta_path is tried.
        """
        # We expose Q# namespaces as their own root-level packages.
        # E.g.:
        #     >>> import Microsoft.Quantum.Intrinsic as mqi
        # Thus, we need to check if the full name is one that that we can
        # sensibly load before we proceed.
        # To check the full name, we ask the client rather than going through
        # the public API for the qsharp package, so that we can check if the
        # client is currently busy. This can happen if anything below us in
        # meta_path needs to handle an import during an execute; this is the
        # case when ZeroMQ needs to import additional functionality from a
        # Cython module to handle a message.
        # See https://github.com/Microsoft/QuantumLibraries/issues/69 for an
        # example of this failure modality.
        # If the client is busy, we'll want to forego this request to find a
        # module and return None early.
        if qsharp.client.busy:
            return None
        # At this point, we should be safe to rely on the public API again.
        ops = qsharp.get_available_operations_by_namespace()
        if full_name not in ops:
            # We may have been given part of the qualified name of a namespace.
            # E.g., if we try to import Microsoft.Quantum.Intrinsic, we'll
            # see calls with "Microsoft" and "Microsoft.Quantum" first.
            if not any(
                ns_name.startswith(full_name + ".")
                for ns_name in ops
            ):
                return None
        return QSharpModuleLoader()
class QSharpModuleLoader(Loader):
    """Loader that materializes a Q# namespace as a QSharpModule object."""

    def load_module(self, full_name : str):
        """Return the module for this namespace, creating and registering it on first use."""
        logger.debug(f"Trying to load {full_name} as a Q# namespace.")
        # Honor the import-system contract: reuse an already-registered module.
        if full_name in sys.modules:
            return sys.modules[full_name]
        new_module = QSharpModule(full_name, full_name, self)
        # setdefault keeps whichever registration landed first.
        sys.modules.setdefault(full_name, new_module)
        return new_module
class QSharpCallable(object):
    """Python-side proxy for a single Q# function or operation."""

    # Fully qualified Q# name of the callable this object proxies.
    _name : str

    def __init__(self, callable_name : str, source : str):
        self._name = callable_name
        self.source = source

    def __repr__(self) -> str:
        return f"<Q# callable {self._name}>"

    def __call__(self, **kwargs) -> Any:
        """
        Executes this function or operation on the QuantumSimulator target
        machine, returning its output as a Python object.
        """
        # Calling the proxy directly is shorthand for a full-state simulation.
        return self.simulate(**kwargs)

    def simulate(self, **kwargs) -> Any:
        """
        Executes this function or operation on the QuantumSimulator target
        machine, returning its output as a Python object.
        """
        return qsharp.client.simulate(self, **kwargs)

    def toffoli_simulate(self, **kwargs) -> Any:
        """
        Executes this function or operation on the ToffoliSimulator target
        machine, returning its output as a Python object.
        """
        return qsharp.client.toffoli_simulate(self, **kwargs)

    def estimate_resources(self, **kwargs) -> Dict[str, int]:
        """Return the resource estimates reported by the client for this callable."""
        return qsharp.client.estimate(self, **kwargs)

    def trace(self, **kwargs) -> Any:
        """
        Returns a structure representing the set of gates and qubits
        used to execute this operation.
        """
        return qsharp.client.trace(self, **kwargs)
class QSharpModule(ModuleType):
_qs_name : str
def __init__(self, full_name : str, qs_name : str, loader : QSharpModuleLoader):
    """
    Initialize a module object mirroring a Q# namespace.

    full_name: name the Python module is registered under.
    qs_name: the Q# namespace this module mirrors.
    loader: the QSharpModuleLoader that created this module.
    """
    super().__init__(full_name)
    self._qs_name = qs_name
    # Synthetic origin for introspection tools; __path__ = [] makes this
    # module look like a package so sub-imports can be attempted.
    self.__file__ = f"qsharp:{qs_name}"
    self.__path__ = []
    self.__loader__ = loader
def __getattr__(self, name):
    """
    Resolve an attribute access as a Q# callable in this namespace.

    Builds a fresh QSharpCallable subclass named after the operation, copies
    the operation's documentation and source onto it, and returns an instance.
    Raises AttributeError when the namespace has no such callable.
    """
    ops = qsharp.get_available_operations_by_namespace()
    if name in ops[self._qs_name]:
        # Dynamic subclass so the returned object's class carries the
        # operation's own name and metadata (useful for help()/repr).
        op_cls = new_class(name, (QSharpCallable, ))
        # Copy over metadata from the operation's header.
        metadata = qsharp.client.get_operation_metadata(f"{self._qs_name}.{name}")
        op_cls.__doc__ = metadata.get('documentation', '')
        op_cls.__file__ = metadata.get('source', None)
        return op_cls(f"{self._qs_name}.{name}", "workspace")
    raise AttributeError(f"Q# namespace {self._qs_name} does not contain a callable {name}.")
def __repr__(self) -> str:
return f"<module '{self._qs_name}' (Q# namespace)>" | src/Python/qsharp-core/qsharp/loader.py |
import sys
from types import ModuleType, new_class
import importlib
from importlib.abc import MetaPathFinder, Loader
import qsharp
from typing import Optional, Any, Dict
import logging
logger = logging.getLogger(__name__)
class QSharpModuleFinder(MetaPathFinder):
def find_module(self, full_name : str, path : Optional[str] = None) -> Loader:
# We expose Q# namespaces as their own root-level packages.
# E.g.:
# >>> import Microsoft.Quantum.Intrinsic as mqi
# Thus, we need to check if the full name is one that that we can
# sensibly load before we proceed.
# To check the full name, we ask the client rather than going through
# the public API for the qsharp package, so that we can check if the
# client is currently busy. This can happen if anything below us in
# meta_path needs to handle an import during an execute; this is the
# case when ZeroMQ needs to import additional functionality from a
# Cython module to handle a message.
# See https://github.com/Microsoft/QuantumLibraries/issues/69 for an
# example of this failure modality.
# If the client is busy, we'll want to forego this request to find a
# module and return None early.
if qsharp.client.busy:
return None
# At this point, we should be safe to rely on the public API again.
ops = qsharp.get_available_operations_by_namespace()
if full_name not in ops:
# We may have been given part of the qualified name of a namespace.
# E.g., if we try to import Microsoft.Quantum.Intrinsic, we'll
# see calls with "Microsoft" and "Microsoft.Quantum" first.
if not any(
ns_name.startswith(full_name + ".")
for ns_name in ops
):
return None
return QSharpModuleLoader()
class QSharpModuleLoader(Loader):
def load_module(self, full_name : str):
logger.debug(f"Trying to load {full_name} as a Q# namespace.")
if full_name in sys.modules:
return sys.modules[full_name]
module = QSharpModule(full_name, full_name, self)
# Register the new module.
sys.modules.setdefault(full_name, module)
return module
class QSharpCallable(object):
_name : str
def __init__(self, callable_name : str, source : str):
self._name = callable_name
self.source = source
def __repr__(self) -> str:
return f"<Q# callable {self._name}>"
def __call__(self, **kwargs) -> Any:
"""
Executes this function or operation on the QuantumSimulator target
machine, returning its output as a Python object.
"""
return self.simulate(**kwargs)
def simulate(self, **kwargs) -> Any:
"""
Executes this function or operation on the QuantumSimulator target
machine, returning its output as a Python object.
"""
return qsharp.client.simulate(self, **kwargs)
def toffoli_simulate(self, **kwargs) -> Any:
"""
Executes this function or operation on the ToffoliSimulator target
machine, returning its output as a Python object.
"""
return qsharp.client.toffoli_simulate(self, **kwargs)
def estimate_resources(self, **kwargs) -> Dict[str, int]:
return qsharp.client.estimate(self, **kwargs)
def trace(self, **kwargs) -> Any:
"""
Returns a structure representing the set of gates and qubits
used to execute this operation.
"""
return qsharp.client.trace(self, **kwargs)
class QSharpModule(ModuleType):
_qs_name : str
def __init__(self, full_name : str, qs_name : str, loader : QSharpModuleLoader):
super().__init__(full_name)
self._qs_name = qs_name
self.__file__ = f"qsharp:{qs_name}"
self.__path__ = []
self.__loader__ = loader
def __getattr__(self, name):
ops = qsharp.get_available_operations_by_namespace()
if name in ops[self._qs_name]:
op_cls = new_class(name, (QSharpCallable, ))
# Copy over metadata from the operation's header.
metadata = qsharp.client.get_operation_metadata(f"{self._qs_name}.{name}")
op_cls.__doc__ = metadata.get('documentation', '')
op_cls.__file__ = metadata.get('source', None)
return op_cls(f"{self._qs_name}.{name}", "workspace")
raise AttributeError(f"Q# namespace {self._qs_name} does not contain a callable {name}.")
def __repr__(self) -> str:
return f"<module '{self._qs_name}' (Q# namespace)>" | 0.688678 | 0.274124 |
import sys
import os
import multiprocessing
import subprocess
import itertools
import tempfile
import argparse
import pandas as pd
from Bio import SeqIO
def run_spbuild(seq, tempdir):
    """
    Run SPBuild on a single sequence and return its profile matrix.

    Parameters:
        seq: Bio SeqRecord to profile (seq.id labels the output rows).
        tempdir (str): directory for per-worker scratch files.

    Returns:
        pd.DataFrame with a leading 'protein' column, or None if SPBuild
        exited with a non-zero status.
    """
    node_name = multiprocessing.current_process().name
    print(f'Processing {seq.id} on {node_name}', file=sys.stderr)
    # Write the single-record Fasta that SPBuild requires
    fasta_path = f'{tempdir}/{node_name}.fa'
    SeqIO.write(seq, fasta_path, 'fasta')
    # Run SPBuild; capture its output so failures can be reported
    mtx_path = f'{tempdir}/{node_name}.mtx'
    spbuild = subprocess.run(['spbuild', '-i', fasta_path, '-m', mtx_path],
                             capture_output=True, text=True)
    if spbuild.returncode != 0:
        # Bug fix: CompletedProcess has no .std attribute (the original
        # `spbuild.std` raised AttributeError); report captured stderr instead.
        print(f'Error processing {seq.id}:', spbuild.stderr, sep='\n', file=sys.stderr)
        os.remove(fasta_path)  # don't leave scratch files behind on failure
        return None
    # Reshape SPBuild's whitespace-delimited matrix into a tidy frame
    mtx = pd.read_csv(mtx_path, skiprows=2, sep=r'\s+')
    mtx = mtx.reset_index().rename(columns={'level_0': 'position', 'level_1': 'wt'})
    mtx['protein'] = seq.id
    # Move 'protein' to the front, dropping it from its appended position
    cols = mtx.columns.to_list()
    mtx = mtx[['protein'] + cols[:-1]]
    os.remove(fasta_path)
    os.remove(mtx_path)
    return mtx
def main(args):
    """
    Run SPBuild on input Fasta and format into a single output file.

    Spawns a worker pool, runs run_spbuild once per input sequence (all
    workers share one temporary directory), then concatenates the
    per-sequence DataFrames and writes them to stdout as TSV.
    """
    seqs = SeqIO.parse(args.fasta, 'fasta')
    with multiprocessing.Pool(processes=args.processes) as pool,\
        tempfile.TemporaryDirectory(dir=args.temp) as tempdir:
        # itertools.cycle pairs the same tempdir with every sequence
        profiles = pool.starmap(run_spbuild, zip(seqs, itertools.cycle([tempdir])))
        # NOTE(review): run_spbuild returns None on failure; presumably
        # pd.concat drops those entries so failed sequences vanish silently
        # from the output — confirm this is the intended behavior.
        profiles = pd.concat(profiles, axis=0)
        profiles.to_csv(sys.stdout, sep='\t', index=False)
def parse_args():
    """
    Process command-line arguments.

    Returns:
        argparse.Namespace with `fasta` (positional input path),
        `processes` (int, default 1) and `temp` (str, default '.').
    """
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('fasta', metavar='F', help="Input Fasta")
    parser.add_argument('--processes', '-p', default=1, type=int,
                        help="Number of processes available")
    # Typo fix in user-facing help text: "tempory" -> "temporary"
    parser.add_argument('--temp', '-t', default='.', type=str,
                        help="Root location for temporary storage")
    return parser.parse_args()
if __name__ == "__main__":
main(parse_args()) | bin/predict_spbuild.py | import sys
import os
import multiprocessing
import subprocess
import itertools
import tempfile
import argparse
import pandas as pd
from Bio import SeqIO
def run_spbuild(seq, tempdir):
"""
Run SPBuild
"""
node_name = multiprocessing.current_process().name
print(f'Processing {seq.id} on {node_name}', file=sys.stderr)
# Write required Fasta
fasta_path = f'{tempdir}/{node_name}.fa'
SeqIO.write(seq, fasta_path, 'fasta')
# Run SPBuild and cleanup
mtx_path = f'{tempdir}/{node_name}.mtx'
spbuild = subprocess.run(['spbuild', '-i', fasta_path, '-m', mtx_path])
if not spbuild.returncode == 0:
print(f'Error processing {seq.id}:', spbuild.std, sep='\n', file=sys.stderr)
return None
# Process Output
mtx = pd.read_csv(mtx_path, skiprows=2, sep='\s+')
mtx = mtx.reset_index().rename(columns={'level_0': 'position', 'level_1': 'wt'})
mtx['protein'] = seq.id
cols = mtx.columns.to_list()
mtx = mtx[['protein'] + cols[:-1]]
os.remove(fasta_path)
os.remove(mtx_path)
return mtx
def main(args):
"""
Run SPBuild on input Fasta and format into a single output file
"""
seqs = SeqIO.parse(args.fasta, 'fasta')
with multiprocessing.Pool(processes=args.processes) as pool,\
tempfile.TemporaryDirectory(dir=args.temp) as tempdir:
profiles = pool.starmap(run_spbuild, zip(seqs, itertools.cycle([tempdir])))
profiles = pd.concat(profiles, axis=0)
profiles.to_csv(sys.stdout, sep='\t', index=False)
def parse_args():
"""Process arguments"""
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('fasta', metavar='F', help="Input Fasta")
parser.add_argument('--processes', '-p', default=1, type=int,
help="Number of processes available")
parser.add_argument('--temp', '-t', default='.', type=str,
help="Root location for tempory storage")
return parser.parse_args()
if __name__ == "__main__":
main(parse_args()) | 0.2763 | 0.122839 |
from direct.directnotify import DirectNotifyGlobal
from direct.task import Task
from otp.level import DistributedLevelAI, LevelSpec
from otp.level import LevelSpec
from toontown.coghq import CountryClubRoomBase, LevelSuitPlannerAI
from toontown.coghq import DistributedCountryClubBattleAI
from toontown.coghq import FactoryEntityCreatorAI, CountryClubRoomSpecs
from toontown.suit import DistributedMintSuitAI
from toontown.toonbase import ToontownGlobals, ToontownBattleGlobals
class DistributedCountryClubRoomAI(DistributedLevelAI.DistributedLevelAI, CountryClubRoomBase.CountryClubRoomBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedCountryClubRoomAI')
def __init__(self, air, countryClubId, countryClubDoId, zoneId, roomId, roomNum, avIds, battleExpAggreg):
DistributedLevelAI.DistributedLevelAI.__init__(self, air, zoneId, 0, avIds)
CountryClubRoomBase.CountryClubRoomBase.__init__(self)
self.setCountryClubId(countryClubId)
self.countryClubId = countryClubId
self.setRoomId(roomId)
self.roomNum = roomNum
self.countryClubDoId = countryClubDoId
self.battleExpAggreg = battleExpAggreg
def createEntityCreator(self):
return FactoryEntityCreatorAI.FactoryEntityCreatorAI(level = self)
def getBattleCreditMultiplier(self):
return ToontownBattleGlobals.getCountryClubCreditMultiplier(self.countryClubId)
def generate(self):
self.notify.debug('generate %s: room=%s' % (self.doId, self.roomId))
self.notify.debug('loading spec')
specModule = CountryClubRoomSpecs.getCountryClubRoomSpecModule(self.roomId)
roomSpec = LevelSpec.LevelSpec(specModule)
if __dev__:
self.notify.debug('creating entity type registry')
typeReg = self.getCountryClubEntityTypeReg()
roomSpec.setEntityTypeReg(typeReg)
self.notify.debug('creating entities')
DistributedLevelAI.DistributedLevelAI.generate(self, roomSpec)
self.notify.debug('creating cogs')
cogSpecModule = CountryClubRoomSpecs.getCogSpecModule(self.roomId)
self.planner = LevelSuitPlannerAI.LevelSuitPlannerAI(self.air, self, DistributedMintSuitAI.DistributedMintSuitAI, DistributedCountryClubBattleAI.DistributedCountryClubBattleAI, cogSpecModule.CogData, cogSpecModule.ReserveCogData, cogSpecModule.BattleCells, battleExpAggreg = self.battleExpAggreg)
suitHandles = self.planner.genSuits()
messenger.send('plannerCreated-' + str(self.doId))
self.suits = suitHandles['activeSuits']
self.reserveSuits = suitHandles['reserveSuits']
self.d_setSuits()
self.notify.debug('finish mint room %s %s creation' % (self.roomId, self.doId))
def delete(self):
    """
    Tear down this room: destroy the suit planner, delete every active and
    reserve suit, then delegate to the base class delete.
    """
    self.notify.debug('delete: %s' % self.doId)
    # NOTE(review): `suits` aliases self.suits, so appending the reserves
    # below also mutates self.suits — presumably harmless during teardown,
    # but confirm nothing reads self.suits after this point.
    suits = self.suits
    for reserve in self.reserveSuits:
        # Reserve entries are tuples whose first element is the suit
        # (see getReserveSuits)
        suits.append(reserve[0])
    self.planner.destroy()
    del self.planner
    for suit in suits:
        if not suit.isDeleted():
            # Notify the suit of shutdown before requesting deletion
            suit.factoryIsGoingDown()
            suit.requestDelete()
    del self.battleExpAggreg
    DistributedLevelAI.DistributedLevelAI.delete(self, deAllocZone = False)
def getCountryClubId(self):
return self.countryClubId
def getRoomId(self):
return self.roomId
def getRoomNum(self):
return self.roomNum
def getCogLevel(self):
return self.cogLevel
def d_setSuits(self):
self.sendUpdate('setSuits', [self.getSuits(), self.getReserveSuits()])
def getSuits(self):
    """Return the doIds of the active suits in this room."""
    return [suit.doId for suit in self.suits]
def getReserveSuits(self):
    """Return the doIds of the reserve suits (first element of each reserve entry)."""
    return [entry[0].doId for entry in self.reserveSuits]
def d_setBossConfronted(self, toonId):
if toonId not in self.avIdList:
self.notify.warning('d_setBossConfronted: %s not in list of participants' % toonId)
return None
self.sendUpdate('setBossConfronted', [toonId])
def setVictors(self, victorIds):
    """
    Record the toons that defeated this country club.

    Filters victorIds down to avatars still known to the AI, writes a
    'mintDefeated' server event for each, and notifies the quest manager
    for every active victor.
    """
    activeVictors = []
    activeVictorIds = []
    for victorId in victorIds:
        # Skip avatars that have already disconnected
        toon = self.air.doId2do.get(victorId)
        if toon is not None:
            activeVictors.append(toon)
            activeVictorIds.append(victorId)
    description = '%s|%s' % (self.countryClubId, activeVictorIds)
    for avId in activeVictorIds:
        self.air.writeServerEvent('mintDefeated', avId, description)
    for toon in activeVictors:
        simbase.air.questManager.toonDefeatedCountryClub(toon, self.countryClubId, activeVictors)
def b_setDefeated(self):
self.d_setDefeated()
self.setDefeated()
def d_setDefeated(self):
self.sendUpdate('setDefeated')
def setDefeated(self):
pass
def allToonsGone(self, toonsThatCleared):
DistributedLevelAI.DistributedLevelAI.allToonsGone(self, toonsThatCleared)
if self.roomNum == 0:
mint = simbase.air.doId2do.get(self.countryClubDoId)
if mint is not None:
mint.allToonsGone()
else:
self.notify.warning('no mint %s in allToonsGone' % self.countryClubDoId)
def challengeDefeated(self):
countryClub = simbase.air.doId2do.get(self.countryClubDoId)
if countryClub:
countryClub.roomDefeated(self) | toontown/coghq/DistributedCountryClubRoomAI.py | from direct.directnotify import DirectNotifyGlobal
from direct.task import Task
from otp.level import DistributedLevelAI, LevelSpec
from otp.level import LevelSpec
from toontown.coghq import CountryClubRoomBase, LevelSuitPlannerAI
from toontown.coghq import DistributedCountryClubBattleAI
from toontown.coghq import FactoryEntityCreatorAI, CountryClubRoomSpecs
from toontown.suit import DistributedMintSuitAI
from toontown.toonbase import ToontownGlobals, ToontownBattleGlobals
class DistributedCountryClubRoomAI(DistributedLevelAI.DistributedLevelAI, CountryClubRoomBase.CountryClubRoomBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedCountryClubRoomAI')
def __init__(self, air, countryClubId, countryClubDoId, zoneId, roomId, roomNum, avIds, battleExpAggreg):
DistributedLevelAI.DistributedLevelAI.__init__(self, air, zoneId, 0, avIds)
CountryClubRoomBase.CountryClubRoomBase.__init__(self)
self.setCountryClubId(countryClubId)
self.countryClubId = countryClubId
self.setRoomId(roomId)
self.roomNum = roomNum
self.countryClubDoId = countryClubDoId
self.battleExpAggreg = battleExpAggreg
def createEntityCreator(self):
return FactoryEntityCreatorAI.FactoryEntityCreatorAI(level = self)
def getBattleCreditMultiplier(self):
return ToontownBattleGlobals.getCountryClubCreditMultiplier(self.countryClubId)
def generate(self):
self.notify.debug('generate %s: room=%s' % (self.doId, self.roomId))
self.notify.debug('loading spec')
specModule = CountryClubRoomSpecs.getCountryClubRoomSpecModule(self.roomId)
roomSpec = LevelSpec.LevelSpec(specModule)
if __dev__:
self.notify.debug('creating entity type registry')
typeReg = self.getCountryClubEntityTypeReg()
roomSpec.setEntityTypeReg(typeReg)
self.notify.debug('creating entities')
DistributedLevelAI.DistributedLevelAI.generate(self, roomSpec)
self.notify.debug('creating cogs')
cogSpecModule = CountryClubRoomSpecs.getCogSpecModule(self.roomId)
self.planner = LevelSuitPlannerAI.LevelSuitPlannerAI(self.air, self, DistributedMintSuitAI.DistributedMintSuitAI, DistributedCountryClubBattleAI.DistributedCountryClubBattleAI, cogSpecModule.CogData, cogSpecModule.ReserveCogData, cogSpecModule.BattleCells, battleExpAggreg = self.battleExpAggreg)
suitHandles = self.planner.genSuits()
messenger.send('plannerCreated-' + str(self.doId))
self.suits = suitHandles['activeSuits']
self.reserveSuits = suitHandles['reserveSuits']
self.d_setSuits()
self.notify.debug('finish mint room %s %s creation' % (self.roomId, self.doId))
def delete(self):
self.notify.debug('delete: %s' % self.doId)
suits = self.suits
for reserve in self.reserveSuits:
suits.append(reserve[0])
self.planner.destroy()
del self.planner
for suit in suits:
if not suit.isDeleted():
suit.factoryIsGoingDown()
suit.requestDelete()
del self.battleExpAggreg
DistributedLevelAI.DistributedLevelAI.delete(self, deAllocZone = False)
def getCountryClubId(self):
return self.countryClubId
def getRoomId(self):
return self.roomId
def getRoomNum(self):
return self.roomNum
def getCogLevel(self):
return self.cogLevel
def d_setSuits(self):
self.sendUpdate('setSuits', [self.getSuits(), self.getReserveSuits()])
def getSuits(self):
suitIds = []
for suit in self.suits:
suitIds.append(suit.doId)
return suitIds
def getReserveSuits(self):
suitIds = []
for suit in self.reserveSuits:
suitIds.append(suit[0].doId)
return suitIds
def d_setBossConfronted(self, toonId):
if toonId not in self.avIdList:
self.notify.warning('d_setBossConfronted: %s not in list of participants' % toonId)
return None
self.sendUpdate('setBossConfronted', [toonId])
def setVictors(self, victorIds):
activeVictors = []
activeVictorIds = []
for victorId in victorIds:
toon = self.air.doId2do.get(victorId)
if toon is not None:
activeVictors.append(toon)
activeVictorIds.append(victorId)
description = '%s|%s' % (self.countryClubId, activeVictorIds)
for avId in activeVictorIds:
self.air.writeServerEvent('mintDefeated', avId, description)
for toon in activeVictors:
simbase.air.questManager.toonDefeatedCountryClub(toon, self.countryClubId, activeVictors)
def b_setDefeated(self):
self.d_setDefeated()
self.setDefeated()
def d_setDefeated(self):
self.sendUpdate('setDefeated')
def setDefeated(self):
pass
def allToonsGone(self, toonsThatCleared):
DistributedLevelAI.DistributedLevelAI.allToonsGone(self, toonsThatCleared)
if self.roomNum == 0:
mint = simbase.air.doId2do.get(self.countryClubDoId)
if mint is not None:
mint.allToonsGone()
else:
self.notify.warning('no mint %s in allToonsGone' % self.countryClubDoId)
def challengeDefeated(self):
countryClub = simbase.air.doId2do.get(self.countryClubDoId)
if countryClub:
countryClub.roomDefeated(self) | 0.413596 | 0.087097 |
from test import helpers
from passlib.hash import bcrypt
from flaskeddit import db
from flaskeddit.models import AppUser, Community, Post, PostVote
class TestPost:
def test_get_post(self, test_client):
"""
Test GET request to the /community/_/post/_ route to assert the community's
post page is displayed.
"""
app_user = AppUser(username="mockusername", password="<PASSWORD>")
community = Community(
name="mockcommunity", description="mockdescription", app_user=app_user
)
post = Post(
title="mockposttitle",
post="mockpost",
app_user=app_user,
community=community,
)
db.session.add(app_user)
db.session.add(community)
db.session.add(post)
db.session.commit()
response = test_client.get(f"/community/{community.name}/post/{post.title}")
assert response is not None
assert response.status_code == 200
assert bytes(post.title, "utf-8") in response.data
def test_get_top_post(self, test_client):
"""
Test GET request to the /community/_/post/_/top route to assert the community's
post page is displayed.
"""
app_user = AppUser(username="mockusername", password="<PASSWORD>")
community = Community(
name="mockcommunity", description="mockdescription", app_user=app_user
)
post = Post(
title="mockposttitle",
post="mockpost",
app_user=app_user,
community=community,
)
db.session.add(app_user)
db.session.add(community)
db.session.add(post)
db.session.commit()
response = test_client.get(f"/community/{community.name}/post/{post.title}/top")
assert response is not None
assert response.status_code == 200
assert bytes(post.title, "utf-8") in response.data
def test_get_create_post(self, test_client):
    """
    Test GET request to the /community/_/post/create route to assert the post
    creation page is displayed.
    """
    password = "mockpassword!"
    # Fix redaction residue: store the bcrypt hash so login verification works.
    hashed_password = bcrypt.hash(password)
    app_user = AppUser(username="mockusername", password=hashed_password)
    community = Community(
        name="mockcommunity", description="mockdescription", app_user=app_user
    )
    db.session.add(app_user)
    db.session.add(community)
    db.session.commit()
    helpers.login(test_client, app_user.username, password)
    response = test_client.get(f"/community/{community.name}/post/create")
    assert response is not None
    assert response.status_code == 200
    assert b"Create Post" in response.data
def test_post_create_post(self, test_client):
    """
    Test POST request to the /community/_/post/create route to assert the post is
    created successfully.
    """
    password = "mockpassword!"
    # Fix redaction residue: store the bcrypt hash so login verification works.
    hashed_password = bcrypt.hash(password)
    app_user = AppUser(username="mockusername", password=hashed_password)
    community = Community(
        name="mockcommunity", description="mockdescription", app_user=app_user
    )
    db.session.add(app_user)
    db.session.add(community)
    db.session.commit()
    helpers.login(test_client, app_user.username, password)
    response = test_client.post(
        f"/community/{community.name}/post/create",
        data={"title": "mockposttitle", "post": "mockpost"},
        follow_redirects=True,
    )
    assert response is not None
    assert response.status_code == 200
    assert b"Successfully created post" in response.data
def test_get_update_post(self, test_client):
    """
    Test GET request to the /community/_/post/_/update route to assert the post
    update page is displayed.
    """
    password = "mockpassword!"
    # Fix redaction residue: store the bcrypt hash so login verification works.
    hashed_password = bcrypt.hash(password)
    app_user = AppUser(username="mockusername", password=hashed_password)
    community = Community(
        name="mockcommunity", description="mockdescription", app_user=app_user
    )
    post = Post(
        title="mockposttitle",
        post="mockpost",
        app_user=app_user,
        community=community,
    )
    db.session.add(app_user)
    db.session.add(community)
    db.session.add(post)
    db.session.commit()
    helpers.login(test_client, app_user.username, password)
    response = test_client.get(
        f"/community/{community.name}/post/{post.title}/update"
    )
    assert response is not None
    assert response.status_code == 200
    assert b"Update Post" in response.data
def test_post_update_post(self, test_client):
    """
    Test POST request to the /community/_/post/_/update route to assert the post is
    updated successfully.
    """
    password = "mockpassword!"
    # Fix redaction residue: store the bcrypt hash so login verification works.
    hashed_password = bcrypt.hash(password)
    app_user = AppUser(username="mockusername", password=hashed_password)
    community = Community(
        name="mockcommunity", description="mockdescription", app_user=app_user
    )
    post = Post(
        title="mockposttitle",
        post="mockpost",
        app_user=app_user,
        community=community,
    )
    db.session.add(app_user)
    db.session.add(community)
    db.session.add(post)
    db.session.commit()
    helpers.login(test_client, app_user.username, password)
    response = test_client.post(
        f"/community/{community.name}/post/{post.title}/update",
        data={"post": "mockupdatedpost"},
        follow_redirects=True,
    )
    assert response is not None
    assert response.status_code == 200
    assert b"Successfully updated post" in response.data
def test_post_delete_post(self, test_client):
    """
    Test POST request to the /community/_/post/_/delete route to assert the post is
    deleted successfully.
    """
    password = "mockpassword!"
    # Fix redaction residue: store the bcrypt hash so login verification works.
    hashed_password = bcrypt.hash(password)
    app_user = AppUser(username="mockusername", password=hashed_password)
    community = Community(
        name="mockcommunity", description="mockdescription", app_user=app_user
    )
    post = Post(
        title="mockposttitle",
        post="mockpost",
        app_user=app_user,
        community=community,
    )
    db.session.add(app_user)
    db.session.add(community)
    db.session.add(post)
    db.session.commit()
    helpers.login(test_client, app_user.username, password)
    response = test_client.post(
        f"/community/{community.name}/post/{post.title}/delete",
        follow_redirects=True,
    )
    assert response is not None
    assert response.status_code == 200
    assert b"Successfully deleted post" in response.data
def test_post_upvote_post(self, test_client):
"""
Test POST request to the /community/_/post/_/upvote route to assert the user
successfully upvotes the post.
"""
password = "<PASSWORD>!"
hashed_password = <PASSWORD>(password)
app_user = AppUser(username="mockusername", password=<PASSWORD>)
community = Community(
name="mockcommunity", description="mockdescription", app_user=app_user
)
post = Post(
title="mockposttitle",
post="mockpost",
app_user=app_user,
community=community,
)
db.session.add(app_user)
db.session.add(community)
db.session.add(post)
db.session.commit()
helpers.login(test_client, app_user.username, password)
response = test_client.post(
f"/community/{community.name}/post/{post.title}/upvote"
)
assert response is not None
assert response.status_code == 302
post_vote = PostVote.query.filter_by(
user_id=app_user.id, post_id=post.id
).first()
assert post_vote is not None
assert post_vote.vote == 1
def test_post_downvote_post(self, test_client):
"""
Test POST request to the /community/_/post/_/downvote route to assert the user
successfully downvotes the post.
"""
password = "<PASSWORD>!"
hashed_password = bcrypt.hash(password)
app_user = AppUser(username="mockusername", password=<PASSWORD>)
community = Community(
name="mockcommunity", description="mockdescription", app_user=app_user
)
post = Post(
title="mockposttitle",
post="mockpost",
app_user=app_user,
community=community,
)
db.session.add(app_user)
db.session.add(community)
db.session.add(post)
db.session.commit()
helpers.login(test_client, app_user.username, password)
response = test_client.post(
f"/community/{community.name}/post/{post.title}/downvote"
)
assert response is not None
assert response.status_code == 302
post_vote = PostVote.query.filter_by(
user_id=app_user.id, post_id=post.id
).first()
assert post_vote is not None
assert post_vote.vote == -1 | test/test_post.py | from test import helpers
from passlib.hash import bcrypt
from flaskeddit import db
from flaskeddit.models import AppUser, Community, Post, PostVote
class TestPost:
def test_get_post(self, test_client):
"""
Test GET request to the /community/_/post/_ route to assert the community's
post page is displayed.
"""
app_user = AppUser(username="mockusername", password="<PASSWORD>")
community = Community(
name="mockcommunity", description="mockdescription", app_user=app_user
)
post = Post(
title="mockposttitle",
post="mockpost",
app_user=app_user,
community=community,
)
db.session.add(app_user)
db.session.add(community)
db.session.add(post)
db.session.commit()
response = test_client.get(f"/community/{community.name}/post/{post.title}")
assert response is not None
assert response.status_code == 200
assert bytes(post.title, "utf-8") in response.data
def test_get_top_post(self, test_client):
"""
Test GET request to the /community/_/post/_/top route to assert the community's
post page is displayed.
"""
app_user = AppUser(username="mockusername", password="<PASSWORD>")
community = Community(
name="mockcommunity", description="mockdescription", app_user=app_user
)
post = Post(
title="mockposttitle",
post="mockpost",
app_user=app_user,
community=community,
)
db.session.add(app_user)
db.session.add(community)
db.session.add(post)
db.session.commit()
response = test_client.get(f"/community/{community.name}/post/{post.title}/top")
assert response is not None
assert response.status_code == 200
assert bytes(post.title, "utf-8") in response.data
def test_get_create_post(self, test_client):
"""
Test GET request to the /community/_/post/create route to assert the post
creation page is displayed.
"""
password = "<PASSWORD>!"
hashed_password = <PASSWORD>.hash(password)
app_user = AppUser(username="mockusername", password=<PASSWORD>_password)
community = Community(
name="mockcommunity", description="mockdescription", app_user=app_user
)
db.session.add(app_user)
db.session.add(community)
db.session.commit()
helpers.login(test_client, app_user.username, password)
response = test_client.get(f"/community/{community.name}/post/create")
assert response is not None
assert response.status_code == 200
assert b"Create Post" in response.data
def test_post_create_post(self, test_client):
"""
Test POST request to the /community/_/post/create route to assert the post is
created successfully.
"""
password = "<PASSWORD>!"
hashed_password = <PASSWORD>(password)
app_user = AppUser(username="mockusername", password=<PASSWORD>_password)
community = Community(
name="mockcommunity", description="mockdescription", app_user=app_user
)
db.session.add(app_user)
db.session.add(community)
db.session.commit()
helpers.login(test_client, app_user.username, password)
response = test_client.post(
f"/community/{community.name}/post/create",
data={"title": "mockposttitle", "post": "mockpost"},
follow_redirects=True,
)
assert response is not None
assert response.status_code == 200
assert b"Successfully created post" in response.data
def test_get_update_post(self, test_client):
"""
Test GET request to the /community/_/post/_/update route to assert the post
update page is displayed.
"""
password = "<PASSWORD>!"
hashed_password = <PASSWORD>(password)
app_user = AppUser(username="mockusername", password=<PASSWORD>)
community = Community(
name="mockcommunity", description="mockdescription", app_user=app_user
)
post = Post(
title="mockposttitle",
post="mockpost",
app_user=app_user,
community=community,
)
db.session.add(app_user)
db.session.add(community)
db.session.add(post)
db.session.commit()
helpers.login(test_client, app_user.username, password)
response = test_client.get(
f"/community/{community.name}/post/{post.title}/update"
)
assert response is not None
assert response.status_code == 200
assert b"Update Post" in response.data
def test_post_update_post(self, test_client):
"""
Test POST request to the /community/_/post/_/update route to assert the post is
updated successfully.
"""
password = "<PASSWORD>!"
hashed_password = <PASSWORD>(password)
app_user = AppUser(username="mockusername", password=<PASSWORD>)
community = Community(
name="mockcommunity", description="mockdescription", app_user=app_user
)
post = Post(
title="mockposttitle",
post="mockpost",
app_user=app_user,
community=community,
)
db.session.add(app_user)
db.session.add(community)
db.session.add(post)
db.session.commit()
helpers.login(test_client, app_user.username, password)
response = test_client.post(
f"/community/{community.name}/post/{post.title}/update",
data={"post": "mockupdatedpost"},
follow_redirects=True,
)
assert response is not None
assert response.status_code == 200
assert b"Successfully updated post" in response.data
def test_post_delete_post(self, test_client):
"""
Test POST request to the /community/_/post/_/delete route to assert the post is
deleted successfully.
"""
password = "<PASSWORD>!"
hashed_password = <PASSWORD>(password)
app_user = AppUser(username="mockusername", password=<PASSWORD>)
community = Community(
name="mockcommunity", description="mockdescription", app_user=app_user
)
post = Post(
title="mockposttitle",
post="mockpost",
app_user=app_user,
community=community,
)
db.session.add(app_user)
db.session.add(community)
db.session.add(post)
db.session.commit()
helpers.login(test_client, app_user.username, password)
response = test_client.post(
f"/community/{community.name}/post/{post.title}/delete",
follow_redirects=True,
)
assert response is not None
assert response.status_code == 200
assert b"Successfully deleted post" in response.data
def test_post_upvote_post(self, test_client):
"""
Test POST request to the /community/_/post/_/upvote route to assert the user
successfully upvotes the post.
"""
password = "<PASSWORD>!"
hashed_password = <PASSWORD>(password)
app_user = AppUser(username="mockusername", password=<PASSWORD>)
community = Community(
name="mockcommunity", description="mockdescription", app_user=app_user
)
post = Post(
title="mockposttitle",
post="mockpost",
app_user=app_user,
community=community,
)
db.session.add(app_user)
db.session.add(community)
db.session.add(post)
db.session.commit()
helpers.login(test_client, app_user.username, password)
response = test_client.post(
f"/community/{community.name}/post/{post.title}/upvote"
)
assert response is not None
assert response.status_code == 302
post_vote = PostVote.query.filter_by(
user_id=app_user.id, post_id=post.id
).first()
assert post_vote is not None
assert post_vote.vote == 1
def test_post_downvote_post(self, test_client):
"""
Test POST request to the /community/_/post/_/downvote route to assert the user
successfully downvotes the post.
"""
password = "<PASSWORD>!"
hashed_password = bcrypt.hash(password)
app_user = AppUser(username="mockusername", password=<PASSWORD>)
community = Community(
name="mockcommunity", description="mockdescription", app_user=app_user
)
post = Post(
title="mockposttitle",
post="mockpost",
app_user=app_user,
community=community,
)
db.session.add(app_user)
db.session.add(community)
db.session.add(post)
db.session.commit()
helpers.login(test_client, app_user.username, password)
response = test_client.post(
f"/community/{community.name}/post/{post.title}/downvote"
)
assert response is not None
assert response.status_code == 302
post_vote = PostVote.query.filter_by(
user_id=app_user.id, post_id=post.id
).first()
assert post_vote is not None
assert post_vote.vote == -1 | 0.658966 | 0.174656 |
import pytest
import os
from bigdl.dllib.utils.common import *
from bigdl.dllib.feature.transform.vision.image import *
import tempfile
class TestLayer():
def setup_method(self, method):
""" setup any state tied to the execution of the given method in a
class. setup_method is invoked for every test method of a class.
"""
sparkConf = create_spark_conf().setMaster("local[4]").setAppName("test model")
self.sc = get_spark_context(sparkConf)
init_engine()
resource_path = os.path.join(os.path.split(__file__)[0], "../resources")
self.image_path = os.path.join(resource_path, "pascal/000025.jpg")
def teardown_method(self, method):
""" teardown any state that was previously setup with a setup_method
call.
"""
self.sc.stop()
def test_get_sample(self):
image_frame = ImageFrame.read(self.image_path)
transformer = Pipeline([PixelBytesToMat(), Resize(256, 256), CenterCrop(224, 224),
ChannelNormalize(0.485, 0.456, 0.406, 0.229, 0.224, 0.225),
MatToTensor(), ImageFrameToSample()])
transformed = transformer(image_frame)
transformed.get_sample()
def transformer_test(self, transformer):
image_frame = ImageFrame.read(self.image_path)
transformed = transformer(image_frame)
transformed.get_image()
image_frame = ImageFrame.read(self.image_path, self.sc)
transformed = transformer(image_frame)
images = transformed.get_image()
images.count()
def test_get_image(self):
image_frame = ImageFrame.read(self.image_path)
image_frame.get_image()
def test_get_label(self):
image_frame = ImageFrame.read(self.image_path)
image_frame.get_label()
def test_is_local(self):
image_frame = ImageFrame.read(self.image_path)
assert image_frame.is_local() is True
image_frame = ImageFrame.read(self.image_path, self.sc)
assert image_frame.is_local() is False
def test_is_distributed(self):
image_frame = ImageFrame.read(self.image_path)
assert image_frame.is_distributed() is False
image_frame = ImageFrame.read(self.image_path, self.sc)
assert image_frame.is_distributed() is True
def test_hflip(self):
transformer = HFlip()
self.transformer_test(transformer)
def test_colorjitter(self):
color = ColorJitter(random_order_prob=1.0, shuffle=True)
self.transformer_test(color)
def test_resize(self):
resize = Resize(200, 200, 1)
self.transformer_test(resize)
def test_brightness(self):
brightness = Brightness(0.0, 32.0)
self.transformer_test(brightness)
def test_channel_order(self):
transformer = ChannelOrder()
self.transformer_test(transformer)
def test_aspect_scale(self):
transformer = AspectScale(300)
self.transformer_test(transformer)
def test_random_aspect_scale(self):
transformer = RandomAspectScale([300, 400])
self.transformer_test(transformer)
def test_contrast(self):
transformer = Contrast(0.5, 1.5)
self.transformer_test(transformer)
def test_saturation(self):
transformer = Saturation(0.5, 1.5)
self.transformer_test(transformer)
def test_hue(self):
transformer = Hue(0.5, 1.5)
self.transformer_test(transformer)
def test_channel_normalize(self):
transformer = ChannelNormalize(100.0, 200.0, 300.0, 2.0, 2.0, 2.0)
self.transformer_test(transformer)
def test_pixel_normalize(self):
means = [2.0] * 3 * 500 * 375
transformer = PixelNormalize(means)
self.transformer_test(transformer)
def test_fixed_crop_norm(self):
crop = FixedCrop(0.0, 0.0, 0.5, 1.0)
self.transformer_test(crop)
def test_fixed_crop_unnorm(self):
crop = FixedCrop(0.0, 0.0, 200.0, 200., False)
self.transformer_test(crop)
def test_center_crop(self):
crop = CenterCrop(200, 200)
self.transformer_test(crop)
def test_random_crop(self):
crop = RandomCrop(200, 200)
self.transformer_test(crop)
def test_filler(self):
filler = Filler(0.0, 0.0, 0.1, 0.2)
self.transformer_test(filler)
def test_expand(self):
expand = Expand(means_r=123, means_g=117, means_b=104,
max_expand_ratio=2.0)
self.transformer_test(expand)
def test_fix_expand(self):
expand = FixExpand(1000, 1000)
self.transformer_test(expand)
def test_random_transformer(self):
transformer = RandomTransformer(HFlip(), 0.5)
self.transformer_test(transformer)
def test_pipeline(self):
transformer = Pipeline([ColorJitter(), HFlip(), Resize(200, 200, 1)])
self.transformer_test(transformer)
def test_inception_preprocess(self):
transformer = Pipeline([Resize(256, 256), CenterCrop(224, 224),
ChannelNormalize(0.485, 0.456, 0.406, 0.229, 0.224, 0.225),
MatToTensor(), ImageFrameToSample()])
self.transformer_test(transformer)
def test_mat_to_floats(self):
transformer = MatToFloats()
self.transformer_test(transformer)
def test_mat_to_floats_no_share(self):
transformer = MatToFloats(share_buffer=False)
self.transformer_test(transformer)
def test_mat_to_tensor(self):
transformer = MatToTensor()
self.transformer_test(transformer)
def testImageFrameToSample(self):
transformer = Pipeline([MatToTensor(), ImageFrameToSample()])
self.transformer_test(transformer)
def test_image_frame_transform(self):
transformer = MatToTensor()
image_frame = ImageFrame.read(self.image_path)
transformed = image_frame.transform(transformer)
transformed.get_image()
def test_empty_get_predict_local(self):
image_frame = ImageFrame.read(self.image_path)
image_frame.get_predict()
def test_empty_get_predict_distributed(self):
image_frame = ImageFrame.read(self.image_path, self.sc)
image_frame.get_predict()
def test_read_write_parquet(self):
temp = tempfile.mkdtemp() + "testParquet/"
resource_path = os.path.join(os.path.split(__file__)[0], "../resources/pascal")
ImageFrame.write_parquet(resource_path, temp, self.sc, 1)
read_image_frame = ImageFrame.read_parquet(temp, self.sc)
def test_set_label(self):
resource_path = os.path.join(os.path.split(__file__)[0], "../resources/pascal")
imageFrame = ImageFrame.read(resource_path, self.sc)
uris = imageFrame.get_uri().collect()
label = {}
for uri in uris:
label[uri] = 10
imageFrame.set_label(label)
def test_random_split(self):
resource_path = os.path.join(os.path.split(__file__)[0], "../resources/pascal")
imageFrame = ImageFrame.read(resource_path, self.sc)
splits = imageFrame.random_split([1.0])
def test_channel_scaled_normalizer(self):
transformer = ChannelScaledNormalizer(123, 117, 104, 1.0)
self.transformer_test(transformer)
def test_random_alter_aspect(self):
transformer = RandomAlterAspect(0.08, 1, 0.75, "CUBIC", 20)
self.transformer_test(transformer)
def test_random_cropper(self):
transformer = RandomCropper(20, 20, True, "Random", 3)
self.transformer_test(transformer)
def test_random_resize(self):
transformer = RandomResize(100, 100)
self.transformer_test(transformer)
if __name__ == "__main__":
pytest.main([__file__]) | python/dllib/test/bigdl/transform/test_image.py |
import pytest
import os
from bigdl.dllib.utils.common import *
from bigdl.dllib.feature.transform.vision.image import *
import tempfile
class TestLayer():
def setup_method(self, method):
""" setup any state tied to the execution of the given method in a
class. setup_method is invoked for every test method of a class.
"""
sparkConf = create_spark_conf().setMaster("local[4]").setAppName("test model")
self.sc = get_spark_context(sparkConf)
init_engine()
resource_path = os.path.join(os.path.split(__file__)[0], "../resources")
self.image_path = os.path.join(resource_path, "pascal/000025.jpg")
def teardown_method(self, method):
""" teardown any state that was previously setup with a setup_method
call.
"""
self.sc.stop()
def test_get_sample(self):
image_frame = ImageFrame.read(self.image_path)
transformer = Pipeline([PixelBytesToMat(), Resize(256, 256), CenterCrop(224, 224),
ChannelNormalize(0.485, 0.456, 0.406, 0.229, 0.224, 0.225),
MatToTensor(), ImageFrameToSample()])
transformed = transformer(image_frame)
transformed.get_sample()
def transformer_test(self, transformer):
image_frame = ImageFrame.read(self.image_path)
transformed = transformer(image_frame)
transformed.get_image()
image_frame = ImageFrame.read(self.image_path, self.sc)
transformed = transformer(image_frame)
images = transformed.get_image()
images.count()
def test_get_image(self):
image_frame = ImageFrame.read(self.image_path)
image_frame.get_image()
def test_get_label(self):
image_frame = ImageFrame.read(self.image_path)
image_frame.get_label()
def test_is_local(self):
image_frame = ImageFrame.read(self.image_path)
assert image_frame.is_local() is True
image_frame = ImageFrame.read(self.image_path, self.sc)
assert image_frame.is_local() is False
def test_is_distributed(self):
image_frame = ImageFrame.read(self.image_path)
assert image_frame.is_distributed() is False
image_frame = ImageFrame.read(self.image_path, self.sc)
assert image_frame.is_distributed() is True
def test_hflip(self):
transformer = HFlip()
self.transformer_test(transformer)
def test_colorjitter(self):
color = ColorJitter(random_order_prob=1.0, shuffle=True)
self.transformer_test(color)
def test_resize(self):
resize = Resize(200, 200, 1)
self.transformer_test(resize)
def test_brightness(self):
brightness = Brightness(0.0, 32.0)
self.transformer_test(brightness)
def test_channel_order(self):
transformer = ChannelOrder()
self.transformer_test(transformer)
def test_aspect_scale(self):
transformer = AspectScale(300)
self.transformer_test(transformer)
def test_random_aspect_scale(self):
transformer = RandomAspectScale([300, 400])
self.transformer_test(transformer)
def test_contrast(self):
transformer = Contrast(0.5, 1.5)
self.transformer_test(transformer)
def test_saturation(self):
transformer = Saturation(0.5, 1.5)
self.transformer_test(transformer)
def test_hue(self):
transformer = Hue(0.5, 1.5)
self.transformer_test(transformer)
def test_channel_normalize(self):
transformer = ChannelNormalize(100.0, 200.0, 300.0, 2.0, 2.0, 2.0)
self.transformer_test(transformer)
def test_pixel_normalize(self):
means = [2.0] * 3 * 500 * 375
transformer = PixelNormalize(means)
self.transformer_test(transformer)
def test_fixed_crop_norm(self):
crop = FixedCrop(0.0, 0.0, 0.5, 1.0)
self.transformer_test(crop)
def test_fixed_crop_unnorm(self):
crop = FixedCrop(0.0, 0.0, 200.0, 200., False)
self.transformer_test(crop)
def test_center_crop(self):
crop = CenterCrop(200, 200)
self.transformer_test(crop)
def test_random_crop(self):
crop = RandomCrop(200, 200)
self.transformer_test(crop)
def test_filler(self):
filler = Filler(0.0, 0.0, 0.1, 0.2)
self.transformer_test(filler)
def test_expand(self):
expand = Expand(means_r=123, means_g=117, means_b=104,
max_expand_ratio=2.0)
self.transformer_test(expand)
def test_fix_expand(self):
expand = FixExpand(1000, 1000)
self.transformer_test(expand)
def test_random_transformer(self):
transformer = RandomTransformer(HFlip(), 0.5)
self.transformer_test(transformer)
def test_pipeline(self):
transformer = Pipeline([ColorJitter(), HFlip(), Resize(200, 200, 1)])
self.transformer_test(transformer)
def test_inception_preprocess(self):
transformer = Pipeline([Resize(256, 256), CenterCrop(224, 224),
ChannelNormalize(0.485, 0.456, 0.406, 0.229, 0.224, 0.225),
MatToTensor(), ImageFrameToSample()])
self.transformer_test(transformer)
def test_mat_to_floats(self):
transformer = MatToFloats()
self.transformer_test(transformer)
def test_mat_to_floats_no_share(self):
transformer = MatToFloats(share_buffer=False)
self.transformer_test(transformer)
def test_mat_to_tensor(self):
transformer = MatToTensor()
self.transformer_test(transformer)
def testImageFrameToSample(self):
transformer = Pipeline([MatToTensor(), ImageFrameToSample()])
self.transformer_test(transformer)
def test_image_frame_transform(self):
transformer = MatToTensor()
image_frame = ImageFrame.read(self.image_path)
transformed = image_frame.transform(transformer)
transformed.get_image()
def test_empty_get_predict_local(self):
image_frame = ImageFrame.read(self.image_path)
image_frame.get_predict()
def test_empty_get_predict_distributed(self):
image_frame = ImageFrame.read(self.image_path, self.sc)
image_frame.get_predict()
def test_read_write_parquet(self):
temp = tempfile.mkdtemp() + "testParquet/"
resource_path = os.path.join(os.path.split(__file__)[0], "../resources/pascal")
ImageFrame.write_parquet(resource_path, temp, self.sc, 1)
read_image_frame = ImageFrame.read_parquet(temp, self.sc)
def test_set_label(self):
resource_path = os.path.join(os.path.split(__file__)[0], "../resources/pascal")
imageFrame = ImageFrame.read(resource_path, self.sc)
uris = imageFrame.get_uri().collect()
label = {}
for uri in uris:
label[uri] = 10
imageFrame.set_label(label)
def test_random_split(self):
resource_path = os.path.join(os.path.split(__file__)[0], "../resources/pascal")
imageFrame = ImageFrame.read(resource_path, self.sc)
splits = imageFrame.random_split([1.0])
def test_channel_scaled_normalizer(self):
transformer = ChannelScaledNormalizer(123, 117, 104, 1.0)
self.transformer_test(transformer)
def test_random_alter_aspect(self):
transformer = RandomAlterAspect(0.08, 1, 0.75, "CUBIC", 20)
self.transformer_test(transformer)
def test_random_cropper(self):
transformer = RandomCropper(20, 20, True, "Random", 3)
self.transformer_test(transformer)
def test_random_resize(self):
transformer = RandomResize(100, 100)
self.transformer_test(transformer)
if __name__ == "__main__":
pytest.main([__file__]) | 0.61057 | 0.543651 |
import numpy as np
try:
import matplotlib.pyplot as pl
except(ImportError):
pass
from ..plotting.utils import get_best
__all__ = ["get_best", "get_truths", "get_percentiles", "get_stats",
"posterior_samples", "hist_samples", "joint_pdf", "compute_sigma_level",
"trim_walkers", "fill_between", "figgrid"]
def get_truths(res):
import pickle
try:
mock = pickle.loads(res['obs']['mock_params'])
res['obs']['mock_params'] = mock
except:
pass
try:
return res['obs']['mock_params']
except(KeyError):
return None
def get_percentiles(res, ptile=[16, 50, 84], start=0.0, thin=1, **extras):
"""Get get percentiles of the marginalized posterior for each parameter.
:param res:
A results dictionary, containing a "chain" and "theta_labels" keys.
:param ptile: (optional, default: [16, 50, 84])
A list of percentiles (integers 0 to 100) to return for each parameter.
:param start: (optional, default: 0.5)
How much of the beginning of chains to throw away before calculating
percentiles, expressed as a fraction of the total number of iterations.
:param thin: (optional, default: 10.0)
Only use every ``thin`` iteration when calculating percentiles.
:returns pcts:
Dictionary with keys giving the parameter names and values giving the
requested percentiles for that parameter.
"""
parnames = np.array(res.get('theta_labels', res['model'].theta_labels()))
niter = res['chain'].shape[-2]
start_index = np.floor(start * (niter-1)).astype(int)
if res["chain"].ndim > 2:
flatchain = res['chain'][:, start_index::thin, :]
dims = flatchain.shape
flatchain = flatchain.reshape(dims[0]*dims[1], dims[2])
elif res["chain"].ndim == 2:
flatchain = res["chain"][start_index::thin, :]
pct = np.array([quantile(p, ptile, weights=res.get("weights", None)) for p in flatchain.T])
return dict(zip(parnames, pct))
def quantile(data, percents, weights=None):
''' percents in units of 1%
weights specifies the frequency (count) of data.
'''
if weights is None:
return np.percentile(data, percents)
ind = np.argsort(data)
d = data[ind]
w = weights[ind]
p = 1.*w.cumsum()/w.sum()*100
y = np.interp(percents, p, d)
return y
def get_stats(res, pnames, **kwargs):
"""For selected parameters, get the truth (if known), the MAP value from
the chain, and the percentiles.
:param res:
A results dictionary, containing a "chain" and "theta_labels" keys.
:param pnames:
List of strings giving the names of the desired parameters.
"""
truth = get_truths(res)
best = dict(zip(*get_best(res)))
pct = get_percentiles(res, **kwargs)
if truth is not None:
truths = np.array([truth[k] for k in pnames])
else:
truths = None
pcts = np.array([pct[k] for i,k in enumerate(pnames)])
bests = np.array([best[k] for i,k in enumerate(pnames)])
return pnames, truths, bests, pcts
def trim_walkers(res, threshold=-1e4):
"""Remove walkers with probability below some threshold. Useful for
removing stuck walkers
"""
good = res['lnprobability'][:, -1] > threshold
trimmed = {}
trimmed['chain'] = res['chain'][good, :, :]
trimmed['lnprobability'] = res['lnprobability'][good, :]
trimmed['model'] = res['model']
return trimmed
def joint_pdf(res, p1, p2, pmap={}, **kwargs):
"""Build a 2-dimensional array representing the binned joint PDF of 2
parameters, in terms of sigma or fraction of the total distribution.
For example, to plot contours of the joint PDF of parameters ``"parname1"``
and ``"parname2"`` from the last half of a chain with 30bins in each
dimension;
::
xb, yb, sigma = joint_pdf(res, parname1, parname2, nbins=30, start=0.5)
ax.contour(xb, yb, sigma, **plotting_kwargs)
:param p1:
The name of the parameter for the x-axis
:param p2:
The name of the parameter for the y axis
:returns xb, yb, sigma:
The bins and the 2-d histogram
"""
trace, pars = hist_samples(res, [p1, p2], **kwargs)
trace = trace.copy().T
if pars[0] == p1:
trace = trace[::-1, :]
x = pmap.get(p2, lambda x: x)(trace[0])
y = pmap.get(p1, lambda x: x)(trace[1])
xbins, ybins, sigma = compute_sigma_level(x, y, **kwargs)
return xbins, ybins, sigma.T
def posterior_samples(res, nsample=None, **kwargs):
"""Pull samples of theta from the MCMC chain
:param res:
A results dictionary, containing a "chain" and "theta_labels" keys.
:param nsample:
Number of random samples to draw.
:param **kwargs:
Extra keywords are passed to ``hist_samples``.
:returns thetas:
A list of parameter vectors pulled at random from the chain, of same
length as ``samples``.
"""
flatchain, pnames = hist_samples(res, **kwargs)
weights = res.get("weights", None)
ns = flatchain.shape[0]
if nsample is None:
nsample = ns
s = np.random.choice(ns, p=weights, size=nsample)
thetas = flatchain[s, :]
return thetas
def hist_samples(res, showpars=None, start=0, thin=1,
return_lnprob=False, **extras):
"""Get posterior samples for the parameters listed in ``showpars``. This
can be done for different ending fractions of the (thinned) chain.
:param res:
A results dictionary, containing a "chain" and "theta_labels" keys.
:param showpars:
A list of strings giving the desired parameters.
:param start: (optional, default: 0.5)
How much of the beginning of chains to throw away before calculating
percentiles, expressed as a fraction of the total number of iterations.
:param thin: (optional, default: 10.0)
Only use every ``thin`` iteration when calculating percentiles.
"""
parnames = np.array(res.get('theta_labels', res['model'].theta_labels()))
niter = res['chain'].shape[-2]
start_index = np.floor(start * (niter-1)).astype(int)
if res["chain"].ndim > 2:
# emcee
flatchain = res['chain'][:, start_index::thin, :]
dims = flatchain.shape
flatchain = flatchain.reshape(dims[0]*dims[1], dims[2])
flatlnprob = res['lnprobability'][:, start_index::thin].reshape(dims[0]*dims[1])
elif res["chain"].ndim == 2:
# dynesty
flatchain = res["chain"][start_index::thin, :]
flatlnprob = res['lnprobability'][start_index::thin]
if showpars is None:
ind_show = slice(None)
else:
ind_show = np.array([p in showpars for p in parnames], dtype=bool)
flatchain = flatchain[:, ind_show]
if return_lnprob:
return flatchain, parnames[ind_show], flatlnprob
return flatchain, parnames[ind_show]
def compute_sigma_level(trace1, trace2, nbins=30, weights=None, extents=None, **extras):
"""From a set of traces in two parameters, make a 2-d histogram of number
of standard deviations. Following examples from <NAME>.
"""
L, xbins, ybins = np.histogram2d(trace1, trace2, bins=nbins,
weights=weights,
range=extents)
L[L == 0] = 1E-16
logL = np.log(L)
shape = L.shape
L = L.ravel()
# obtain the indices to sort and unsort the flattened array
i_sort = np.argsort(L)[::-1]
i_unsort = np.argsort(i_sort)
L_cumsum = L[i_sort].cumsum()
L_cumsum /= L_cumsum[-1]
xbins = 0.5 * (xbins[1:] + xbins[:-1])
ybins = 0.5 * (ybins[1:] + ybins[:-1])
return xbins, ybins, L_cumsum[i_unsort].reshape(shape)
def figgrid(ny, nx, figsize=None, left=0.1, right=0.85,
top=0.9, bottom=0.1, wspace=0.2, hspace=0.10):
"""Gridpars is
left, right
"""
from matplotlib import gridspec
if figsize is None:
figsize = (nx*4.5, ny*3)
fig = pl.figure(figsize=figsize)
axarray = np.zeros([ny, nx], dtype=np.dtype('O'))
gs1 = gridspec.GridSpec(ny, nx)
gs1.update(left=left, right=right, top=top, bottom=bottom,
wspace=wspace, hspace=hspace)
for i in range(ny):
for j in range(nx):
axarray[i, j] = fig.add_subplot(gs1[i, j])
return fig, axarray
def fill_between(x, y1, y2=0, ax=None, **kwargs):
"""Plot filled region between `y1` and `y2`.
This function works exactly the same as matplotlib's fill_between, except
that it also plots a proxy artist (specifically, a rectangle of 0 size)
so that it can be added it appears on a legend.
"""
ax = ax if ax is not None else pl.gca()
ax.fill_between(x, y1, y2, **kwargs)
p = pl.Rectangle((0, 0), 0, 0, **kwargs)
ax.add_patch(p)
return p
def logify(x):
return np.log10(x) | prospect/utils/plotting.py |
import numpy as np
try:
import matplotlib.pyplot as pl
except(ImportError):
pass
from ..plotting.utils import get_best
# Public API of this module, exported on ``from ... import *``.
# Note: ``get_best`` is re-exported from ``..plotting.utils``.
__all__ = ["get_best", "get_truths", "get_percentiles", "get_stats",
           "posterior_samples", "hist_samples", "joint_pdf", "compute_sigma_level",
           "trim_walkers", "fill_between", "figgrid"]
def get_truths(res):
    """Extract the true (mock) parameter values from a results dictionary.

    If ``res['obs']['mock_params']`` is a pickled bytestring it is unpickled
    and replaced **in place**, so later calls see the deserialized object.

    :param res:
        A results dictionary, possibly containing an "obs" key with a
        "mock_params" entry.

    :returns truths:
        The mock parameter values, or ``None`` if they are not present.
    """
    import pickle
    try:
        # HACK: unpickling is only safe for trusted results files; do not
        # call this on data from an untrusted source.
        mock = pickle.loads(res['obs']['mock_params'])
        res['obs']['mock_params'] = mock
    except(Exception):
        # Best effort: the entry may be absent or already deserialized.
        pass
    try:
        return res['obs']['mock_params']
    except(KeyError):
        return None
def get_percentiles(res, ptile=(16, 50, 84), start=0.0, thin=1, **extras):
    """Get percentiles of the marginalized posterior for each parameter.

    :param res:
        A results dictionary, containing a "chain" and "theta_labels" keys.
        The chain may be 3-d (walkers, iterations, parameters; emcee style)
        or 2-d (samples, parameters; e.g. dynesty style).

    :param ptile: (optional, default: (16, 50, 84))
        A sequence of percentiles (numbers from 0 to 100) to return for each
        parameter.

    :param start: (optional, default: 0.0)
        How much of the beginning of chains to throw away before calculating
        percentiles, expressed as a fraction of the total number of iterations.

    :param thin: (optional, default: 1)
        Only use every ``thin`` iteration when calculating percentiles.

    :returns pcts:
        Dictionary with keys giving the parameter names and values giving the
        requested percentiles for that parameter.
    """
    parnames = np.array(res.get('theta_labels', res['model'].theta_labels()))
    niter = res['chain'].shape[-2]
    start_index = np.floor(start * (niter-1)).astype(int)
    if res["chain"].ndim > 2:
        # emcee-style chain: flatten the walker dimension after trimming.
        flatchain = res['chain'][:, start_index::thin, :]
        dims = flatchain.shape
        flatchain = flatchain.reshape(dims[0]*dims[1], dims[2])
    elif res["chain"].ndim == 2:
        # dynesty-style chain: already flat.
        flatchain = res["chain"][start_index::thin, :]
    # Weighted percentiles if sample weights are present (nested sampling).
    pct = np.array([quantile(p, ptile, weights=res.get("weights", None)) for p in flatchain.T])
    return dict(zip(parnames, pct))
def quantile(data, percents, weights=None):
    """Compute (optionally weighted) percentiles of *data*.

    ``percents`` is on the 0--100 scale.  When ``weights`` is given it acts
    as the frequency (count) of each datum and the percentiles are read off
    the weighted empirical CDF by linear interpolation.
    """
    if weights is None:
        return np.percentile(data, percents)
    order = np.argsort(data)
    sorted_data = data[order]
    sorted_weights = weights[order]
    # Weighted empirical CDF, expressed in percent (0-100].
    cdf = 100.0 * sorted_weights.cumsum() / sorted_weights.sum()
    return np.interp(percents, cdf, sorted_data)
def get_stats(res, pnames, **kwargs):
    """For selected parameters, get the truth (if known), the MAP value from
    the chain, and the percentiles.

    :param res:
        A results dictionary, containing a "chain" and "theta_labels" keys.

    :param pnames:
        List of strings giving the names of the desired parameters.

    :returns pnames, truths, bests, pcts:
        The parameter names, the true values (or None if no mock truths are
        stored), the best-fit values, and the percentile arrays, each ordered
        like ``pnames``.
    """
    truth = get_truths(res)
    best = dict(zip(*get_best(res)))
    pct = get_percentiles(res, **kwargs)
    if truth is not None:
        truths = np.array([truth[k] for k in pnames])
    else:
        truths = None
    # Plain iteration over the names; the previous ``enumerate`` index was
    # unused.
    pcts = np.array([pct[k] for k in pnames])
    bests = np.array([best[k] for k in pnames])
    return pnames, truths, bests, pcts
def trim_walkers(res, threshold=-1e4):
    """Remove walkers with probability below some threshold.  Useful for
    removing stuck walkers.
    """
    # Keep only walkers whose final ln-probability clears the threshold.
    keep = res['lnprobability'][:, -1] > threshold
    return {'chain': res['chain'][keep, :, :],
            'lnprobability': res['lnprobability'][keep, :],
            'model': res['model']}
def joint_pdf(res, p1, p2, pmap={}, **kwargs):
    """Build a 2-dimensional array representing the binned joint PDF of 2
    parameters, in terms of sigma or fraction of the total distribution.

    For example, to plot contours of the joint PDF of parameters ``"parname1"``
    and ``"parname2"`` from the last half of a chain with 30bins in each
    dimension;

    ::

        xb, yb, sigma = joint_pdf(res, parname1, parname2, nbins=30, start=0.5)
        ax.contour(xb, yb, sigma, **plotting_kwargs)

    :param p1:
        The name of the parameter for the x-axis

    :param p2:
        The name of the parameter for the y axis

    :returns xb, yb, sigma:
        The bins and the 2-d histogram
    """
    # NOTE(review): the mutable default ``pmap={}`` is never mutated here, so
    # it is safe, but fragile if this function is ever changed.
    trace, pars = hist_samples(res, [p1, p2], **kwargs)
    trace = trace.copy().T
    # hist_samples returns columns in chain order, not request order; flip so
    # that row 0 feeds the x-axis (p2) and row 1 the y-axis (p1) below.
    if pars[0] == p1:
        trace = trace[::-1, :]
    # Optional per-parameter axis transforms (e.g. log10), keyed by name.
    x = pmap.get(p2, lambda x: x)(trace[0])
    y = pmap.get(p1, lambda x: x)(trace[1])
    xbins, ybins, sigma = compute_sigma_level(x, y, **kwargs)
    return xbins, ybins, sigma.T
def posterior_samples(res, nsample=None, **kwargs):
    """Pull samples of theta from the MCMC chain.

    :param res:
        A results dictionary, containing a "chain" and "theta_labels" keys.

    :param nsample:
        Number of random samples to draw; defaults to the full chain length.

    :param **kwargs:
        Extra keywords are passed to ``hist_samples``.

    :returns thetas:
        An array of parameter vectors drawn at random from the (weighted)
        chain.
    """
    flatchain, _ = hist_samples(res, **kwargs)
    weights = res.get("weights", None)
    nchain = flatchain.shape[0]
    if nsample is None:
        nsample = nchain
    draws = np.random.choice(nchain, p=weights, size=nsample)
    return flatchain[draws, :]
def hist_samples(res, showpars=None, start=0, thin=1,
                 return_lnprob=False, **extras):
    """Get posterior samples for the parameters listed in ``showpars``.  This
    can be done for different ending fractions of the (thinned) chain.

    :param res:
        A results dictionary, containing a "chain" and "theta_labels" keys.

    :param showpars:
        A list of strings giving the desired parameters; default is all.

    :param start: (optional, default: 0)
        How much of the beginning of chains to throw away before extracting
        samples, expressed as a fraction of the total number of iterations.

    :param thin: (optional, default: 1)
        Only use every ``thin`` iteration.

    :param return_lnprob: (optional, default: False)
        Also return the flattened ln-probability for each retained sample.

    :returns flatchain, parnames[, flatlnprob]:
        The (niter, npar) sample array, the matching parameter names, and,
        if requested, the matching ln-probabilities.
    """
    # ``res.get('theta_labels', res['model'].theta_labels())`` evaluated the
    # default eagerly, raising KeyError whenever 'model' was absent even if
    # 'theta_labels' was present.  Look the fallback up lazily instead.
    if 'theta_labels' in res:
        parnames = np.array(res['theta_labels'])
    else:
        parnames = np.array(res['model'].theta_labels())
    niter = res['chain'].shape[-2]
    start_index = np.floor(start * (niter - 1)).astype(int)
    if res["chain"].ndim > 2:
        # emcee-style chain: (nwalkers, niter, ndim) -> flatten the walkers.
        flatchain = res['chain'][:, start_index::thin, :]
        dims = flatchain.shape
        flatchain = flatchain.reshape(dims[0] * dims[1], dims[2])
        flatlnprob = res['lnprobability'][:, start_index::thin].reshape(dims[0] * dims[1])
    elif res["chain"].ndim == 2:
        # dynesty-style chain: (niter, ndim).
        flatchain = res["chain"][start_index::thin, :]
        flatlnprob = res['lnprobability'][start_index::thin]
    if showpars is None:
        ind_show = slice(None)
    else:
        ind_show = np.array([p in showpars for p in parnames], dtype=bool)
        flatchain = flatchain[:, ind_show]
    if return_lnprob:
        return flatchain, parnames[ind_show], flatlnprob
    return flatchain, parnames[ind_show]
def compute_sigma_level(trace1, trace2, nbins=30, weights=None, extents=None, **extras):
    """From a set of traces in two parameters, make a 2-d histogram of number
    of standard deviations. Following examples from <NAME>.

    :returns xbins, ybins, sigma:
        Bin centers along each axis and, per bin, the fraction of the total
        posterior mass contained in bins at least as dense as that bin
        (suitable for ``contour`` levels).
    """
    L, xbins, ybins = np.histogram2d(trace1, trace2, bins=nbins,
                                     weights=weights,
                                     range=extents)
    # Guard against empty bins so the normalization below stays well-behaved.
    L[L == 0] = 1E-16
    # Removed dead code: ``logL = np.log(L)`` was computed but never used.
    shape = L.shape
    L = L.ravel()
    # Indices that sort (descending by density) and unsort the flat histogram.
    i_sort = np.argsort(L)[::-1]
    i_unsort = np.argsort(i_sort)
    L_cumsum = L[i_sort].cumsum()
    L_cumsum /= L_cumsum[-1]
    # Convert bin edges to bin centers for plotting.
    xbins = 0.5 * (xbins[1:] + xbins[:-1])
    ybins = 0.5 * (ybins[1:] + ybins[:-1])
    return xbins, ybins, L_cumsum[i_unsort].reshape(shape)
def figgrid(ny, nx, figsize=None, left=0.1, right=0.85,
            top=0.9, bottom=0.1, wspace=0.2, hspace=0.10):
    """Create a figure holding an ``ny`` x ``nx`` grid of axes.

    The keyword arguments are GridSpec layout parameters (figure margins and
    inter-axes spacing).  Returns the figure and an object array of axes.
    """
    from matplotlib import gridspec
    if figsize is None:
        figsize = (nx * 4.5, ny * 3)
    fig = pl.figure(figsize=figsize)
    grid = gridspec.GridSpec(ny, nx)
    grid.update(left=left, right=right, top=top, bottom=bottom,
                wspace=wspace, hspace=hspace)
    axarray = np.empty([ny, nx], dtype=np.dtype('O'))
    for row in range(ny):
        for col in range(nx):
            axarray[row, col] = fig.add_subplot(grid[row, col])
    return fig, axarray
def fill_between(x, y1, y2=0, ax=None, **kwargs):
    """Plot filled region between `y1` and `y2`.

    Behaves exactly like matplotlib's ``fill_between`` except that a zero-size
    Rectangle proxy artist is also added, so the filled region can appear in a
    legend.
    """
    if ax is None:
        ax = pl.gca()
    ax.fill_between(x, y1, y2, **kwargs)
    proxy = pl.Rectangle((0, 0), 0, 0, **kwargs)
    ax.add_patch(proxy)
    return proxy
def logify(x):
    """Return the base-10 logarithm of *x*; convenience for log-scale plots."""
    # The original line carried trailing concatenation junk ("| 0.769557 | ...")
    # that made it a syntax error; restored to the plain return.
    return np.log10(x)
def render_graph_PathTracerGraph():
    """Build and return the Falcor render graph:
    GBufferRT -> MegakernelPathTracer -> AccumulatePass -> ToneMapper."""
    g = RenderGraph("PathTracerGraph")
    loadRenderPassLibrary("AccumulatePass.dll")
    loadRenderPassLibrary("GBuffer.dll")
    loadRenderPassLibrary("OptixDenoiser.dll")
    loadRenderPassLibrary("ToneMapper.dll")
    loadRenderPassLibrary("WavefrontPathTracer.dll")
    AccumulatePass = createPass("AccumulatePass", {'enabled': True})
    g.addPass(AccumulatePass, "AccumulatePass")
    ToneMappingPass = createPass("ToneMapper", {'autoExposure': False, 'exposureCompensation': 0.0})
    g.addPass(ToneMappingPass, "ToneMappingPass")
    GBufferRT = createPass("GBufferRT", {'forceCullMode': False, 'cull': CullMode.CullBack, 'samplePattern': SamplePattern.Stratified, 'sampleCount': 16})
    # NOTE(review): GBufferRaster is created but never added to the graph;
    # left as-is because createPass may have load-time side effects -- confirm.
    GBufferRaster = createPass("GBufferRaster", {'forceCullMode': False, 'cull': CullMode.CullBack, 'samplePattern': SamplePattern.Stratified, 'sampleCount': 16}) # viewW not exported ? Not compatible with Path Tracers anymore ?
    g.addPass(GBufferRT, "GBuffer")
    # MegakernelPathTracer = createPass("MegakernelPathTracer", {'params': PathTracerParams(useVBuffer=0, rayFootprintMode=0)}) # Generates an error apparently because of rayFootprintMode being unsigned, is there a specific syntac to use ?
    MegakernelPathTracer = createPass("MegakernelPathTracer", {'params': PathTracerParams(useVBuffer=0)})
    g.addPass(MegakernelPathTracer, "PathTracer")
    # Wire the G-buffer outputs into the path tracer inputs.
    g.addEdge("GBuffer.vbuffer", "PathTracer.vbuffer") # Required by Ray Footprint.
    g.addEdge("GBuffer.posW", "PathTracer.posW")
    g.addEdge("GBuffer.normW", "PathTracer.normalW")
    g.addEdge("GBuffer.tangentW", "PathTracer.tangentW")
    g.addEdge("GBuffer.faceNormalW", "PathTracer.faceNormalW")
    g.addEdge("GBuffer.viewW", "PathTracer.viewW")
    g.addEdge("GBuffer.diffuseOpacity", "PathTracer.mtlDiffOpacity")
    g.addEdge("GBuffer.specRough", "PathTracer.mtlSpecRough")
    g.addEdge("GBuffer.emissive", "PathTracer.mtlEmissive")
    g.addEdge("GBuffer.matlExtra", "PathTracer.mtlParams")
    # Accumulate the path-traced color, then tone-map for display.
    g.addEdge("PathTracer.color", "AccumulatePass.input")
    g.addEdge("AccumulatePass.output", "ToneMappingPass.src")
    g.markOutput("ToneMappingPass.dst")
    return g
PathTracerGraph = render_graph_PathTracerGraph()
# Register the graph with the running Mogwai instance, if any; when executed
# outside Mogwai the name ``m`` is undefined and registration is skipped.
try:
    m.addGraph(PathTracerGraph)
except NameError:
    pass  # was a bare ``None`` expression statement; ``pass`` is the idiom
# NOTE(review): a byte-for-byte duplicate of render_graph_PathTracerGraph()
# and this registration block (a file-concatenation artifact, fused with a
# "| repo/path |" fragment) was removed here; the definition above is
# authoritative.
from autotabular.pipeline.components.base import AutotabularPreprocessingAlgorithm
from autotabular.pipeline.components.feature_preprocessing.select_percentile import SelectPercentileBase
from autotabular.pipeline.constants import DENSE, INPUT, SPARSE, UNSIGNED_DATA
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import CategoricalHyperparameter, UniformFloatHyperparameter
class SelectPercentileRegression(SelectPercentileBase,
                                 AutotabularPreprocessingAlgorithm):
    """Keep the top ``percentile`` of features ranked by a univariate
    regression scoring function (ANOVA F-value or mutual information)."""

    def __init__(self,
                 percentile,
                 score_func='f_regression',
                 random_state=None):
        """Parameters:

        percentile : percentage of features to keep (coerced to int).
        score_func : 'f_regression' or 'mutual_info'; chooses the sklearn
            function taking two arrays X and y, and returning a pair of
            arrays (scores, pvalues).
        random_state : ignored.
        """
        import sklearn.feature_selection
        self.random_state = random_state  # We don't use this
        self.percentile = int(float(percentile))
        if score_func == 'f_regression':
            self.score_func = sklearn.feature_selection.f_regression
        elif score_func == 'mutual_info':
            self.score_func = sklearn.feature_selection.mutual_info_regression
        else:
            raise ValueError("Don't know this scoring function: %s" %
                             score_func)

    @staticmethod
    def get_properties(dataset_properties=None):
        """Static capability description consumed by the pipeline framework."""
        return {
            'shortname': 'SPR',
            'name': 'Select Percentile Regression',
            'handles_regression': True,
            'handles_classification': False,
            'handles_multiclass': False,
            'handles_multilabel': False,
            'handles_multioutput': False,
            'is_deterministic': True,
            'input': (DENSE, SPARSE, UNSIGNED_DATA),
            'output': (INPUT, )
        }

    @staticmethod
    def get_hyperparameter_search_space(dataset_properties=None):
        """Build the ConfigSpace describing this component's hyperparameters."""
        percentile = UniformFloatHyperparameter(
            'percentile', lower=1, upper=99, default_value=50)
        score_func = CategoricalHyperparameter(
            name='score_func', choices=['f_regression', 'mutual_info'])
        cs = ConfigurationSpace()
        cs.add_hyperparameters([percentile, score_func])
        # The original final line was fused with "| repo/path | import ..."
        # concatenation junk (a syntax error); restored to the plain return.
        return cs
# NOTE(review): removed a byte-for-byte duplicate of this module's imports and
# of the SelectPercentileRegression class that was appended here by a
# file-concatenation artifact; the single definition above is authoritative.
from functools import partial
import logging
from homeassistant.const import ATTR_BATTERY_LEVEL, STATE_OFF, STATE_ON
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from .const import CHILD_CALLBACK, NODE_CALLBACK, UPDATE_DELAY
_LOGGER = logging.getLogger(__name__)

# Extra state-attribute keys exposed on every MySensors entity.
ATTR_CHILD_ID = "child_id"
ATTR_DESCRIPTION = "description"
ATTR_DEVICE = "device"
ATTR_NODE_ID = "node_id"
ATTR_HEARTBEAT = "heartbeat"
# Key template for the per-platform device registries kept in ``hass.data``.
MYSENSORS_PLATFORM_DEVICES = "mysensors_devices_{}"


def get_mysensors_devices(hass, domain):
    """Return MySensors devices for a platform."""
    registry_key = MYSENSORS_PLATFORM_DEVICES.format(domain)
    # Create the per-platform registry on first access, then reuse it.
    return hass.data.setdefault(registry_key, {})
class MySensorsDevice:
    """Representation of a MySensors device."""

    def __init__(self, gateway, node_id, child_id, name, value_type):
        """Set up the MySensors device."""
        self.gateway = gateway          # pymysensors gateway instance
        self.node_id = node_id          # MySensors node id
        self.child_id = child_id        # child sensor id within the node
        self._name = name
        self.value_type = value_type    # the V_* set-request type tracked here
        child = gateway.sensors[node_id].children[child_id]
        self.child_type = child.type
        self._values = {}               # last seen values, keyed by value_type
        self._update_scheduled = False  # True while a delayed update is pending
        self.hass = None                # set by Home Assistant after creation

    @property
    def name(self):
        """Return the name of this entity."""
        return self._name

    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        node = self.gateway.sensors[self.node_id]
        child = node.children[self.child_id]
        attr = {
            ATTR_BATTERY_LEVEL: node.battery_level,
            ATTR_HEARTBEAT: node.heartbeat,
            ATTR_CHILD_ID: self.child_id,
            ATTR_DESCRIPTION: child.description,
            ATTR_DEVICE: self.gateway.device,
            ATTR_NODE_ID: self.node_id,
        }
        set_req = self.gateway.const.SetReq
        # Also expose every tracked value under its V_* name.
        for value_type, value in self._values.items():
            attr[set_req(value_type).name] = value
        return attr

    async def async_update(self):
        """Update the controller with the latest value from a sensor."""
        node = self.gateway.sensors[self.node_id]
        child = node.children[self.child_id]
        set_req = self.gateway.const.SetReq
        for value_type, value in child.values.items():
            _LOGGER.debug(
                "Entity update: %s: value_type %s, value = %s",
                self._name,
                value_type,
                value,
            )
            # Binary value types are mapped onto Home Assistant on/off states.
            if value_type in (
                set_req.V_ARMED,
                set_req.V_LIGHT,
                set_req.V_LOCK_STATUS,
                set_req.V_TRIPPED,
            ):
                self._values[value_type] = STATE_ON if int(value) == 1 else STATE_OFF
            elif value_type == set_req.V_DIMMER:
                self._values[value_type] = int(value)
            else:
                self._values[value_type] = value

    async def _async_update_callback(self):
        """Update the device."""
        raise NotImplementedError

    @callback
    def async_update_callback(self):
        """Update the device after delay."""
        # Debounce: if an update is already scheduled, let it cover this event.
        if self._update_scheduled:
            return

        async def update():
            """Perform update."""
            try:
                await self._async_update_callback()
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Error updating %s", self.name)
            finally:
                # Allow the next callback to schedule a fresh update.
                self._update_scheduled = False

        self._update_scheduled = True
        # Defer the update by UPDATE_DELAY seconds on the event loop.
        delayed_update = partial(self.hass.async_create_task, update())
        self.hass.loop.call_later(UPDATE_DELAY, delayed_update)
class MySensorsEntity(MySensorsDevice, Entity):
    """Representation of a MySensors entity."""

    @property
    def should_poll(self):
        """Return the polling state. The gateway pushes its states."""
        return False

    @property
    def available(self):
        """Return true if entity is available."""
        return self.value_type in self._values

    async def _async_update_callback(self):
        """Update the entity."""
        await self.async_update_ha_state(True)

    async def async_added_to_hass(self):
        """Register update callback."""
        gateway_id = id(self.gateway)
        dev_id = gateway_id, self.node_id, self.child_id, self.value_type
        # Child-level updates target this specific (gateway, node, child,
        # value_type) entity; node-level updates fan out to all its children.
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass, CHILD_CALLBACK.format(*dev_id), self.async_update_callback
            )
        )
        # The original closing line was fused with "| repo/path | import ..."
        # concatenation junk (a syntax error); restored to the plain call.
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass,
                NODE_CALLBACK.format(gateway_id, self.node_id),
                self.async_update_callback,
            )
        )
# NOTE(review): removed a byte-for-byte duplicate of this entire module
# (imports, constants, get_mysensors_devices, MySensorsDevice and
# MySensorsEntity) that was appended here by a file-concatenation artifact;
# the definitions above are authoritative.
"""Tests the `client` module in client–server mode."""
########################################
# Dependencies #
########################################
import mph
from fixtures import logging_disabled
from fixtures import setup_logging
from pytest import raises
from pathlib import Path
########################################
# Fixtures #
########################################
client = None  # mph.Client created in test_init() and shared by all tests
model = None   # model loaded from ``demo`` in test_load()
# Demo model shipped with the package, two directories up from this test file.
demo = Path(__file__).resolve().parent.parent/'demos'/'capacitor.mph'
########################################
# Tests                                #
########################################

# The test are mostly in source-code order of the Client class. Except
# that we load a model and create another one early on, just so we have
# something to work with. And connect() is already called from __init__(),
# which is why disconnect() comes before connect(), which actually tests
# reconnecting the client.

def test_init():
    global client
    mph.option('session', 'client-server')
    client = mph.start(cores=1)
    assert client.version
    assert client.port
    assert client.host == 'localhost'
    assert client.java
    assert not client.standalone
    # Direct instantiation is unsupported; mph.start() must be used instead.
    with logging_disabled():
        with raises(NotImplementedError):
            mph.Client()

def test_load():
    global model
    assert demo.is_file()
    model = client.load(demo)
    assert model

def test_create():
    name = 'empty'
    client.create(name)
    assert name in client.names()
    client.create()
    assert 'Untitled' in client.names()

def test_repr():
    assert repr(client) == f"Client(port={client.port}, host='localhost')"

def test_contains():
    assert model in client
    assert 'capacitor' in client
    assert 'empty' in client
    assert 'non-existing' not in client

def test_iter():
    models = list(client)
    assert model in models

def test_truediv():
    # ``client/name`` looks up a loaded model by name.
    assert client/'capacitor' == model
    with logging_disabled():
        with raises(ValueError):
            client/'non-existing'
        with raises(TypeError):
            client/model
        with raises(TypeError):
            client/False

def test_cores():
    assert client.cores == 1

def test_models():
    assert model in client.models()

def test_names():
    assert model.name() in client.names()

def test_files():
    assert demo in client.files()

def test_modules():
    for key in mph.client.modules.keys():
        assert client.java.hasProduct(key) in (True, False)
    for value in mph.client.modules.values():
        assert value in mph.model.modules.values()
    assert 'Comsol core' in client.modules()
    # An unknown module entry must not break the listing.
    mph.client.modules['invalid'] = 'invalid'
    client.modules()
    del mph.client.modules['invalid']

def test_caching():
    assert not client.caching()
    copy = client.load(demo)
    assert model != copy
    client.remove(copy)
    client.caching(True)
    assert client.caching()
    # With caching on, re-loading the same file returns the same model.
    copy = client.load(demo)
    assert model == copy
    client.caching(False)
    assert not client.caching()
    with logging_disabled():
        with raises(ValueError):
            client.caching('anything else')

def test_remove():
    name = model.name()
    assert name in client.names()
    client.remove(model)
    assert name not in client.names()
    assert 'empty' in client.names()
    client.remove('empty')
    assert 'empty' not in client.names()
    with logging_disabled():
        with raises(Exception, match='is no longer in the model'):
            model.java.component()
        with raises(ValueError):
            client.remove(model)
        with raises(ValueError):
            client.remove('non-existing')
        with raises(TypeError):
            client.remove(True)

def test_clear():
    client.clear()
    assert not client.models()

def test_disconnect():
    client.disconnect()
    assert client.host is None
    assert client.port is None
    assert repr(client) == 'Client(disconnected)'
    with logging_disabled():
        with raises(Exception):
            client.models()
        with raises(RuntimeError):
            client.disconnect()

def test_connect():
    # Reconnect the disconnected client to a freshly started server.
    server = mph.Server(cores=1)
    client.connect(server.port)
    assert client.port == server.port
    assert client.cores == 1
    with logging_disabled():
        with raises(RuntimeError):
            client.connect(server.port)
    client.disconnect()
    server.stop()
########################################
# Main                                 #
########################################

if __name__ == '__main__':
    # Run the suite in order when executed as a script (outside pytest).
    # NOTE(review): test_modules() is not invoked here -- confirm whether
    # that omission is intentional.
    setup_logging()
    test_init()
    test_load()
    test_create()
    test_repr()
    test_contains()
    test_iter()
    test_truediv()
    test_cores()
    test_models()
    test_names()
    test_files()
    test_caching()
    test_remove()
    test_clear()
    test_disconnect()
    # The original final line was fused with "| repo/path | docstring"
    # concatenation junk (a syntax error); restored to the plain call.
    test_connect()
# NOTE(review): removed a byte-for-byte duplicate of this entire test module
# (dependencies, fixtures, all test functions and the __main__ driver) that
# was appended here by a file-concatenation artifact; the definitions above
# are authoritative.
# pylint: disable=unused-argument, line-too-long
from msrestazure.tools import is_valid_resource_id, parse_resource_id, is_valid_resource_name, resource_id # pylint: disable=import-error
from knack.log import get_logger
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.util import CLIError
from azure.cli.core.azclierror import ValidationError
from azure.mgmt.privatedns.models import VirtualNetworkLink
from azure.mgmt.privatedns.models import PrivateZone
from azure.mgmt.privatedns.models import SubResource
from ._client_factory import resource_client_factory, network_client_factory, private_dns_client_factory, private_dns_link_client_factory, cf_postgres_flexible_private_dns_zone_suffix_operations
from ._flexible_server_util import get_id_components, check_existence
logger = get_logger(__name__)

# Address prefixes applied when the command has to create the Vnet/Subnet
# itself and the user supplied none.
DEFAULT_VNET_ADDRESS_PREFIX = '10.0.0.0/16'
DEFAULT_SUBNET_ADDRESS_PREFIX = '10.0.0.0/24'
# pylint: disable=too-many-locals, too-many-statements, too-many-branches
def prepare_private_network(cmd, resource_group_name, server_name, vnet, subnet, location, delegation_service_name, vnet_address_pref, subnet_address_pref):
    """Resolve or create the Vnet/Subnet for a flexible server.

    *vnet*/*subnet* may each be a resource ID, a bare name, or None; every
    combination is handled by a separate branch below. Returns the subnet's
    resource ID (or None in the unreachable fall-through case).
    """
    nw_client = network_client_factory(cmd.cli_ctx)
    resource_client = resource_client_factory(cmd.cli_ctx)
    # Handle vnet and subnet prefix
    # Prefixes must be supplied together; otherwise fall back to module defaults.
    if (vnet_address_pref is not None and subnet_address_pref is None) or \
            (vnet_address_pref is None and subnet_address_pref is not None):
        raise ValidationError("You need to provide both Vnet address prefix and Subnet address prefix.")
    if vnet_address_pref is None:
        vnet_address_pref = DEFAULT_VNET_ADDRESS_PREFIX
    if subnet_address_pref is None:
        subnet_address_pref = DEFAULT_SUBNET_ADDRESS_PREFIX
    # pylint: disable=too-many-nested-blocks
    if subnet is not None and vnet is None:
        # Subnet alone must be a full resource ID, since it implies the Vnet.
        if not is_valid_resource_id(subnet):
            raise ValidationError("Incorrectly formed Subnet ID. If you are providing only --subnet (not --vnet), the Subnet parameter should be in resource ID format.")
        if 'child_name_1' not in parse_resource_id(subnet):
            raise ValidationError("Incorrectly formed Subnet ID. Check if the Subnet ID is in the right format.")
        logger.warning("You have supplied a Subnet ID. Verifying its existence...")
        subnet_result = address_private_network_with_id_input(cmd, subnet, nw_client, resource_client, server_name, location, delegation_service_name, vnet_address_pref, subnet_address_pref)
    elif subnet is None and vnet is not None:
        if is_valid_resource_id(vnet):
            logger.warning("You have supplied a Vnet ID. Verifying its existence...")
            subnet_result = address_private_network_with_id_input(cmd, vnet, nw_client, resource_client, server_name, location, delegation_service_name, vnet_address_pref, subnet_address_pref)
        elif _check_if_resource_name(vnet) and is_valid_resource_name(vnet):
            logger.warning("You have supplied a Vnet name. Verifying its existence...")
            # NOTE(review): 'Subnet' + server_name[6:] assumes server_name
            # carries a 6-character prefix to strip — confirm against callers.
            subnet_result = _create_vnet_subnet_delegation(cmd, nw_client, resource_client, delegation_service_name, resource_group_name, vnet, 'Subnet' + server_name[6:],
                                                           location, server_name, vnet_address_pref, subnet_address_pref)
        else:
            raise ValidationError("Incorrectly formed Vnet ID or Vnet name")
    elif subnet is not None and vnet is not None:
        if _check_if_resource_name(vnet) and _check_if_resource_name(subnet):
            logger.warning("You have supplied a Vnet and Subnet name. Verifying its existence...")
            subnet_result = _create_vnet_subnet_delegation(cmd, nw_client, resource_client, delegation_service_name, resource_group_name, vnet, subnet,
                                                           location, server_name, vnet_address_pref, subnet_address_pref)
        else:
            raise ValidationError("If you pass both --vnet and --subnet, consider passing names instead of IDs. If you want to use exising subnet, please provide subnet Id (not vnet Id).")
    elif subnet is None and vnet is None:
        # Nothing supplied: create a server-derived Vnet and Subnet with defaults.
        subnet_result = _create_vnet_subnet_delegation(cmd, nw_client, resource_client, delegation_service_name, resource_group_name, 'Vnet' + server_name[6:], 'Subnet' + server_name[6:],
                                                       location, server_name, vnet_address_pref, subnet_address_pref)
    else:
        return None
    return subnet_result.id
def address_private_network_with_id_input(cmd, rid, nw_client, resource_client, server_name, location, delegation_service_name, vnet_address_pref, subnet_address_pref):
    """Handle a Vnet or Subnet supplied as a resource ID.

    Switches clients to the ID's subscription when it differs, ensures the
    ID's resource group exists, then creates/reuses the delegated subnet.
    Returns the Subnet object.
    """
    id_subscription, id_resource_group, id_vnet, id_subnet = get_id_components(rid)
    nw_client, resource_client = _change_client_with_different_subscription(cmd, id_subscription, nw_client, resource_client)
    _resource_group_verify_and_create(resource_client, id_resource_group, location)
    if id_subnet is None:
        # Vnet-only ID: derive a subnet name from the server name.
        id_subnet = 'Subnet' + server_name[6:]
    return _create_vnet_subnet_delegation(cmd, nw_client, resource_client, delegation_service_name, id_resource_group, id_vnet, id_subnet,
                                          location, server_name, vnet_address_pref, subnet_address_pref)
def _change_client_with_different_subscription(cmd, subscription, nw_client, resource_client):
    """Return (network client, resource client) bound to *subscription*,
    re-creating them only when it differs from the CLI's current one."""
    if subscription != get_subscription_id(cmd.cli_ctx):
        logger.warning('The Vnet/Subnet ID provided is in different subscription from the server')
        nw_client = network_client_factory(cmd.cli_ctx, subscription_id=subscription)
        resource_client = resource_client_factory(cmd.cli_ctx, subscription_id=subscription)
    return nw_client, resource_client
def _resource_group_verify_and_create(resource_client, resource_group, location):
    """Create *resource_group* in *location* if it does not already exist."""
    if not resource_client.resource_groups.check_existence(resource_group):
        logger.warning("Provided resource group in the resource ID doesn't exist. Creating the resource group %s", resource_group)
        resource_client.resource_groups.create_or_update(resource_group, {'location': location})
def _create_vnet_subnet_delegation(cmd, nw_client, resource_client, delegation_service_name, resource_group, vnet_name, subnet_name, location, server_name, vnet_address_pref, subnet_address_pref):
    """Create the Vnet if missing (or extend an existing one's address space),
    then create/validate the delegated subnet. Returns the Subnet object."""
    VirtualNetwork, AddressSpace = cmd.get_models('VirtualNetwork', 'AddressSpace', resource_type=ResourceType.MGMT_NETWORK)
    if not check_existence(resource_client, vnet_name, resource_group, 'Microsoft.Network', 'virtualNetworks'):
        logger.warning('Creating new Vnet "%s" in resource group "%s"', vnet_name, resource_group)
        nw_client.virtual_networks.begin_create_or_update(resource_group,
                                                          vnet_name,
                                                          VirtualNetwork(name=vnet_name,
                                                                         location=location,
                                                                         address_space=AddressSpace(
                                                                             address_prefixes=[
                                                                                 vnet_address_pref]))).result()
    else:
        logger.warning('Using existing Vnet "%s" in resource group "%s"', vnet_name, resource_group)
        # check if vnet prefix is in address space and add if not there
        vnet = nw_client.virtual_networks.get(resource_group, vnet_name)
        prefixes = vnet.address_space.address_prefixes
        subnet_exist = check_existence(resource_client, subnet_name, resource_group, 'Microsoft.Network', 'subnets', parent_name=vnet_name, parent_type='virtualNetworks')
        if not subnet_exist and vnet_address_pref not in prefixes:
            # Only widen the address space when the subnet will actually be created.
            logger.warning('Adding address prefix %s to Vnet %s', vnet_address_pref, vnet_name)
            nw_client.virtual_networks.begin_create_or_update(resource_group, vnet_name,
                                                              VirtualNetwork(location=location,
                                                                             address_space=AddressSpace(
                                                                                 address_prefixes=prefixes + [vnet_address_pref]))).result()
    return _create_subnet_delegation(cmd, nw_client, resource_client, delegation_service_name, resource_group, vnet_name, subnet_name, location, server_name, subnet_address_pref)
def _create_subnet_delegation(cmd, nw_client, resource_client, delegation_service_name, resource_group, vnet_name, subnet_name, location, server_name, subnet_address_pref):
    """Create the subnet with the service delegation, or validate and update an
    existing subnet so it carries the delegation and a Storage service endpoint."""
    Delegation, Subnet, ServiceEndpoint = cmd.get_models('Delegation', 'Subnet', 'ServiceEndpointPropertiesFormat', resource_type=ResourceType.MGMT_NETWORK)
    delegation = Delegation(name=delegation_service_name, service_name=delegation_service_name)
    service_endpoint = ServiceEndpoint(service='Microsoft.Storage')
    # subnet exist
    if not check_existence(resource_client, subnet_name, resource_group, 'Microsoft.Network', 'subnets', parent_name=vnet_name, parent_type='virtualNetworks'):
        subnet_result = Subnet(
            name=subnet_name,
            location=location,
            address_prefix=subnet_address_pref,
            delegations=[delegation],
            service_endpoints=[service_endpoint])
        vnet = nw_client.virtual_networks.get(resource_group, vnet_name)
        vnet_subnet_prefixes = [subnet.address_prefix for subnet in vnet.subnets]
        # Refuse a prefix collision up front; Azure would reject it anyway.
        if subnet_address_pref in vnet_subnet_prefixes:
            raise ValidationError("The Subnet (default) prefix {} is already taken by another Subnet in the Vnet. Please provide a different prefix for --subnet-prefix parameter".format(subnet_address_pref))
        logger.warning('Creating new Subnet "%s" in resource group "%s"', subnet_name, resource_group)
        subnet = nw_client.subnets.begin_create_or_update(resource_group, vnet_name, subnet_name,
                                                          subnet_result).result()
    else:
        subnet = nw_client.subnets.get(resource_group, vnet_name, subnet_name)
        logger.warning('Using existing Subnet "%s" in resource group "%s"', subnet_name, resource_group)
        if subnet_address_pref not in (DEFAULT_SUBNET_ADDRESS_PREFIX, subnet.address_prefix):
            logger.warning("The prefix of the subnet you provided does not match the --subnet-prefix value %s. Using current prefix %s", subnet_address_pref, subnet.address_prefix)
        # Add Delegation if not delegated already
        if not subnet.delegations:
            logger.warning('Adding "%s" delegation to the existing subnet %s.', delegation_service_name, subnet_name)
        else:
            # A foreign delegation is fatal: a subnet can carry only one service.
            for delgtn in subnet.delegations:
                if delgtn.service_name != delegation_service_name:
                    raise CLIError("Can not use subnet with existing delegations other than {}".format(
                        delegation_service_name))
        subnet.service_endpoints = [service_endpoint]
        subnet.delegations = [delegation]
        subnet = nw_client.subnets.begin_create_or_update(resource_group, vnet_name, subnet_name, subnet).result()
    return subnet
def prepare_private_dns_zone(cmd, database_engine, resource_group, server_name, private_dns_zone, subnet_id, location):
    """Resolve or create the private DNS zone for the server and link it to the
    server's Vnet.

    *private_dns_zone* may be None (derive '<server>.<suffix>' automatically),
    a zone name, or a zone resource ID (possibly in another subscription).
    Returns the zone's resource ID.
    """
    dns_suffix_client = cf_postgres_flexible_private_dns_zone_suffix_operations(cmd.cli_ctx, '_')
    private_dns_zone_suffix = dns_suffix_client.execute(database_engine)
    vnet_sub, vnet_rg, vnet_name, _ = get_id_components(subnet_id)
    private_dns_client = private_dns_client_factory(cmd.cli_ctx)
    private_dns_link_client = private_dns_link_client_factory(cmd.cli_ctx)
    resource_client = resource_client_factory(cmd.cli_ctx)
    vnet_id = resource_id(subscription=vnet_sub,
                          resource_group=vnet_rg,
                          namespace='Microsoft.Network',
                          type='virtualNetworks',
                          name=vnet_name)
    nw_client = network_client_factory(cmd.cli_ctx, subscription_id=vnet_sub)
    vnet = nw_client.virtual_networks.get(vnet_rg, vnet_name)
    if private_dns_zone is None:
        # Default zone name: '<server>.<engine-specific suffix>'.
        private_dns_zone = server_name + '.' + private_dns_zone_suffix
    elif not _check_if_resource_name(private_dns_zone) and is_valid_resource_id(private_dns_zone):
        # Zone given as a resource ID: it may live in another subscription/RG.
        subscription, resource_group, private_dns_zone, _ = get_id_components(private_dns_zone)
        if private_dns_zone[-len(private_dns_zone_suffix):] != private_dns_zone_suffix:
            raise ValidationError('The suffix for the private DNS zone should be "{}"'.format(private_dns_zone_suffix))
        if subscription != get_subscription_id(cmd.cli_ctx):
            logger.warning('The provided private DNS zone ID is in different subscription from the server')
            resource_client = resource_client_factory(cmd.cli_ctx, subscription_id=subscription)
            private_dns_client = private_dns_client_factory(cmd.cli_ctx, subscription_id=subscription)
            private_dns_link_client = private_dns_link_client_factory(cmd.cli_ctx, subscription_id=subscription)
        _resource_group_verify_and_create(resource_client, resource_group, location)
    elif _check_if_resource_name(private_dns_zone) and not is_valid_resource_name(private_dns_zone) \
            or not _check_if_resource_name(private_dns_zone) and not is_valid_resource_id(private_dns_zone):
        raise ValidationError("Check if the private dns zone name or id is in correct format.")
    elif _check_if_resource_name(private_dns_zone) and private_dns_zone[-len(private_dns_zone_suffix):] != private_dns_zone_suffix:
        raise ValidationError('The suffix for the private DNS zone should be "{}"'.format(private_dns_zone_suffix))
    # Registration-enabled link so server records resolve inside the Vnet.
    link = VirtualNetworkLink(location='global', virtual_network=SubResource(id=vnet.id))
    link.registration_enabled = True
    if not check_existence(resource_client, private_dns_zone, resource_group, 'Microsoft.Network', 'privateDnsZones'):
        logger.warning('Creating a private dns zone %s..', private_dns_zone)
        private_zone = private_dns_client.create_or_update(resource_group_name=resource_group,
                                                           private_zone_name=private_dns_zone,
                                                           parameters=PrivateZone(location='global'),
                                                           if_none_match='*').result()
        private_dns_link_client.create_or_update(resource_group_name=resource_group,
                                                 private_zone_name=private_dns_zone,
                                                 virtual_network_link_name=vnet_name + '-link',
                                                 parameters=link, if_none_match='*').result()
    else:
        logger.warning('Using the existing private dns zone %s', private_dns_zone)
        private_zone = private_dns_client.get(resource_group_name=resource_group,
                                              private_zone_name=private_dns_zone)
        # private dns zone link list
        virtual_links = private_dns_link_client.list(resource_group_name=resource_group,
                                                     private_zone_name=private_dns_zone)
        # Link the Vnet only if no existing link already points at it.
        link_exist_flag = False
        for virtual_link in virtual_links:
            if virtual_link.virtual_network.id == vnet_id:
                link_exist_flag = True
                break
        if not link_exist_flag:
            private_dns_link_client.create_or_update(resource_group_name=resource_group,
                                                     private_zone_name=private_dns_zone,
                                                     virtual_network_link_name=vnet_name + '-link',
                                                     parameters=link, if_none_match='*').result()
    return private_zone.id
def _check_if_resource_name(resource):
if len(resource.split('/')) == 1:
return True
return False | src/azure-cli/azure/cli/command_modules/rdbms/flexible_server_virtual_network.py |
# pylint: disable=unused-argument, line-too-long
from msrestazure.tools import is_valid_resource_id, parse_resource_id, is_valid_resource_name, resource_id # pylint: disable=import-error
from knack.log import get_logger
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.util import CLIError
from azure.cli.core.azclierror import ValidationError
from azure.mgmt.privatedns.models import VirtualNetworkLink
from azure.mgmt.privatedns.models import PrivateZone
from azure.mgmt.privatedns.models import SubResource
from ._client_factory import resource_client_factory, network_client_factory, private_dns_client_factory, private_dns_link_client_factory, cf_postgres_flexible_private_dns_zone_suffix_operations
from ._flexible_server_util import get_id_components, check_existence
logger = get_logger(__name__)

# Address defaults used when the caller supplies no --vnet/--subnet prefixes.
DEFAULT_VNET_ADDRESS_PREFIX = '10.0.0.0/16'
DEFAULT_SUBNET_ADDRESS_PREFIX = '10.0.0.0/24'

# pylint: disable=too-many-locals, too-many-statements, too-many-branches
def prepare_private_network(cmd, resource_group_name, server_name, vnet, subnet, location, delegation_service_name, vnet_address_pref, subnet_address_pref):
    """Resolve or create the Vnet/Subnet for a flexible server.

    *vnet*/*subnet* may each be a resource ID, a bare name, or None; every
    combination is handled by a separate branch below. Returns the subnet's
    resource ID (or None in the unreachable fall-through case).
    """
    nw_client = network_client_factory(cmd.cli_ctx)
    resource_client = resource_client_factory(cmd.cli_ctx)
    # Handle vnet and subnet prefix
    # Prefixes must be supplied together; otherwise fall back to module defaults.
    if (vnet_address_pref is not None and subnet_address_pref is None) or \
            (vnet_address_pref is None and subnet_address_pref is not None):
        raise ValidationError("You need to provide both Vnet address prefix and Subnet address prefix.")
    if vnet_address_pref is None:
        vnet_address_pref = DEFAULT_VNET_ADDRESS_PREFIX
    if subnet_address_pref is None:
        subnet_address_pref = DEFAULT_SUBNET_ADDRESS_PREFIX
    # pylint: disable=too-many-nested-blocks
    if subnet is not None and vnet is None:
        # Subnet alone must be a full resource ID, since it implies the Vnet.
        if not is_valid_resource_id(subnet):
            raise ValidationError("Incorrectly formed Subnet ID. If you are providing only --subnet (not --vnet), the Subnet parameter should be in resource ID format.")
        if 'child_name_1' not in parse_resource_id(subnet):
            raise ValidationError("Incorrectly formed Subnet ID. Check if the Subnet ID is in the right format.")
        logger.warning("You have supplied a Subnet ID. Verifying its existence...")
        subnet_result = address_private_network_with_id_input(cmd, subnet, nw_client, resource_client, server_name, location, delegation_service_name, vnet_address_pref, subnet_address_pref)
    elif subnet is None and vnet is not None:
        if is_valid_resource_id(vnet):
            logger.warning("You have supplied a Vnet ID. Verifying its existence...")
            subnet_result = address_private_network_with_id_input(cmd, vnet, nw_client, resource_client, server_name, location, delegation_service_name, vnet_address_pref, subnet_address_pref)
        elif _check_if_resource_name(vnet) and is_valid_resource_name(vnet):
            logger.warning("You have supplied a Vnet name. Verifying its existence...")
            # NOTE(review): 'Subnet' + server_name[6:] assumes server_name
            # carries a 6-character prefix to strip — confirm against callers.
            subnet_result = _create_vnet_subnet_delegation(cmd, nw_client, resource_client, delegation_service_name, resource_group_name, vnet, 'Subnet' + server_name[6:],
                                                           location, server_name, vnet_address_pref, subnet_address_pref)
        else:
            raise ValidationError("Incorrectly formed Vnet ID or Vnet name")
    elif subnet is not None and vnet is not None:
        if _check_if_resource_name(vnet) and _check_if_resource_name(subnet):
            logger.warning("You have supplied a Vnet and Subnet name. Verifying its existence...")
            subnet_result = _create_vnet_subnet_delegation(cmd, nw_client, resource_client, delegation_service_name, resource_group_name, vnet, subnet,
                                                           location, server_name, vnet_address_pref, subnet_address_pref)
        else:
            raise ValidationError("If you pass both --vnet and --subnet, consider passing names instead of IDs. If you want to use exising subnet, please provide subnet Id (not vnet Id).")
    elif subnet is None and vnet is None:
        # Nothing supplied: create a server-derived Vnet and Subnet with defaults.
        subnet_result = _create_vnet_subnet_delegation(cmd, nw_client, resource_client, delegation_service_name, resource_group_name, 'Vnet' + server_name[6:], 'Subnet' + server_name[6:],
                                                       location, server_name, vnet_address_pref, subnet_address_pref)
    else:
        return None
    return subnet_result.id
def address_private_network_with_id_input(cmd, rid, nw_client, resource_client, server_name, location, delegation_service_name, vnet_address_pref, subnet_address_pref):
    """Handle a Vnet or Subnet supplied as a resource ID.

    Switches clients to the ID's subscription when it differs, ensures the
    ID's resource group exists, then creates/reuses the delegated subnet.
    Returns the Subnet object.
    """
    id_subscription, id_resource_group, id_vnet, id_subnet = get_id_components(rid)
    nw_client, resource_client = _change_client_with_different_subscription(cmd, id_subscription, nw_client, resource_client)
    _resource_group_verify_and_create(resource_client, id_resource_group, location)
    if id_subnet is None:
        # Vnet-only ID: derive a subnet name from the server name.
        id_subnet = 'Subnet' + server_name[6:]
    return _create_vnet_subnet_delegation(cmd, nw_client, resource_client, delegation_service_name, id_resource_group, id_vnet, id_subnet,
                                          location, server_name, vnet_address_pref, subnet_address_pref)
def _change_client_with_different_subscription(cmd, subscription, nw_client, resource_client):
    """Return (network client, resource client) bound to *subscription*,
    re-creating them only when it differs from the CLI's current one."""
    if subscription != get_subscription_id(cmd.cli_ctx):
        logger.warning('The Vnet/Subnet ID provided is in different subscription from the server')
        nw_client = network_client_factory(cmd.cli_ctx, subscription_id=subscription)
        resource_client = resource_client_factory(cmd.cli_ctx, subscription_id=subscription)
    return nw_client, resource_client
def _resource_group_verify_and_create(resource_client, resource_group, location):
    """Create *resource_group* in *location* if it does not already exist."""
    if not resource_client.resource_groups.check_existence(resource_group):
        logger.warning("Provided resource group in the resource ID doesn't exist. Creating the resource group %s", resource_group)
        resource_client.resource_groups.create_or_update(resource_group, {'location': location})
def _create_vnet_subnet_delegation(cmd, nw_client, resource_client, delegation_service_name, resource_group, vnet_name, subnet_name, location, server_name, vnet_address_pref, subnet_address_pref):
    """Create the Vnet if missing (or extend an existing one's address space),
    then create/validate the delegated subnet. Returns the Subnet object."""
    VirtualNetwork, AddressSpace = cmd.get_models('VirtualNetwork', 'AddressSpace', resource_type=ResourceType.MGMT_NETWORK)
    if not check_existence(resource_client, vnet_name, resource_group, 'Microsoft.Network', 'virtualNetworks'):
        logger.warning('Creating new Vnet "%s" in resource group "%s"', vnet_name, resource_group)
        nw_client.virtual_networks.begin_create_or_update(resource_group,
                                                          vnet_name,
                                                          VirtualNetwork(name=vnet_name,
                                                                         location=location,
                                                                         address_space=AddressSpace(
                                                                             address_prefixes=[
                                                                                 vnet_address_pref]))).result()
    else:
        logger.warning('Using existing Vnet "%s" in resource group "%s"', vnet_name, resource_group)
        # check if vnet prefix is in address space and add if not there
        vnet = nw_client.virtual_networks.get(resource_group, vnet_name)
        prefixes = vnet.address_space.address_prefixes
        subnet_exist = check_existence(resource_client, subnet_name, resource_group, 'Microsoft.Network', 'subnets', parent_name=vnet_name, parent_type='virtualNetworks')
        if not subnet_exist and vnet_address_pref not in prefixes:
            # Only widen the address space when the subnet will actually be created.
            logger.warning('Adding address prefix %s to Vnet %s', vnet_address_pref, vnet_name)
            nw_client.virtual_networks.begin_create_or_update(resource_group, vnet_name,
                                                              VirtualNetwork(location=location,
                                                                             address_space=AddressSpace(
                                                                                 address_prefixes=prefixes + [vnet_address_pref]))).result()
    return _create_subnet_delegation(cmd, nw_client, resource_client, delegation_service_name, resource_group, vnet_name, subnet_name, location, server_name, subnet_address_pref)
def _create_subnet_delegation(cmd, nw_client, resource_client, delegation_service_name, resource_group, vnet_name, subnet_name, location, server_name, subnet_address_pref):
    """Create the subnet with the service delegation, or validate and update an
    existing subnet so it carries the delegation and a Storage service endpoint."""
    Delegation, Subnet, ServiceEndpoint = cmd.get_models('Delegation', 'Subnet', 'ServiceEndpointPropertiesFormat', resource_type=ResourceType.MGMT_NETWORK)
    delegation = Delegation(name=delegation_service_name, service_name=delegation_service_name)
    service_endpoint = ServiceEndpoint(service='Microsoft.Storage')
    # subnet exist
    if not check_existence(resource_client, subnet_name, resource_group, 'Microsoft.Network', 'subnets', parent_name=vnet_name, parent_type='virtualNetworks'):
        subnet_result = Subnet(
            name=subnet_name,
            location=location,
            address_prefix=subnet_address_pref,
            delegations=[delegation],
            service_endpoints=[service_endpoint])
        vnet = nw_client.virtual_networks.get(resource_group, vnet_name)
        vnet_subnet_prefixes = [subnet.address_prefix for subnet in vnet.subnets]
        # Refuse a prefix collision up front; Azure would reject it anyway.
        if subnet_address_pref in vnet_subnet_prefixes:
            raise ValidationError("The Subnet (default) prefix {} is already taken by another Subnet in the Vnet. Please provide a different prefix for --subnet-prefix parameter".format(subnet_address_pref))
        logger.warning('Creating new Subnet "%s" in resource group "%s"', subnet_name, resource_group)
        subnet = nw_client.subnets.begin_create_or_update(resource_group, vnet_name, subnet_name,
                                                          subnet_result).result()
    else:
        subnet = nw_client.subnets.get(resource_group, vnet_name, subnet_name)
        logger.warning('Using existing Subnet "%s" in resource group "%s"', subnet_name, resource_group)
        if subnet_address_pref not in (DEFAULT_SUBNET_ADDRESS_PREFIX, subnet.address_prefix):
            logger.warning("The prefix of the subnet you provided does not match the --subnet-prefix value %s. Using current prefix %s", subnet_address_pref, subnet.address_prefix)
        # Add Delegation if not delegated already
        if not subnet.delegations:
            logger.warning('Adding "%s" delegation to the existing subnet %s.', delegation_service_name, subnet_name)
        else:
            # A foreign delegation is fatal: a subnet can carry only one service.
            for delgtn in subnet.delegations:
                if delgtn.service_name != delegation_service_name:
                    raise CLIError("Can not use subnet with existing delegations other than {}".format(
                        delegation_service_name))
        subnet.service_endpoints = [service_endpoint]
        subnet.delegations = [delegation]
        subnet = nw_client.subnets.begin_create_or_update(resource_group, vnet_name, subnet_name, subnet).result()
    return subnet
def prepare_private_dns_zone(cmd, database_engine, resource_group, server_name, private_dns_zone, subnet_id, location):
    """Resolve or create the private DNS zone for the server and link it to the
    server's Vnet.

    *private_dns_zone* may be None (derive '<server>.<suffix>' automatically),
    a zone name, or a zone resource ID (possibly in another subscription).
    Returns the zone's resource ID.
    """
    dns_suffix_client = cf_postgres_flexible_private_dns_zone_suffix_operations(cmd.cli_ctx, '_')
    private_dns_zone_suffix = dns_suffix_client.execute(database_engine)
    vnet_sub, vnet_rg, vnet_name, _ = get_id_components(subnet_id)
    private_dns_client = private_dns_client_factory(cmd.cli_ctx)
    private_dns_link_client = private_dns_link_client_factory(cmd.cli_ctx)
    resource_client = resource_client_factory(cmd.cli_ctx)
    vnet_id = resource_id(subscription=vnet_sub,
                          resource_group=vnet_rg,
                          namespace='Microsoft.Network',
                          type='virtualNetworks',
                          name=vnet_name)
    nw_client = network_client_factory(cmd.cli_ctx, subscription_id=vnet_sub)
    vnet = nw_client.virtual_networks.get(vnet_rg, vnet_name)
    if private_dns_zone is None:
        # Default zone name: '<server>.<engine-specific suffix>'.
        private_dns_zone = server_name + '.' + private_dns_zone_suffix
    elif not _check_if_resource_name(private_dns_zone) and is_valid_resource_id(private_dns_zone):
        # Zone given as a resource ID: it may live in another subscription/RG.
        subscription, resource_group, private_dns_zone, _ = get_id_components(private_dns_zone)
        if private_dns_zone[-len(private_dns_zone_suffix):] != private_dns_zone_suffix:
            raise ValidationError('The suffix for the private DNS zone should be "{}"'.format(private_dns_zone_suffix))
        if subscription != get_subscription_id(cmd.cli_ctx):
            logger.warning('The provided private DNS zone ID is in different subscription from the server')
            resource_client = resource_client_factory(cmd.cli_ctx, subscription_id=subscription)
            private_dns_client = private_dns_client_factory(cmd.cli_ctx, subscription_id=subscription)
            private_dns_link_client = private_dns_link_client_factory(cmd.cli_ctx, subscription_id=subscription)
        _resource_group_verify_and_create(resource_client, resource_group, location)
    elif _check_if_resource_name(private_dns_zone) and not is_valid_resource_name(private_dns_zone) \
            or not _check_if_resource_name(private_dns_zone) and not is_valid_resource_id(private_dns_zone):
        raise ValidationError("Check if the private dns zone name or id is in correct format.")
    elif _check_if_resource_name(private_dns_zone) and private_dns_zone[-len(private_dns_zone_suffix):] != private_dns_zone_suffix:
        raise ValidationError('The suffix for the private DNS zone should be "{}"'.format(private_dns_zone_suffix))
    # Registration-enabled link so server records resolve inside the Vnet.
    link = VirtualNetworkLink(location='global', virtual_network=SubResource(id=vnet.id))
    link.registration_enabled = True
    if not check_existence(resource_client, private_dns_zone, resource_group, 'Microsoft.Network', 'privateDnsZones'):
        logger.warning('Creating a private dns zone %s..', private_dns_zone)
        private_zone = private_dns_client.create_or_update(resource_group_name=resource_group,
                                                           private_zone_name=private_dns_zone,
                                                           parameters=PrivateZone(location='global'),
                                                           if_none_match='*').result()
        private_dns_link_client.create_or_update(resource_group_name=resource_group,
                                                 private_zone_name=private_dns_zone,
                                                 virtual_network_link_name=vnet_name + '-link',
                                                 parameters=link, if_none_match='*').result()
    else:
        logger.warning('Using the existing private dns zone %s', private_dns_zone)
        private_zone = private_dns_client.get(resource_group_name=resource_group,
                                              private_zone_name=private_dns_zone)
        # private dns zone link list
        virtual_links = private_dns_link_client.list(resource_group_name=resource_group,
                                                     private_zone_name=private_dns_zone)
        # Link the Vnet only if no existing link already points at it.
        link_exist_flag = False
        for virtual_link in virtual_links:
            if virtual_link.virtual_network.id == vnet_id:
                link_exist_flag = True
                break
        if not link_exist_flag:
            private_dns_link_client.create_or_update(resource_group_name=resource_group,
                                                     private_zone_name=private_dns_zone,
                                                     virtual_network_link_name=vnet_name + '-link',
                                                     parameters=link, if_none_match='*').result()
    return private_zone.id
def _check_if_resource_name(resource):
if len(resource.split('/')) == 1:
return True
return False | 0.524395 | 0.046551 |
import argparse
import logging
import locale
import relman
from relman.cmd import release
from relman.cmd import release_verify
# Configure root logging once at import time; -d/--debug raises it later.
logging.basicConfig(level=logging.INFO)
LG = logging.getLogger(__name__)
def parse_args():
    """Parse command-line arguments.

    Returns:
        argparse.Namespace: the parsed options.
    """
    parser = argparse.ArgumentParser(
        description="ragnarios %s - atlassian automation" % relman.__version__
    )
    parser.add_argument(
        "-d", "--debug",
        action="store_true",
        help="enables debug mode."
    )
    parser.add_argument(
        "-s", "--server",
        dest="server",
        default="https://metacloud.jira.com",
        metavar="server",
        help="JIRA server endpoint."
    )
    parser.add_argument(
        "-u", "--user",
        dest="user",
        default="david.lapsley",
        metavar="user",
        help="JIRA user to use."
    )
    # NOTE: a password on the command line is visible in process listings;
    # consider an environment variable or interactive prompt in future work.
    parser.add_argument(
        "-p", "--password",
        default="",
        metavar="password",
        help="Password associated with JIRA user."
    )
    parser.add_argument(
        "-D", "--dry-run",
        dest="dry_run",
        help="Enable dry run mode.",
        action='store_true',
    )
    parser.add_argument(
        "-c", "--command",
        dest="command",
        default="",
        metavar="command",
        help="The command to run."
    )
    parser.add_argument(
        "-t", "--ticket_file",
        dest="ticket_file",
        default="",
        metavar="ticket_file",
        help="CSV file containing tickets to be uploaded."
    )
    parser.add_argument(
        "-e", "--epic",
        dest="epic",
        default="",
        metavar="epic",
        help="Epic to associate issues."
    )
    parser.add_argument(
        "-q", "--query",
        dest="query",
        default="",
        metavar="query",
        help="Query to pull issues from."
    )
    parser.add_argument(
        "-m", "--max_results",
        dest="max_results",
        type=int,  # BUG FIX: command-line values previously arrived as str
        default=50,
        metavar="max_results",
        help="Max results to return from JIRA."
    )
    parser.add_argument(
        "-o", "--output",
        dest="output",
        default="",  # BUG FIX: default was the int 50, copy-pasted from max_results
        metavar="output",
        help="File to store results in."
    )
    return parser.parse_args()
def run():
    """Entry point for the application.

    Sets the locale, parses arguments, raises the log level when --debug is
    given, and dispatches to the requested sub-command.
    """
    locale.setlocale(locale.LC_ALL, "")
    parsed_args = parse_args()
    if parsed_args.debug:
        logging.getLogger().setLevel(logging.DEBUG)
    # Unknown commands fall through silently — no error is reported.
    if parsed_args.command == 'release':
        release.run(parsed_args)
    elif parsed_args.command == 'release_verify':
        release_verify.run(parsed_args)
if __name__ == '__main__':
    # Script entry point. (Fixed: the call line carried fused dataset tokens
    # 'run() | relman/main.py | import argparse', which would have raised at runtime.)
    run()
import logging
import locale
import relman
from relman.cmd import release
from relman.cmd import release_verify
# Configure root logging once at import time; -d/--debug raises it later.
logging.basicConfig(level=logging.INFO)
LG = logging.getLogger(__name__)
def parse_args():
    """Parse command-line arguments.

    Returns:
        argparse.Namespace: the parsed options.
    """
    parser = argparse.ArgumentParser(
        description="ragnarios %s - atlassian automation" % relman.__version__
    )
    parser.add_argument(
        "-d", "--debug",
        action="store_true",
        help="enables debug mode."
    )
    parser.add_argument(
        "-s", "--server",
        dest="server",
        default="https://metacloud.jira.com",
        metavar="server",
        help="JIRA server endpoint."
    )
    parser.add_argument(
        "-u", "--user",
        dest="user",
        default="david.lapsley",
        metavar="user",
        help="JIRA user to use."
    )
    # NOTE(review): a password on the command line is visible in process
    # listings — consider an env var or prompt.
    parser.add_argument(
        "-p", "--password",
        default="",
        metavar="password",
        help="Password associated with JIRA user."
    )
    parser.add_argument(
        "-D", "--dry-run",
        dest="dry_run",
        help="Enable dry run mode.",
        action='store_true',
    )
    parser.add_argument(
        "-c", "--command",
        dest="command",
        default="",
        metavar="command",
        help="The command to run."
    )
    parser.add_argument(
        "-t", "--ticket_file",
        dest="ticket_file",
        default="",
        metavar="ticket_file",
        help="CSV file containing tickets to be uploaded."
    )
    parser.add_argument(
        "-e", "--epic",
        dest="epic",
        default="",
        metavar="epic",
        help="Epic to associate issues."
    )
    parser.add_argument(
        "-q", "--query",
        dest="query",
        default="",
        metavar="query",
        help="Query to pull issues from."
    )
    # NOTE(review): no type=int, so a value passed via -m arrives as str
    # while the default stays int — callers must cast; confirm intent.
    parser.add_argument(
        "-m", "--max_results",
        dest="max_results",
        default=50,
        metavar="max_results",
        help="Max results to return from JIRA."
    )
    # NOTE(review): default=50 for an output *file* looks copy-pasted from
    # max_results — confirm the intended default.
    parser.add_argument(
        "-o", "--output",
        dest="output",
        default=50,
        metavar="output",
        help="File to store results in."
    )
    return parser.parse_args()
def run():
    """Entry point for the application.

    Sets the locale, parses arguments, raises the log level when --debug is
    given, and dispatches to the requested sub-command.
    """
    locale.setlocale(locale.LC_ALL, "")
    parsed_args = parse_args()
    if parsed_args.debug:
        logging.getLogger().setLevel(logging.DEBUG)
    # Unknown commands fall through silently — no error is reported.
    if parsed_args.command == 'release':
        release.run(parsed_args)
    elif parsed_args.command == 'release_verify':
        release_verify.run(parsed_args)
if __name__ == '__main__':
    # Script entry point. (Fixed: the call line carried fused dataset tokens
    # 'run() | 0.398055 | 0.089574', which would have raised at runtime.)
    run()
from __future__ import unicode_literals
import six
def _update_instance(instance, obj_dict):
if not obj_dict:
return
for p_name in instance.__class__.core_properties:
if p_name in obj_dict:
object.__setattr__(instance, p_name, obj_dict.pop(p_name))
class PropertyDictMeta(type):
    """Metaclass that records which class attributes are properties.

    ``core_properties`` ends up holding every property name plus its
    ``_``-prefixed backing-attribute name, so dict-backed classes can route
    those names through regular attribute storage instead of dict items.
    """
    def __init__(cls, name, bases, dct):
        # Property names themselves ...
        cls.core_properties = [d_name for d_name, d_type in six.iteritems(dct) if isinstance(d_type, property)]
        # ... plus the conventional '_'-prefixed storage slot for each one.
        cls.core_properties.extend(''.join(('_', d_name))
                                   for d_name, d_type in six.iteritems(dct) if isinstance(d_type, property))
        super(PropertyDictMeta, cls).__init__(name, bases, dct)
class DictMap(six.with_metaclass(PropertyDictMeta, dict)):
    """
    Utility class which allows access to a dictionary by attributes and keys. Also overrides the default iteration to
    return keys and values.
    """
    def __init__(self, *args, **kwargs):
        # Core-property kwargs become real attributes; the rest become items.
        _update_instance(self, kwargs)
        super(DictMap, self).__init__(*args, **kwargs)

    def __getattr__(self, item):
        try:
            return self[item]
        except KeyError:
            # BUG FIX: raise AttributeError so hasattr()/getattr() follow the
            # standard attribute protocol instead of leaking KeyError.
            raise AttributeError(item)

    def __setattr__(self, key, value):
        if key in self.__class__.core_properties:
            object.__setattr__(self, key, value)
        else:
            self[key] = value

    def __delattr__(self, item):
        # BUG FIX: probe with object.__getattribute__ so dict items (which
        # __getattr__ exposes as attributes) are not mistaken for real
        # attributes; real attributes are deleted, items are popped.
        try:
            object.__getattribute__(self, item)
        except AttributeError:
            self.pop(item)
        else:
            super(DictMap, self).__delattr__(item)

    def __iter__(self):
        # Deliberately yields (key, value) pairs, not just keys.
        return six.iteritems(self)

    def update(self, other=None, **kwargs):
        """Update from *other* (DictMap or dict) and/or keyword arguments.

        Core-property values are routed to attributes; everything else goes
        into the dict storage.
        """
        if other:
            if isinstance(other, self.__class__):
                for p in self.__class__.core_properties:
                    object.__setattr__(self, p, getattr(other, p))
            elif isinstance(other, dict):
                other = other.copy()
                _update_instance(self, other)
            else:
                raise ValueError("Expected {0} or dictionary; found '{1}'".format(type(self).__name__, type(other).__name__))
        _update_instance(self, kwargs)
        # BUG FIX: dict.update(None) raises TypeError, so only forward *other*
        # when it was actually supplied.
        if other is not None:
            super(DictMap, self).update(other, **kwargs)
        else:
            super(DictMap, self).update(**kwargs)
import six
def _update_instance(instance, obj_dict):
if not obj_dict:
return
for p_name in instance.__class__.core_properties:
if p_name in obj_dict:
object.__setattr__(instance, p_name, obj_dict.pop(p_name))
class PropertyDictMeta(type):
def __init__(cls, name, bases, dct):
cls.core_properties = [d_name for d_name, d_type in six.iteritems(dct) if isinstance(d_type, property)]
cls.core_properties.extend(''.join(('_', d_name))
for d_name, d_type in six.iteritems(dct) if isinstance(d_type, property))
super(PropertyDictMeta, cls).__init__(name, bases, dct)
class DictMap(six.with_metaclass(PropertyDictMeta, dict)):
"""
Utility class which allows access to a dictionary by attributes and keys. Also overrides the default iteration to
return keys and values.
"""
def __init__(self, *args, **kwargs):
_update_instance(self, kwargs)
super(DictMap, self).__init__(*args, **kwargs)
def __getattr__(self, item):
return self[item]
def __setattr__(self, key, value):
if key in self.__class__.core_properties:
object.__setattr__(self, key, value)
else:
self[key] = value
def __delattr__(self, item):
if hasattr(self, item):
super(DictMap, self).__delattr__(item)
else:
self.pop(item)
def __iter__(self):
return six.iteritems(self)
def update(self, other=None, **kwargs):
if other:
if isinstance(other, self.__class__):
for p in self.__class__.core_properties:
object.__setattr__(self, p, getattr(other, p))
elif isinstance(other, dict):
other = other.copy()
_update_instance(self, other)
else:
raise ValueError("Expected {0} or dictionary; found '{1}'".format(type(self).__name__, type(other).__name__))
_update_instance(self, kwargs)
super(DictMap, self).update(other, **kwargs) | 0.663342 | 0.09277 |
from pathlib import Path
import click
import tensorflow as tf
import numpy as np
import cv2
from modules.utils import load_yaml, draw_bbox_landm, resize_and_pad_input_image, recover_pad_output
from export import export_to_saved_model
def _run_detection(detector_model, image_arr, score_thres, iou_thres, detection_width):
image_arr = np.float32(image_arr)
image_arr, pad_params = resize_and_pad_input_image(
image_arr,
padded_width=detection_width,
padded_height=detection_width,
max_steps=32,
keep_aspect_ratio=True,
)
outputs = detector_model(
[
tf.expand_dims(image_arr, axis=0),
tf.constant([score_thres], dtype=tf.float32),
tf.constant([iou_thres], dtype=tf.float32),
]
).numpy()
outputs = recover_pad_output(outputs, pad_params)
return outputs
@click.command()
@click.option("--image_path", type=str, required=True)
@click.option("--config_path", type=str, default="configs/retinaface_res50.yaml")
@click.option("--export_path", type=str, default="saved_models/retinaface_res50_end2end")
@click.option("--ckpt_path", type=str, default="checkpoints/retinaface_res50/ckpt-81")
@click.option("--score_thres", type=float, default=0.5)
@click.option("--iou_thres", type=float, default=0.4)
@click.option("--detection_width", type=int, default=0)
@click.option("--result_save_path", type=str, default="results")
def main(
image_path,
config_path,
export_path,
ckpt_path,
score_thres,
iou_thres,
detection_width,
result_save_path,
):
config = load_yaml(config_path)
if not Path(export_path).joinpath("saved_model.pb").exists() and ckpt_path is not None:
export_to_saved_model(ckpt_path, export_path, config)
elif not Path(export_path).joinpath("saved_model.pb").exists() and ckpt_path is None:
raise ValueError(f"Must provide a checkpoint to export model.")
loaded_model = tf.saved_model.load(export_path)
print("model_loaded")
img_raw = cv2.imread(image_path)
img_rgb = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)
img_height_raw, img_width_raw, _ = img_rgb.shape
outputs = _run_detection(loaded_model, img_rgb, score_thres, iou_thres, detection_width)
# draw and save results
result_save_path = Path(result_save_path)
result_save_path.mkdir(exist_ok=True, parents=True)
save_img_path = result_save_path.joinpath("result_" + Path(image_path).name)
for prior_index in range(len(outputs)):
draw_bbox_landm(
img_raw, outputs[prior_index], img_height_raw, img_width_raw, draw_score=True, draw_lm=True
)
cv2.imwrite(str(save_img_path), img_raw)
print(f"Results saved at {save_img_path}")
if __name__ == "__main__":
main() | inference.py | from pathlib import Path
import click
import tensorflow as tf
import numpy as np
import cv2
from modules.utils import load_yaml, draw_bbox_landm, resize_and_pad_input_image, recover_pad_output
from export import export_to_saved_model
def _run_detection(detector_model, image_arr, score_thres, iou_thres, detection_width):
image_arr = np.float32(image_arr)
image_arr, pad_params = resize_and_pad_input_image(
image_arr,
padded_width=detection_width,
padded_height=detection_width,
max_steps=32,
keep_aspect_ratio=True,
)
outputs = detector_model(
[
tf.expand_dims(image_arr, axis=0),
tf.constant([score_thres], dtype=tf.float32),
tf.constant([iou_thres], dtype=tf.float32),
]
).numpy()
outputs = recover_pad_output(outputs, pad_params)
return outputs
@click.command()
@click.option("--image_path", type=str, required=True)
@click.option("--config_path", type=str, default="configs/retinaface_res50.yaml")
@click.option("--export_path", type=str, default="saved_models/retinaface_res50_end2end")
@click.option("--ckpt_path", type=str, default="checkpoints/retinaface_res50/ckpt-81")
@click.option("--score_thres", type=float, default=0.5)
@click.option("--iou_thres", type=float, default=0.4)
@click.option("--detection_width", type=int, default=0)
@click.option("--result_save_path", type=str, default="results")
def main(
image_path,
config_path,
export_path,
ckpt_path,
score_thres,
iou_thres,
detection_width,
result_save_path,
):
config = load_yaml(config_path)
if not Path(export_path).joinpath("saved_model.pb").exists() and ckpt_path is not None:
export_to_saved_model(ckpt_path, export_path, config)
elif not Path(export_path).joinpath("saved_model.pb").exists() and ckpt_path is None:
raise ValueError(f"Must provide a checkpoint to export model.")
loaded_model = tf.saved_model.load(export_path)
print("model_loaded")
img_raw = cv2.imread(image_path)
img_rgb = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)
img_height_raw, img_width_raw, _ = img_rgb.shape
outputs = _run_detection(loaded_model, img_rgb, score_thres, iou_thres, detection_width)
# draw and save results
result_save_path = Path(result_save_path)
result_save_path.mkdir(exist_ok=True, parents=True)
save_img_path = result_save_path.joinpath("result_" + Path(image_path).name)
for prior_index in range(len(outputs)):
draw_bbox_landm(
img_raw, outputs[prior_index], img_height_raw, img_width_raw, draw_score=True, draw_lm=True
)
cv2.imwrite(str(save_img_path), img_raw)
print(f"Results saved at {save_img_path}")
if __name__ == "__main__":
main() | 0.702122 | 0.188548 |
from shipmmg.mmg_3dof import (
Mmg3DofBasicParams,
Mmg3DofManeuveringParams,
simulate_mmg_3dof,
get_sub_values_from_simulation_result,
zigzag_test_mmg_3dof,
)
from shipmmg.ship_obj_3dof import ShipObj3dof
import numpy as np
import pytest
import os
import matplotlib.pyplot as plt
@pytest.fixture
def ship_KVLCC2_L7_model():
ρ = 1.025 # 海水密度
L_pp = 7.00 # 船長Lpp[m]
B = 1.27 # 船幅[m]
d = 0.46 # 喫水[m]
nabla = 3.27 # 排水量[m^3]
x_G = 0.25 # 重心位置[m]
# C_b = 0.810 # 方形係数[-]
D_p = 0.216 # プロペラ直径[m]
H_R = 0.345 # 舵高さ[m]
A_R = 0.0539 # 舵断面積[m^2]
t_P = 0.220 # 推力減少率
w_P0 = 0.40 # 有効伴流率
m_x_dash = 0.022 # 付加質量x(無次元)
m_y_dash = 0.223 # 付加質量y(無次元)
J_z_dash = 0.011 # 付加質量Izz(無次元)
t_R = 0.387 # 操縦抵抗減少率
a_H = 0.312 # 舵力増加係数
x_H_dash = -0.464 # 舵力増分作用位置
γ_R_minus = 0.395 # 整流係数
γ_R_plus = 0.640 # 整流係数
l_r_dash = -0.710 # 船長に対する舵位置
x_P_dash = -0.690 # 船長に対するプロペラ位置
ϵ = 1.09 # プロペラ・舵位置伴流係数比
κ = 0.50 # 修正係数
f_α = 2.747 # 直圧力勾配係数
basic_params = Mmg3DofBasicParams(
L_pp=L_pp, # 船長Lpp[m]
B=B, # 船幅[m]
d=d, # 喫水[m]
x_G=x_G, # 重心位置[]
D_p=D_p, # プロペラ直径[m]
m=ρ * nabla, # 質量(無次元化)[kg]
I_zG=ρ * nabla * ((0.25 * L_pp) ** 2), # 慣性モーメント[-]
A_R=A_R, # 船の断面に対する舵面積比[-]
η=D_p / H_R, # プロペラ直径に対する舵高さ(Dp/H)
m_x=(0.5 * ρ * (L_pp ** 2) * d) * m_x_dash, # 付加質量x(無次元)
m_y=(0.5 * ρ * (L_pp ** 2) * d) * m_y_dash, # 付加質量y(無次元)
J_z=(0.5 * ρ * (L_pp ** 4) * d) * J_z_dash, # 付加質量Izz(無次元)
f_α=f_α,
ϵ=ϵ, # プロペラ・舵位置伴流係数比
t_R=t_R, # 操縦抵抗減少率
a_H=a_H, # 舵力増加係数
x_H=x_H_dash * L_pp, # 舵力増分作用位置
γ_R_minus=γ_R_minus, # 整流係数
γ_R_plus=γ_R_plus, # 整流係数
l_R=l_r_dash, # 船長に対する舵位置
κ=κ, # 修正係数
t_P=t_P, # 推力減少率
w_P0=w_P0, # 有効伴流率
x_P=x_P_dash, # 船長に対するプロペラ位置
)
k_0 = 0.2931
k_1 = -0.2753
k_2 = -0.1385
R_0_dash = 0.022
X_vv_dash = -0.040
X_vr_dash = 0.002
X_rr_dash = 0.011
X_vvvv_dash = 0.771
Y_v_dash = -0.315
Y_r_dash = 0.083
Y_vvv_dash = -1.607
Y_vvr_dash = 0.379
Y_vrr_dash = -0.391
Y_rrr_dash = 0.008
N_v_dash = -0.137
N_r_dash = -0.049
N_vvv_dash = -0.030
N_vvr_dash = -0.294
N_vrr_dash = 0.055
N_rrr_dash = -0.013
maneuvering_params = Mmg3DofManeuveringParams(
k_0=k_0,
k_1=k_1,
k_2=k_2,
R_0_dash=R_0_dash,
X_vv_dash=X_vv_dash,
X_vr_dash=X_vr_dash,
X_rr_dash=X_rr_dash,
X_vvvv_dash=X_vvvv_dash,
Y_v_dash=Y_v_dash,
Y_r_dash=Y_r_dash,
Y_vvv_dash=Y_vvv_dash,
Y_vvr_dash=Y_vvr_dash,
Y_vrr_dash=Y_vrr_dash,
Y_rrr_dash=Y_rrr_dash,
N_v_dash=N_v_dash,
N_r_dash=N_r_dash,
N_vvv_dash=N_vvv_dash,
N_vvr_dash=N_vvr_dash,
N_vrr_dash=N_vrr_dash,
N_rrr_dash=N_rrr_dash,
)
return basic_params, maneuvering_params
@pytest.fixture
def kvlcc2_L7_35_turning(ship_KVLCC2_L7_model):
basic_params, maneuvering_params = ship_KVLCC2_L7_model
duration = 200 # [s]
# steering_rate = 1.76 * 4 # [°/s]
max_δ_rad = 35 * np.pi / 180.0 # [rad]
n_const = 17.95 # [rpm]
sampling = duration * 10
time_list = np.linspace(0.00, duration, sampling)
δ_rad_list = [0] * sampling
for i in range(sampling):
δ_rad_list[i] = max_δ_rad
npm_list = np.array([n_const for i in range(sampling)])
sol = simulate_mmg_3dof(
basic_params,
maneuvering_params,
time_list,
δ_rad_list,
npm_list,
u0=2.29 * 0.512,
v0=0.0,
r0=0.0,
)
sim_result = sol.sol(time_list)
ship = ShipObj3dof(L=basic_params.L_pp, B=basic_params.B)
ship.load_simulation_result(time_list, sim_result[0], sim_result[1], sim_result[2])
ship.npm = npm_list
ship.δ = δ_rad_list
return ship
def test_get_sub_values_from_simulation_result(
kvlcc2_L7_35_turning, ship_KVLCC2_L7_model, tmpdir
):
basic_params, maneuvering_params = ship_KVLCC2_L7_model
(
X_H_list,
X_R_list,
X_P_list,
Y_H_list,
Y_R_list,
N_H_list,
N_R_list,
) = get_sub_values_from_simulation_result(
kvlcc2_L7_35_turning.u,
kvlcc2_L7_35_turning.v,
kvlcc2_L7_35_turning.r,
kvlcc2_L7_35_turning.δ,
kvlcc2_L7_35_turning.npm,
basic_params,
maneuvering_params,
)
(
X_H_list,
X_R_list,
X_P_list,
Y_H_list,
Y_R_list,
N_H_list,
N_R_list,
U_list,
β_list,
v_dash_list,
r_dash_list,
β_P_list,
w_P_list,
J_list,
K_T_list,
β_R_list,
γ_R_list,
v_R_list,
u_R_list,
U_R_list,
α_R_list,
F_N_list,
) = get_sub_values_from_simulation_result(
kvlcc2_L7_35_turning.u,
kvlcc2_L7_35_turning.v,
kvlcc2_L7_35_turning.r,
kvlcc2_L7_35_turning.δ,
kvlcc2_L7_35_turning.npm,
basic_params,
maneuvering_params,
return_all_vals=True,
)
save_fig_path = os.path.join(str(tmpdir),"testFN.png")
fig = plt.figure()
plt.plot(kvlcc2_L7_35_turning.time, F_N_list)
fig.savefig(save_fig_path)
def test_Ship3DOF_drawing_function(kvlcc2_L7_35_turning,tmpdir):
"""Check drawing functions of Ship3DOF class by using MMG 3DOF simulation results."""
# Ship3DOF.draw_xy_trajectory()
save_fig_path = os.path.join(str(tmpdir),"trajectory.png")
kvlcc2_L7_35_turning.draw_xy_trajectory(dimensionless=True)
kvlcc2_L7_35_turning.draw_xy_trajectory(save_fig_path=save_fig_path)
# Ship3DOF.draw_chart()
save_fig_path = os.path.join(str(tmpdir),"param.png")
kvlcc2_L7_35_turning.draw_chart(
"time",
"u",
xlabel="time [sec]",
ylabel=r"$u$" + " [m/s]",
save_fig_path=save_fig_path,
)
x_index_list = ["time", "u", "v", "r", "x", "y", "psi"]
y_index_list = ["time", "u", "v", "r", "x", "y", "psi"]
for x_index in x_index_list:
for y_index in y_index_list:
kvlcc2_L7_35_turning.draw_chart(x_index, y_index)
with pytest.raises(Exception):
kvlcc2_L7_35_turning.draw_chart("time", "hogehoge")
with pytest.raises(Exception):
kvlcc2_L7_35_turning.draw_chart("hogehoge", "y")
# Ship3DOF.draw_gif()
save_fig_path = os.path.join(str(tmpdir),"test.gif")
kvlcc2_L7_35_turning.draw_gif(save_fig_path=save_fig_path)
kvlcc2_L7_35_turning.draw_gif(dimensionless=True, save_fig_path=save_fig_path)
def test_zigzag_test_mmg_before(ship_KVLCC2_L7_model,tmpdir):
basic_params, maneuvering_params = ship_KVLCC2_L7_model
target_δ_rad = 20.0 * np.pi / 180.0
target_ψ_rad_deviation = -20.0 * np.pi / 180.0
duration = 100
num_of_sampling = 10000
time_list = np.linspace(0.00, duration, num_of_sampling)
n_const = 17.95 # [rpm]
npm_list = np.array([n_const for i in range(num_of_sampling)])
δ_list, u_list, v_list, r_list = zigzag_test_mmg_3dof(
basic_params,
maneuvering_params,
target_δ_rad,
target_ψ_rad_deviation,
time_list,
npm_list,
δ_rad_rate=10.0 * np.pi / 180,
)
ship = ShipObj3dof(L=100, B=10)
ship.load_simulation_result(time_list, u_list, v_list, r_list)
ship.δ = δ_list
save_fig_path = os.path.join(str(tmpdir),"test_psi.png")
ship.draw_xy_trajectory(save_fig_path=save_fig_path)
ship.draw_chart(
"time",
"psi",
xlabel="time [sec]",
ylabel=r"$\psi$" + " [rad]",
save_fig_path=save_fig_path,
)
save_fig_path = os.path.join(str(tmpdir),"test_delta.png")
ship.draw_xy_trajectory(save_fig_path=save_fig_path)
ship.draw_chart(
"time",
"delta",
xlabel="time [sec]",
ylabel=r"$\delta$" + " [rad]",
save_fig_path=save_fig_path,
)
save_fig_path = os.path.join(str(tmpdir),"test_delta_psi.png")
ship.draw_multi_y_chart(
"time",
["delta", "psi"],
xlabel="time [sec]",
ylabel="[rad]",
save_fig_path=save_fig_path,
)
save_fig_path = os.path.join(str(tmpdir),"test_delta_psi.png")
ship.draw_multi_x_chart(
["delta", "psi"],
"time",
ylabel="time [sec]",
xlabel="[rad]",
save_fig_path=save_fig_path,
)
def test_zigzag_test_mmg(ship_KVLCC2_L7_model, tmpdir):
basic_params, maneuvering_params = ship_KVLCC2_L7_model
target_δ_rad = 20.0 * np.pi / 180.0
target_ψ_rad_deviation = 20.0 * np.pi / 180.0
duration = 80
num_of_sampling = 10000
time_list = np.linspace(0.00, duration, num_of_sampling)
n_const = 17.95 # [rpm]
npm_list = np.array([n_const for i in range(num_of_sampling)])
δ_list, u_list, v_list, r_list = zigzag_test_mmg_3dof(
basic_params,
maneuvering_params,
target_δ_rad,
target_ψ_rad_deviation,
time_list,
npm_list,
δ_rad_rate=15.0 * np.pi / 180,
)
ship = ShipObj3dof(L=100, B=10)
ship.load_simulation_result(time_list, u_list, v_list, r_list)
ship.δ = δ_list
ship.npm = npm_list
save_fig_path = os.path.join(str(tmpdir),"delta_psi.png")
fig = plt.figure()
plt.plot(time_list, list(map(lambda δ: δ * 180 / np.pi, ship.δ)))
plt.plot(time_list, list(map(lambda psi: psi * 180 / np.pi, ship.psi)))
fig.savefig(save_fig_path)
plt.close()
(
X_H_list,
X_R_list,
X_P_list,
Y_H_list,
Y_R_list,
N_H_list,
N_R_list,
U_list,
β_list,
v_dash_list,
r_dash_list,
β_P_list,
w_P_list,
J_list,
K_T_list,
β_R_list,
γ_R_list,
v_R_list,
u_R_list,
U_R_list,
α_R_list,
F_N_list,
) = get_sub_values_from_simulation_result(
ship.u,
ship.v,
ship.r,
ship.δ,
ship.npm,
basic_params,
maneuvering_params,
return_all_vals=True,
)
save_fig_path = os.path.join(str(tmpdir),"w_P.png")
fig = plt.figure()
plt.plot(time_list, w_P_list)
fig.savefig(save_fig_path)
plt.close()
save_fig_path = os.path.join(str(tmpdir),"J.png")
fig = plt.figure()
plt.plot(time_list, J_list)
fig.savefig(save_fig_path)
plt.close()
save_fig_path = os.path.join(str(tmpdir),"K_T.png")
fig = plt.figure()
plt.plot(time_list, K_T_list)
fig.savefig(save_fig_path)
plt.close()
save_fig_path = os.path.join(str(tmpdir),"U_R.png")
fig = plt.figure()
plt.plot(time_list, U_R_list)
fig.savefig(save_fig_path)
plt.close()
save_fig_path = os.path.join(str(tmpdir),"α_R.png")
fig = plt.figure()
plt.plot(time_list, α_R_list)
fig.savefig(save_fig_path)
plt.close()
save_fig_path = os.path.join(str(tmpdir),"F_N.png")
fig = plt.figure()
plt.plot(time_list, F_N_list)
fig.savefig(save_fig_path)
plt.close()
save_fig_path = os.path.join(str(tmpdir),"gamma_R.png")
fig = plt.figure()
plt.plot(time_list, γ_R_list)
fig.savefig(save_fig_path)
plt.close() | tests/test_mmg_3dof.py |
from shipmmg.mmg_3dof import (
Mmg3DofBasicParams,
Mmg3DofManeuveringParams,
simulate_mmg_3dof,
get_sub_values_from_simulation_result,
zigzag_test_mmg_3dof,
)
from shipmmg.ship_obj_3dof import ShipObj3dof
import numpy as np
import pytest
import os
import matplotlib.pyplot as plt
@pytest.fixture
def ship_KVLCC2_L7_model():
ρ = 1.025 # 海水密度
L_pp = 7.00 # 船長Lpp[m]
B = 1.27 # 船幅[m]
d = 0.46 # 喫水[m]
nabla = 3.27 # 排水量[m^3]
x_G = 0.25 # 重心位置[m]
# C_b = 0.810 # 方形係数[-]
D_p = 0.216 # プロペラ直径[m]
H_R = 0.345 # 舵高さ[m]
A_R = 0.0539 # 舵断面積[m^2]
t_P = 0.220 # 推力減少率
w_P0 = 0.40 # 有効伴流率
m_x_dash = 0.022 # 付加質量x(無次元)
m_y_dash = 0.223 # 付加質量y(無次元)
J_z_dash = 0.011 # 付加質量Izz(無次元)
t_R = 0.387 # 操縦抵抗減少率
a_H = 0.312 # 舵力増加係数
x_H_dash = -0.464 # 舵力増分作用位置
γ_R_minus = 0.395 # 整流係数
γ_R_plus = 0.640 # 整流係数
l_r_dash = -0.710 # 船長に対する舵位置
x_P_dash = -0.690 # 船長に対するプロペラ位置
ϵ = 1.09 # プロペラ・舵位置伴流係数比
κ = 0.50 # 修正係数
f_α = 2.747 # 直圧力勾配係数
basic_params = Mmg3DofBasicParams(
L_pp=L_pp, # 船長Lpp[m]
B=B, # 船幅[m]
d=d, # 喫水[m]
x_G=x_G, # 重心位置[]
D_p=D_p, # プロペラ直径[m]
m=ρ * nabla, # 質量(無次元化)[kg]
I_zG=ρ * nabla * ((0.25 * L_pp) ** 2), # 慣性モーメント[-]
A_R=A_R, # 船の断面に対する舵面積比[-]
η=D_p / H_R, # プロペラ直径に対する舵高さ(Dp/H)
m_x=(0.5 * ρ * (L_pp ** 2) * d) * m_x_dash, # 付加質量x(無次元)
m_y=(0.5 * ρ * (L_pp ** 2) * d) * m_y_dash, # 付加質量y(無次元)
J_z=(0.5 * ρ * (L_pp ** 4) * d) * J_z_dash, # 付加質量Izz(無次元)
f_α=f_α,
ϵ=ϵ, # プロペラ・舵位置伴流係数比
t_R=t_R, # 操縦抵抗減少率
a_H=a_H, # 舵力増加係数
x_H=x_H_dash * L_pp, # 舵力増分作用位置
γ_R_minus=γ_R_minus, # 整流係数
γ_R_plus=γ_R_plus, # 整流係数
l_R=l_r_dash, # 船長に対する舵位置
κ=κ, # 修正係数
t_P=t_P, # 推力減少率
w_P0=w_P0, # 有効伴流率
x_P=x_P_dash, # 船長に対するプロペラ位置
)
k_0 = 0.2931
k_1 = -0.2753
k_2 = -0.1385
R_0_dash = 0.022
X_vv_dash = -0.040
X_vr_dash = 0.002
X_rr_dash = 0.011
X_vvvv_dash = 0.771
Y_v_dash = -0.315
Y_r_dash = 0.083
Y_vvv_dash = -1.607
Y_vvr_dash = 0.379
Y_vrr_dash = -0.391
Y_rrr_dash = 0.008
N_v_dash = -0.137
N_r_dash = -0.049
N_vvv_dash = -0.030
N_vvr_dash = -0.294
N_vrr_dash = 0.055
N_rrr_dash = -0.013
maneuvering_params = Mmg3DofManeuveringParams(
k_0=k_0,
k_1=k_1,
k_2=k_2,
R_0_dash=R_0_dash,
X_vv_dash=X_vv_dash,
X_vr_dash=X_vr_dash,
X_rr_dash=X_rr_dash,
X_vvvv_dash=X_vvvv_dash,
Y_v_dash=Y_v_dash,
Y_r_dash=Y_r_dash,
Y_vvv_dash=Y_vvv_dash,
Y_vvr_dash=Y_vvr_dash,
Y_vrr_dash=Y_vrr_dash,
Y_rrr_dash=Y_rrr_dash,
N_v_dash=N_v_dash,
N_r_dash=N_r_dash,
N_vvv_dash=N_vvv_dash,
N_vvr_dash=N_vvr_dash,
N_vrr_dash=N_vrr_dash,
N_rrr_dash=N_rrr_dash,
)
return basic_params, maneuvering_params
@pytest.fixture
def kvlcc2_L7_35_turning(ship_KVLCC2_L7_model):
basic_params, maneuvering_params = ship_KVLCC2_L7_model
duration = 200 # [s]
# steering_rate = 1.76 * 4 # [°/s]
max_δ_rad = 35 * np.pi / 180.0 # [rad]
n_const = 17.95 # [rpm]
sampling = duration * 10
time_list = np.linspace(0.00, duration, sampling)
δ_rad_list = [0] * sampling
for i in range(sampling):
δ_rad_list[i] = max_δ_rad
npm_list = np.array([n_const for i in range(sampling)])
sol = simulate_mmg_3dof(
basic_params,
maneuvering_params,
time_list,
δ_rad_list,
npm_list,
u0=2.29 * 0.512,
v0=0.0,
r0=0.0,
)
sim_result = sol.sol(time_list)
ship = ShipObj3dof(L=basic_params.L_pp, B=basic_params.B)
ship.load_simulation_result(time_list, sim_result[0], sim_result[1], sim_result[2])
ship.npm = npm_list
ship.δ = δ_rad_list
return ship
def test_get_sub_values_from_simulation_result(
kvlcc2_L7_35_turning, ship_KVLCC2_L7_model, tmpdir
):
basic_params, maneuvering_params = ship_KVLCC2_L7_model
(
X_H_list,
X_R_list,
X_P_list,
Y_H_list,
Y_R_list,
N_H_list,
N_R_list,
) = get_sub_values_from_simulation_result(
kvlcc2_L7_35_turning.u,
kvlcc2_L7_35_turning.v,
kvlcc2_L7_35_turning.r,
kvlcc2_L7_35_turning.δ,
kvlcc2_L7_35_turning.npm,
basic_params,
maneuvering_params,
)
(
X_H_list,
X_R_list,
X_P_list,
Y_H_list,
Y_R_list,
N_H_list,
N_R_list,
U_list,
β_list,
v_dash_list,
r_dash_list,
β_P_list,
w_P_list,
J_list,
K_T_list,
β_R_list,
γ_R_list,
v_R_list,
u_R_list,
U_R_list,
α_R_list,
F_N_list,
) = get_sub_values_from_simulation_result(
kvlcc2_L7_35_turning.u,
kvlcc2_L7_35_turning.v,
kvlcc2_L7_35_turning.r,
kvlcc2_L7_35_turning.δ,
kvlcc2_L7_35_turning.npm,
basic_params,
maneuvering_params,
return_all_vals=True,
)
save_fig_path = os.path.join(str(tmpdir),"testFN.png")
fig = plt.figure()
plt.plot(kvlcc2_L7_35_turning.time, F_N_list)
fig.savefig(save_fig_path)
def test_Ship3DOF_drawing_function(kvlcc2_L7_35_turning,tmpdir):
"""Check drawing functions of Ship3DOF class by using MMG 3DOF simulation results."""
# Ship3DOF.draw_xy_trajectory()
save_fig_path = os.path.join(str(tmpdir),"trajectory.png")
kvlcc2_L7_35_turning.draw_xy_trajectory(dimensionless=True)
kvlcc2_L7_35_turning.draw_xy_trajectory(save_fig_path=save_fig_path)
# Ship3DOF.draw_chart()
save_fig_path = os.path.join(str(tmpdir),"param.png")
kvlcc2_L7_35_turning.draw_chart(
"time",
"u",
xlabel="time [sec]",
ylabel=r"$u$" + " [m/s]",
save_fig_path=save_fig_path,
)
x_index_list = ["time", "u", "v", "r", "x", "y", "psi"]
y_index_list = ["time", "u", "v", "r", "x", "y", "psi"]
for x_index in x_index_list:
for y_index in y_index_list:
kvlcc2_L7_35_turning.draw_chart(x_index, y_index)
with pytest.raises(Exception):
kvlcc2_L7_35_turning.draw_chart("time", "hogehoge")
with pytest.raises(Exception):
kvlcc2_L7_35_turning.draw_chart("hogehoge", "y")
# Ship3DOF.draw_gif()
save_fig_path = os.path.join(str(tmpdir),"test.gif")
kvlcc2_L7_35_turning.draw_gif(save_fig_path=save_fig_path)
kvlcc2_L7_35_turning.draw_gif(dimensionless=True, save_fig_path=save_fig_path)
def test_zigzag_test_mmg_before(ship_KVLCC2_L7_model,tmpdir):
basic_params, maneuvering_params = ship_KVLCC2_L7_model
target_δ_rad = 20.0 * np.pi / 180.0
target_ψ_rad_deviation = -20.0 * np.pi / 180.0
duration = 100
num_of_sampling = 10000
time_list = np.linspace(0.00, duration, num_of_sampling)
n_const = 17.95 # [rpm]
npm_list = np.array([n_const for i in range(num_of_sampling)])
δ_list, u_list, v_list, r_list = zigzag_test_mmg_3dof(
basic_params,
maneuvering_params,
target_δ_rad,
target_ψ_rad_deviation,
time_list,
npm_list,
δ_rad_rate=10.0 * np.pi / 180,
)
ship = ShipObj3dof(L=100, B=10)
ship.load_simulation_result(time_list, u_list, v_list, r_list)
ship.δ = δ_list
save_fig_path = os.path.join(str(tmpdir),"test_psi.png")
ship.draw_xy_trajectory(save_fig_path=save_fig_path)
ship.draw_chart(
"time",
"psi",
xlabel="time [sec]",
ylabel=r"$\psi$" + " [rad]",
save_fig_path=save_fig_path,
)
save_fig_path = os.path.join(str(tmpdir),"test_delta.png")
ship.draw_xy_trajectory(save_fig_path=save_fig_path)
ship.draw_chart(
"time",
"delta",
xlabel="time [sec]",
ylabel=r"$\delta$" + " [rad]",
save_fig_path=save_fig_path,
)
save_fig_path = os.path.join(str(tmpdir),"test_delta_psi.png")
ship.draw_multi_y_chart(
"time",
["delta", "psi"],
xlabel="time [sec]",
ylabel="[rad]",
save_fig_path=save_fig_path,
)
save_fig_path = os.path.join(str(tmpdir),"test_delta_psi.png")
ship.draw_multi_x_chart(
["delta", "psi"],
"time",
ylabel="time [sec]",
xlabel="[rad]",
save_fig_path=save_fig_path,
)
def test_zigzag_test_mmg(ship_KVLCC2_L7_model, tmpdir):
basic_params, maneuvering_params = ship_KVLCC2_L7_model
target_δ_rad = 20.0 * np.pi / 180.0
target_ψ_rad_deviation = 20.0 * np.pi / 180.0
duration = 80
num_of_sampling = 10000
time_list = np.linspace(0.00, duration, num_of_sampling)
n_const = 17.95 # [rpm]
npm_list = np.array([n_const for i in range(num_of_sampling)])
δ_list, u_list, v_list, r_list = zigzag_test_mmg_3dof(
basic_params,
maneuvering_params,
target_δ_rad,
target_ψ_rad_deviation,
time_list,
npm_list,
δ_rad_rate=15.0 * np.pi / 180,
)
ship = ShipObj3dof(L=100, B=10)
ship.load_simulation_result(time_list, u_list, v_list, r_list)
ship.δ = δ_list
ship.npm = npm_list
save_fig_path = os.path.join(str(tmpdir),"delta_psi.png")
fig = plt.figure()
plt.plot(time_list, list(map(lambda δ: δ * 180 / np.pi, ship.δ)))
plt.plot(time_list, list(map(lambda psi: psi * 180 / np.pi, ship.psi)))
fig.savefig(save_fig_path)
plt.close()
(
X_H_list,
X_R_list,
X_P_list,
Y_H_list,
Y_R_list,
N_H_list,
N_R_list,
U_list,
β_list,
v_dash_list,
r_dash_list,
β_P_list,
w_P_list,
J_list,
K_T_list,
β_R_list,
γ_R_list,
v_R_list,
u_R_list,
U_R_list,
α_R_list,
F_N_list,
) = get_sub_values_from_simulation_result(
ship.u,
ship.v,
ship.r,
ship.δ,
ship.npm,
basic_params,
maneuvering_params,
return_all_vals=True,
)
save_fig_path = os.path.join(str(tmpdir),"w_P.png")
fig = plt.figure()
plt.plot(time_list, w_P_list)
fig.savefig(save_fig_path)
plt.close()
save_fig_path = os.path.join(str(tmpdir),"J.png")
fig = plt.figure()
plt.plot(time_list, J_list)
fig.savefig(save_fig_path)
plt.close()
save_fig_path = os.path.join(str(tmpdir),"K_T.png")
fig = plt.figure()
plt.plot(time_list, K_T_list)
fig.savefig(save_fig_path)
plt.close()
save_fig_path = os.path.join(str(tmpdir),"U_R.png")
fig = plt.figure()
plt.plot(time_list, U_R_list)
fig.savefig(save_fig_path)
plt.close()
save_fig_path = os.path.join(str(tmpdir),"α_R.png")
fig = plt.figure()
plt.plot(time_list, α_R_list)
fig.savefig(save_fig_path)
plt.close()
save_fig_path = os.path.join(str(tmpdir),"F_N.png")
fig = plt.figure()
plt.plot(time_list, F_N_list)
fig.savefig(save_fig_path)
plt.close()
save_fig_path = os.path.join(str(tmpdir),"gamma_R.png")
fig = plt.figure()
plt.plot(time_list, γ_R_list)
fig.savefig(save_fig_path)
plt.close() | 0.315314 | 0.32748 |
import logging
import hashlib
from xml.sax import SAXParseException
from collections import defaultdict
from rdflib import URIRef, ConjunctiveGraph, util as rdflib_util
from rdflib.namespace import DC, RDF, OWL
from dipper.utils.CurieUtil import CurieUtil
__author__ = 'nlw'
LOG = logging.getLogger(__name__)
class GraphUtils:
def __init__(self, curie_map):
self.curie_map = curie_map
self.cu = CurieUtil(curie_map)
return
@staticmethod
def write(graph, fileformat=None, filename=None):
"""
A basic graph writer (to stdout) for any of the sources.
this will write raw triples in rdfxml, unless specified.
to write turtle, specify format='turtle'
an optional file can be supplied instead of stdout
:return: None
"""
filewriter = None
if fileformat is None:
fileformat = 'turtle'
if filename is not None:
with open(filename, 'wb') as filewriter:
LOG.info("Writing triples in %s to %s", fileformat, filename)
# rdflib serialize
graph.serialize(filewriter, format=fileformat)
else:
print(graph.serialize(fileformat).decode())
return
@staticmethod
def get_properties_from_graph(graph):
"""
Wrapper for RDFLib.graph.predicates() that returns a unique set
:param graph: RDFLib.graph
:return: set, set of properties
"""
# collapse to single list
property_set = list()
for row in graph.predicates():
property_set.append(row)
return set(property_set)
@staticmethod
def add_property_axioms(graph, properties):
ontology_graph = ConjunctiveGraph()
GH = 'https://raw.githubusercontent.com'
OBO = 'http://purl.obolibrary.org/obo'
ontologies = [
OBO + '/sepio.owl',
OBO + '/geno.owl',
OBO + '/iao.owl',
OBO + '/ero.owl',
OBO + '/pco.owl',
OBO + '/xco.owl',
OBO + '/ro.owl',
GH + '/jamesmalone/OBAN/master/ontology/oban_core.ttl',
]
# random timeouts can waste hours. (too many redirects?)
# there is a timeout param in urllib.request,
# but it is not exposed by rdflib.parsing
# so retry once on URLError
for ontology in ontologies:
LOG.info("parsing: " + ontology)
try:
ontology_graph.parse(
ontology, format=rdflib_util.guess_format(ontology))
except SAXParseException as e:
LOG.error(e)
LOG.error('Retrying as turtle: ' + ontology)
ontology_graph.parse(ontology, format="turtle")
except OSError as e: # URLError:
# simple retry
LOG.error(e)
LOG.error('Retrying: ' + ontology)
ontology_graph.parse(
ontology, format=rdflib_util.guess_format(ontology))
# Get object properties
graph = GraphUtils.add_property_to_graph(
ontology_graph.subjects(RDF['type'], OWL['ObjectProperty']),
graph, OWL['ObjectProperty'], properties)
# Get annotation properties
graph = GraphUtils.add_property_to_graph(
ontology_graph.subjects(RDF['type'], OWL['AnnotationProperty']),
graph, OWL['AnnotationProperty'], properties)
# Get data properties
graph = GraphUtils.add_property_to_graph(
ontology_graph.subjects(RDF['type'], OWL['DatatypeProperty']),
graph, OWL['DatatypeProperty'], properties)
for row in graph.predicates(DC['source'], OWL['AnnotationProperty']):
if row == RDF['type']:
graph.remove(
(DC['source'], RDF['type'], OWL['AnnotationProperty']))
graph.add((DC['source'], RDF['type'], OWL['ObjectProperty']))
# Hardcoded properties
graph.add((
URIRef('https://monarchinitiative.org/MONARCH_cliqueLeader'), RDF['type'],
OWL['AnnotationProperty']))
graph.add((
URIRef('https://monarchinitiative.org/MONARCH_anonymous'), RDF['type'],
OWL['AnnotationProperty']))
return graph
@staticmethod
def add_property_to_graph(results, graph, property_type, property_list):
for row in results:
if row in property_list:
graph.add((row, RDF['type'], property_type))
return graph
@staticmethod
def digest_id(wordage): # same as source/Source.hash_id(wordage)
'''
Form a deterministic digest of input
Leading 'b' is an experiment forcing the first char to be non numeric
but valid hex
Not required for RDF but some other contexts do not want the leading
char to be a digit
: param str wordage arbitrary string
: return str
'''
return 'b' + hashlib.sha1(wordage.encode('utf-8')).hexdigest()[1:20]
@staticmethod
def compare_graph_predicates(graph1, graph2):
'''
From rdf graphs, count predicates in each and return a list of
: param graph1 graph, hopefully RDFlib-like
: param graph2 graph, ditto
: return dict with count of predicates in each graph:
: e.g.:
: {
: "has_a_property": {
: "graph1": 1234,
: "graph2": 1023},
: "has_another_property": {
: "graph1": 94,
: "graph2": 51}
: }
'''
# dict of dicts that acts sensibly when a key that doesn't
# exist is accessed
counts = defaultdict(lambda: defaultdict(int))
for this_g in [graph1, graph2]:
for this_p in this_g.predicates():
counts[this_p][str(this_g.identifier)] = \
counts[this_p][str(this_g.identifier)] + 1
return counts
@staticmethod
def count_predicates(graph):
'''
From rdf graphs, count predicates in each and return a list of
: param graph
: return dict with count of predicates in each graph:
: e.g.:
: {
: "has_a_property": 1234,
: "has_another_property": 482
: }
'''
# dict of dicts that acts sensibly when a key that doesn't
# exist is accessed
counts = defaultdict(int)
for this_p in graph.predicates():
counts[this_p] = counts[this_p] + 1
return counts | dipper/utils/GraphUtils.py | import logging
import hashlib
from xml.sax import SAXParseException
from collections import defaultdict
from rdflib import URIRef, ConjunctiveGraph, util as rdflib_util
from rdflib.namespace import DC, RDF, OWL
from dipper.utils.CurieUtil import CurieUtil
__author__ = 'nlw'
LOG = logging.getLogger(__name__)
class GraphUtils:
def __init__(self, curie_map):
self.curie_map = curie_map
self.cu = CurieUtil(curie_map)
return
@staticmethod
def write(graph, fileformat=None, filename=None):
"""
A basic graph writer (to stdout) for any of the sources.
this will write raw triples in rdfxml, unless specified.
to write turtle, specify format='turtle'
an optional file can be supplied instead of stdout
:return: None
"""
filewriter = None
if fileformat is None:
fileformat = 'turtle'
if filename is not None:
with open(filename, 'wb') as filewriter:
LOG.info("Writing triples in %s to %s", fileformat, filename)
# rdflib serialize
graph.serialize(filewriter, format=fileformat)
else:
print(graph.serialize(fileformat).decode())
return
@staticmethod
def get_properties_from_graph(graph):
"""
Wrapper for RDFLib.graph.predicates() that returns a unique set
:param graph: RDFLib.graph
:return: set, set of properties
"""
# collapse to single list
property_set = list()
for row in graph.predicates():
property_set.append(row)
return set(property_set)
@staticmethod
def add_property_axioms(graph, properties):
ontology_graph = ConjunctiveGraph()
GH = 'https://raw.githubusercontent.com'
OBO = 'http://purl.obolibrary.org/obo'
ontologies = [
OBO + '/sepio.owl',
OBO + '/geno.owl',
OBO + '/iao.owl',
OBO + '/ero.owl',
OBO + '/pco.owl',
OBO + '/xco.owl',
OBO + '/ro.owl',
GH + '/jamesmalone/OBAN/master/ontology/oban_core.ttl',
]
# random timeouts can waste hours. (too many redirects?)
# there is a timeout param in urllib.request,
# but it is not exposed by rdflib.parsing
# so retry once on URLError
for ontology in ontologies:
LOG.info("parsing: " + ontology)
try:
ontology_graph.parse(
ontology, format=rdflib_util.guess_format(ontology))
except SAXParseException as e:
LOG.error(e)
LOG.error('Retrying as turtle: ' + ontology)
ontology_graph.parse(ontology, format="turtle")
except OSError as e: # URLError:
# simple retry
LOG.error(e)
LOG.error('Retrying: ' + ontology)
ontology_graph.parse(
ontology, format=rdflib_util.guess_format(ontology))
# Get object properties
graph = GraphUtils.add_property_to_graph(
ontology_graph.subjects(RDF['type'], OWL['ObjectProperty']),
graph, OWL['ObjectProperty'], properties)
# Get annotation properties
graph = GraphUtils.add_property_to_graph(
ontology_graph.subjects(RDF['type'], OWL['AnnotationProperty']),
graph, OWL['AnnotationProperty'], properties)
# Get data properties
graph = GraphUtils.add_property_to_graph(
ontology_graph.subjects(RDF['type'], OWL['DatatypeProperty']),
graph, OWL['DatatypeProperty'], properties)
for row in graph.predicates(DC['source'], OWL['AnnotationProperty']):
if row == RDF['type']:
graph.remove(
(DC['source'], RDF['type'], OWL['AnnotationProperty']))
graph.add((DC['source'], RDF['type'], OWL['ObjectProperty']))
# Hardcoded properties
graph.add((
URIRef('https://monarchinitiative.org/MONARCH_cliqueLeader'), RDF['type'],
OWL['AnnotationProperty']))
graph.add((
URIRef('https://monarchinitiative.org/MONARCH_anonymous'), RDF['type'],
OWL['AnnotationProperty']))
return graph
@staticmethod
def add_property_to_graph(results, graph, property_type, property_list):
for row in results:
if row in property_list:
graph.add((row, RDF['type'], property_type))
return graph
@staticmethod
def digest_id(wordage): # same as source/Source.hash_id(wordage)
'''
Form a deterministic digest of input
Leading 'b' is an experiment forcing the first char to be non numeric
but valid hex
Not required for RDF but some other contexts do not want the leading
char to be a digit
: param str wordage arbitrary string
: return str
'''
return 'b' + hashlib.sha1(wordage.encode('utf-8')).hexdigest()[1:20]
@staticmethod
def compare_graph_predicates(graph1, graph2):
'''
From rdf graphs, count predicates in each and return a list of
: param graph1 graph, hopefully RDFlib-like
: param graph2 graph, ditto
: return dict with count of predicates in each graph:
: e.g.:
: {
: "has_a_property": {
: "graph1": 1234,
: "graph2": 1023},
: "has_another_property": {
: "graph1": 94,
: "graph2": 51}
: }
'''
# dict of dicts that acts sensibly when a key that doesn't
# exist is accessed
counts = defaultdict(lambda: defaultdict(int))
for this_g in [graph1, graph2]:
for this_p in this_g.predicates():
counts[this_p][str(this_g.identifier)] = \
counts[this_p][str(this_g.identifier)] + 1
return counts
@staticmethod
def count_predicates(graph):
'''
From rdf graphs, count predicates in each and return a list of
: param graph
: return dict with count of predicates in each graph:
: e.g.:
: {
: "has_a_property": 1234,
: "has_another_property": 482
: }
'''
# dict of dicts that acts sensibly when a key that doesn't
# exist is accessed
counts = defaultdict(int)
for this_p in graph.predicates():
counts[this_p] = counts[this_p] + 1
return counts | 0.666062 | 0.175998 |
import logging
import random
from ipaddress import ip_address
import ptf
from scapy.all import IP, Ether
import ptf.packet as scapy
from ptf.base_tests import BaseTest
from ptf.mask import Mask
from ptf.testutils import *
# Packet count for verifying traffic is forwarded via the IPinIP tunnel.
PACKET_NUM = 10000
# Packet count for verifying traffic is NOT forwarded from the standby ToR
# directly to the server.
PACKET_NUM_FOR_NEGATIVE_CHECK = 100
# Allowed relative deviation from a perfectly even per-portchannel split.
DIFF = 0.25
# u'' literals (PEP 414) are valid on both Python 2 and 3, unlike the
# Python-2-only unicode() builtin this originally relied on.
SRC_IP_RANGE = [u'172.16.31.10', u'172.16.31.10']
# Seconds to wait for (non-)arrival of a packet in the verify_* helpers.
TIMEOUT = 1
class IpinIPTunnelTest(BaseTest):
    '''
    @summary: Overview of functionality
    This script sends traffic to the standby ToR and captures traffic on all
    portchannel interfaces to verify that
      1. no packet leaks from the standby ToR directly to the server, and
      2. tunneled (IPinIP) traffic is balanced across the portchannels
         within +/- DIFF of a perfectly even split.
    '''
    def __init__(self):
        '''
        @summary: constructor
        '''
        BaseTest.__init__(self)
        self.test_params = test_params_get()
        self.logger = logging.getLogger("IPinIPTunnel")

    def setUp(self):
        """Read topology parameters injected by the test harness."""
        self.server_ip = self.test_params['server_ip']
        self.server_port = int(self.test_params['server_port'])
        self.vlan_mac = self.test_params['vlan_mac']
        self.standby_tor_mac = self.test_params['standby_tor_mac']
        self.active_tor_ip = self.test_params['active_tor_ip']
        self.standby_tor_ip = self.test_params['standby_tor_ip']
        self.ptf_portchannel_indices = self.test_params['ptf_portchannel_indices']
        # Invert {portchannel: [ptf indices]} into {ptf index: portchannel}
        # so the port a packet arrives on can be mapped to its portchannel.
        self.indice_to_portchannel = {}
        for port_channel, indices in self.ptf_portchannel_indices.items():
            for indice in indices:
                self.indice_to_portchannel[indice] = port_channel
        self.hash_key_list = self.test_params['hash_key_list']
        self.dataplane = ptf.dataplane_instance

    def runTest(self):
        """
        Entrypoint of test script.
        """
        self.send_and_verify_packets()

    def random_ip(self, begin, end):
        """
        Generate a random IP from the inclusive range [begin, end].
        """
        length = int(ip_address(end)) - int(ip_address(begin))
        return str(ip_address(begin) + random.randint(0, length))

    def generate_packet_to_server(self, hash_key):
        """
        Generate a TCP packet destined to the server. The field selected by
        hash_key is filled with a random value; every other field is fixed.
        """
        base_src_mac = self.dataplane.get_mac(0, 0)
        ip_src = self.random_ip(SRC_IP_RANGE[0], SRC_IP_RANGE[1]) if hash_key == 'src-ip' else SRC_IP_RANGE[0]
        ip_dst = self.server_ip
        sport = random.randint(1, 65535) if hash_key == 'src-port' else 1234
        dport = random.randint(1, 65535) if hash_key == 'dst-port' else 80
        src_mac = (base_src_mac[:-5] + "%02x" % random.randint(0, 255) + ":" + "%02x" % random.randint(0, 255)) if hash_key == 'src-mac' else base_src_mac
        dst_mac = self.standby_tor_mac
        vlan_id = random.randint(1, 4094) if hash_key == 'vlan-id' else 0
        # A VLAN tag adds 4 bytes to the frame, hence pktlen 132 vs 128.
        pkt = simple_tcp_packet(pktlen=128 if vlan_id == 0 else 132,
                                eth_dst=dst_mac,
                                eth_src=src_mac,
                                dl_vlan_enable=False if vlan_id == 0 else True,
                                vlan_vid=vlan_id,
                                vlan_pcp=0,
                                ip_src=ip_src,
                                ip_dst=ip_dst,
                                tcp_sport=sport,
                                tcp_dport=dport,
                                ip_ttl=64)
        return pkt

    def generate_expected_packet(self, inner_pkt):
        """
        Build a masked IPinIP packet (standby ToR -> active ToR) wrapping
        inner_pkt, for verifying traffic received from the tunnel.
        """
        inner_pkt = inner_pkt.copy()
        # The standby ToR decrements the inner TTL before encapsulating.
        inner_pkt.ttl = inner_pkt.ttl - 1
        pkt = scapy.Ether(dst="aa:aa:aa:aa:aa:aa", src=self.standby_tor_mac) / \
            scapy.IP(src=self.standby_tor_ip, dst=self.active_tor_ip) / inner_pkt[IP]
        exp_pkt = Mask(pkt)
        # Only outer src/dst IPs (and inner payload) must match; mask the
        # remaining outer-header and mutable TCP fields.
        exp_pkt.set_do_not_care_scapy(scapy.Ether, 'dst')
        exp_pkt.set_do_not_care_scapy(scapy.IP, "ihl")
        exp_pkt.set_do_not_care_scapy(scapy.IP, "tos")
        exp_pkt.set_do_not_care_scapy(scapy.IP, "len")
        exp_pkt.set_do_not_care_scapy(scapy.IP, "id")
        exp_pkt.set_do_not_care_scapy(scapy.IP, "flags")
        exp_pkt.set_do_not_care_scapy(scapy.IP, "frag")
        exp_pkt.set_do_not_care_scapy(scapy.IP, "ttl")
        exp_pkt.set_do_not_care_scapy(scapy.IP, "proto")
        exp_pkt.set_do_not_care_scapy(scapy.IP, "chksum")
        exp_pkt.set_do_not_care_scapy(scapy.TCP, "sport")
        exp_pkt.set_do_not_care_scapy(scapy.TCP, "seq")
        exp_pkt.set_do_not_care_scapy(scapy.TCP, "ack")
        exp_pkt.set_do_not_care_scapy(scapy.TCP, "reserved")
        exp_pkt.set_do_not_care_scapy(scapy.TCP, "dataofs")
        exp_pkt.set_do_not_care_scapy(scapy.TCP, "window")
        exp_pkt.set_do_not_care_scapy(scapy.TCP, "chksum")
        exp_pkt.set_do_not_care_scapy(scapy.TCP, "urgptr")
        exp_pkt.set_ignore_extra_bytes()
        return exp_pkt

    def generate_unexpected_packet(self, inner_pkt):
        """
        Generate a packet that shouldn't be observed.
        All packets should be forwarded via tunnel, so no packet should be
        observed on the server port.
        """
        pkt = inner_pkt.copy()
        pkt[Ether].src = self.vlan_mac
        # TTL of packets from active tor to server is decreased by 1
        pkt[IP].ttl -= 1
        unexpected_packet = Mask(pkt)
        # Ignore dst mac
        unexpected_packet.set_do_not_care_scapy(scapy.Ether, 'dst')
        # Ignore check sum
        unexpected_packet.set_do_not_care_scapy(scapy.IP, "chksum")
        # Ignore extra bytes
        unexpected_packet.set_ignore_extra_bytes()
        return unexpected_packet

    def check_balance(self, pkt_distribution, hash_key):
        """
        Assert that the per-portchannel packet counts are within +/- DIFF
        of an even split of PACKET_NUM.
        """
        portchannel_num = len(self.ptf_portchannel_indices)
        # float() forces true division on Python 2 as well, so the expected
        # count (and the bounds below) are exact rather than truncated.
        expect_packet_num = float(PACKET_NUM) / portchannel_num
        pkt_num_lo = expect_packet_num * (1.0 - DIFF)
        pkt_num_hi = expect_packet_num * (1.0 + DIFF)
        self.logger.info("hash key = {}".format(hash_key))
        self.logger.info("%-10s \t %10s \t %10s \t" % ("port(s)", "exp_cnt", "act_cnt"))
        balance = True
        for portchannel, count in pkt_distribution.items():
            self.logger.info("%-10s \t %10s \t %10s \t" % (portchannel, str(expect_packet_num), str(count)))
            if count < pkt_num_lo or count > pkt_num_hi:
                balance = False
        if not balance:
            print("Check balance failed for {}".format(hash_key))
        assert(balance)

    def send_and_verify_packets(self):
        """
        Send packets from ptf (T1) to the standby ToR, and verify.
        """
        # dict.keys() is a non-indexable view on Python 3; materialize it so
        # dst_ports[0] / dst_ports[idx] below work on both Python 2 and 3.
        dst_ports = list(self.indice_to_portchannel.keys())
        # Select the first ptf indice as src port
        src_port = dst_ports[0]
        # Step 1. verify no packet is received from standby_tor to server
        for i in range(0, PACKET_NUM_FOR_NEGATIVE_CHECK):
            inner_pkt = self.generate_packet_to_server('src-ip')
            unexpected_packet = self.generate_unexpected_packet(inner_pkt)
            send_packet(self, src_port, inner_pkt)
            verify_no_packet(test=self,
                             port_id=self.server_port,
                             pkt=unexpected_packet,
                             timeout=TIMEOUT)
        # Step 2. verify packet is received from IPinIP tunnel and check balance
        for hash_key in self.hash_key_list:
            self.logger.info("Verifying traffic balance for hash key {}".format(hash_key))
            pkt_distribution = {}
            for i in range(0, PACKET_NUM):
                inner_pkt = self.generate_packet_to_server(hash_key)
                tunnel_pkt = self.generate_expected_packet(inner_pkt)
                self.logger.info("Sending packet dst_mac = {} src_mac = {} dst_ip = {} src_ip = {} from port {}" \
                    .format(inner_pkt[Ether].dst, inner_pkt[Ether].src, inner_pkt[IP].dst, inner_pkt[IP].src, src_port))
                send_packet(self, src_port, inner_pkt)
                # Verify packet is received from IPinIP tunnel
                idx, count = verify_packet_any_port(test=self,
                                                    pkt=tunnel_pkt,
                                                    ports=dst_ports,
                                                    device_number=0,
                                                    timeout=TIMEOUT)
                portchannel = self.indice_to_portchannel[dst_ports[idx]]
                pkt_distribution[portchannel] = pkt_distribution.get(portchannel, 0) + 1
            self.check_balance(pkt_distribution, hash_key)
import random
from ipaddress import ip_address
import ptf
from scapy.all import IP, Ether
import ptf.packet as scapy
from ptf.base_tests import BaseTest
from ptf.mask import Mask
from ptf.testutils import *
# packet count for verifying traffic is forwarded via IPinIP tunnel
PACKET_NUM = 10000
# packet count for verifying traffic is not forwarded from standby tor to server directly
PACKET_NUM_FOR_NEGATIVE_CHECK = 100
DIFF = 0.25 # The valid range for balance check
SRC_IP_RANGE = [unicode('172.16.31.10'), unicode('172.16.31.10')]
TIMEOUT = 1
class IpinIPTunnelTest(BaseTest):
'''
@summary: Overview of functionality
This script send traffic to standby ToR, and capture traffic
on all portchannel interfaces to check balance.
'''
def __init__(self):
'''
@summary: constructor
'''
BaseTest.__init__(self)
self.test_params = test_params_get()
self.logger = logging.getLogger("IPinIPTunnel")
def setUp(self):
self.server_ip = self.test_params['server_ip']
self.server_port = int(self.test_params['server_port'])
self.vlan_mac = self.test_params['vlan_mac']
self.standby_tor_mac = self.test_params['standby_tor_mac']
self.active_tor_ip = self.test_params['active_tor_ip']
self.standby_tor_ip = self.test_params['standby_tor_ip']
self.ptf_portchannel_indices = self.test_params['ptf_portchannel_indices']
self.indice_to_portchannel = {}
for port_channel, indices in self.ptf_portchannel_indices.items():
for indice in indices:
self.indice_to_portchannel[indice] = port_channel
self.hash_key_list = self.test_params['hash_key_list']
self.dataplane = ptf.dataplane_instance
def runTest(self):
"""
Entrypoint of test script.
"""
self.send_and_verify_packets()
def random_ip(self, begin, end):
"""
Generate a random IP from given ip range
"""
length = int(ip_address(end)) - int(ip_address(begin))
return str(ip_address(begin) + random.randint(0, length))
def generate_packet_to_server(self, hash_key):
"""
Generate a packet to server. The value of field in packet is filled with random value according to hash_key
"""
base_src_mac = self.dataplane.get_mac(0, 0)
ip_src = self.random_ip(SRC_IP_RANGE[0], SRC_IP_RANGE[1]) if hash_key == 'src-ip' else SRC_IP_RANGE[0]
ip_dst = self.server_ip
sport = random.randint(1, 65535) if hash_key == 'src-port' else 1234
dport = random.randint(1, 65535) if hash_key == 'dst-port' else 80
src_mac = (base_src_mac[:-5] + "%02x" % random.randint(0, 255) + ":" + "%02x" % random.randint(0, 255)) if hash_key == 'src-mac' else base_src_mac
dst_mac = self.standby_tor_mac
vlan_id = random.randint(1, 4094) if hash_key == 'vlan-id' else 0
pkt = simple_tcp_packet(pktlen=128 if vlan_id == 0 else 132,
eth_dst=dst_mac,
eth_src=src_mac,
dl_vlan_enable=False if vlan_id == 0 else True,
vlan_vid=vlan_id,
vlan_pcp=0,
ip_src=ip_src,
ip_dst=ip_dst,
tcp_sport=sport,
tcp_dport=dport,
ip_ttl=64)
return pkt
def generate_expected_packet(self, inner_pkt):
"""
Generate ip_in_ip packet for verifying.
"""
inner_pkt = inner_pkt.copy()
inner_pkt.ttl = inner_pkt.ttl - 1
pkt = scapy.Ether(dst="aa:aa:aa:aa:aa:aa", src=self.standby_tor_mac) / \
scapy.IP(src=self.standby_tor_ip, dst=self.active_tor_ip) / inner_pkt[IP]
exp_pkt = Mask(pkt)
exp_pkt.set_do_not_care_scapy(scapy.Ether, 'dst')
exp_pkt.set_do_not_care_scapy(scapy.IP, "ihl")
exp_pkt.set_do_not_care_scapy(scapy.IP, "tos")
exp_pkt.set_do_not_care_scapy(scapy.IP, "len")
exp_pkt.set_do_not_care_scapy(scapy.IP, "id")
exp_pkt.set_do_not_care_scapy(scapy.IP, "flags")
exp_pkt.set_do_not_care_scapy(scapy.IP, "frag")
exp_pkt.set_do_not_care_scapy(scapy.IP, "ttl")
exp_pkt.set_do_not_care_scapy(scapy.IP, "proto")
exp_pkt.set_do_not_care_scapy(scapy.IP, "chksum")
exp_pkt.set_do_not_care_scapy(scapy.TCP, "sport")
exp_pkt.set_do_not_care_scapy(scapy.TCP, "seq")
exp_pkt.set_do_not_care_scapy(scapy.TCP, "ack")
exp_pkt.set_do_not_care_scapy(scapy.TCP, "reserved")
exp_pkt.set_do_not_care_scapy(scapy.TCP, "dataofs")
exp_pkt.set_do_not_care_scapy(scapy.TCP, "window")
exp_pkt.set_do_not_care_scapy(scapy.TCP, "chksum")
exp_pkt.set_do_not_care_scapy(scapy.TCP, "urgptr")
exp_pkt.set_ignore_extra_bytes()
return exp_pkt
def generate_unexpected_packet(self, inner_pkt):
"""
Generate a packet that shouldn't be observed.
All packet should be forward via tunnel, so no packet should be observed on server port
"""
pkt = inner_pkt.copy()
pkt[Ether].src = self.vlan_mac
# TTL of packets from active tor to server is decreased by 1
pkt[IP].ttl -= 1
unexpected_packet = Mask(pkt)
# Ignore dst mac
unexpected_packet.set_do_not_care_scapy(scapy.Ether, 'dst')
# Ignore check sum
unexpected_packet.set_do_not_care_scapy(scapy.IP, "chksum")
#Ignore extra bytes
unexpected_packet.set_ignore_extra_bytes()
return unexpected_packet
def check_balance(self, pkt_distribution, hash_key):
portchannel_num = len(self.ptf_portchannel_indices)
expect_packet_num = PACKET_NUM / portchannel_num
pkt_num_lo = expect_packet_num * (1.0 - DIFF)
pkt_num_hi = expect_packet_num * (1.0 + DIFF)
self.logger.info("hash key = {}".format(hash_key))
self.logger.info("%-10s \t %10s \t %10s \t" % ("port(s)", "exp_cnt", "act_cnt"))
balance = True
for portchannel, count in pkt_distribution.items():
self.logger.info("%-10s \t %10s \t %10s \t" % (portchannel, str(expect_packet_num), str(count)))
if count < pkt_num_lo or count > pkt_num_hi:
balance = False
if not balance:
print("Check balance failed for {}".format(hash_key))
assert(balance)
def send_and_verify_packets(self):
"""
Send packet from ptf (T1) to standby ToR, and verify
"""
dst_ports = self.indice_to_portchannel.keys()
# Select the first ptf indice as src port
src_port = dst_ports[0]
# Step 1. verify no packet is received from standby_tor to server
for i in range(0, PACKET_NUM_FOR_NEGATIVE_CHECK):
inner_pkt = self.generate_packet_to_server('src-ip')
unexpected_packet = self.generate_unexpected_packet(inner_pkt)
send_packet(self, src_port, inner_pkt)
verify_no_packet(test=self,
port_id=self.server_port,
pkt=unexpected_packet,
timeout=TIMEOUT)
# Step 2. verify packet is received from IPinIP tunnel and check balance
for hash_key in self.hash_key_list:
self.logger.info("Verifying traffic balance for hash key {}".format(hash_key))
pkt_distribution = {}
for i in range(0, PACKET_NUM):
inner_pkt = self.generate_packet_to_server(hash_key)
tunnel_pkt = self.generate_expected_packet(inner_pkt)
self.logger.info("Sending packet dst_mac = {} src_mac = {} dst_ip = {} src_ip = {} from port {}" \
.format(inner_pkt[Ether].dst, inner_pkt[Ether].src, inner_pkt[IP].dst, inner_pkt[IP].src, src_port))
send_packet(self, src_port, inner_pkt)
# Verify packet is received from IPinIP tunnel
idx, count = verify_packet_any_port(test=self,
pkt=tunnel_pkt,
ports=dst_ports,
device_number=0,
timeout=TIMEOUT)
pkt_distribution[self.indice_to_portchannel[dst_ports[idx]]] = pkt_distribution.get(self.indice_to_portchannel[dst_ports[idx]], 0) + 1
self.check_balance(pkt_distribution, hash_key) | 0.610337 | 0.155655 |
import os
# Project data/result locations, all relative to the repository root and
# assembled with os.path.join so they are portable across platforms.

# --- raw classification splits ---
_CLASS_DATA = os.path.join('data', 'classification_data')
TRAIN_SPLIT_FILE = os.path.join(_CLASS_DATA, 'training_split.csv')
TEST_SPLIT_FILE = os.path.join(_CLASS_DATA, 'test_split.csv')
VAL_SPLIT_FILE = os.path.join(_CLASS_DATA, 'dev_split.csv')

# --- auxiliary data sets ---
D_ND_DIR = os.path.join('data', 'disc_nondisc')
POS_NEG_DIR = os.path.join('data', 'pos_neg')

# --- feature roots ---
SEL_FEAT = os.path.join('data', 'selected_features')
ALL_FEAT = os.path.join('data', 'all_features')

# --- selected features, un-normalized ("regular") ---
SEL_FEAT_TRAIN_REGULAR_CLASSIFY = os.path.join(SEL_FEAT, 'regular', 'classify', 'train')
SEL_FEAT_TEST_REGULAR_CLASSIFY = os.path.join(SEL_FEAT, 'regular', 'classify', 'test')
SEL_FEAT_VAL_REGULAR_CLASSIFY = os.path.join(SEL_FEAT, 'regular', 'classify', 'val')
SEL_FEAT_TRAIN_REGULAR_ESTIMATE = os.path.join(SEL_FEAT, 'regular', 'estimate', 'train')
SEL_FEAT_VAL_REGULAR_ESTIMATE = os.path.join(SEL_FEAT, 'regular', 'estimate', 'val')
SEL_FEAT_TEST_REGULAR_ESTIMATE = os.path.join(SEL_FEAT, 'regular', 'estimate', 'test')

# --- all features, un-normalized ("regular") ---
ALL_FEAT_TRAIN_REGULAR_CLASSIFY = os.path.join(ALL_FEAT, 'regular', 'classify', 'train')
ALL_FEAT_TEST_REGULAR_CLASSIFY = os.path.join(ALL_FEAT, 'regular', 'classify', 'test')
ALL_FEAT_VAL_REGULAR_CLASSIFY = os.path.join(ALL_FEAT, 'regular', 'classify', 'val')
ALL_FEAT_TRAIN_REGULAR_ESTIMATE = os.path.join(ALL_FEAT, 'regular', 'estimate', 'train')
ALL_FEAT_VAL_REGULAR_ESTIMATE = os.path.join(ALL_FEAT, 'regular', 'estimate', 'val')
ALL_FEAT_TEST_REGULAR_ESTIMATE = os.path.join(ALL_FEAT, 'regular', 'estimate', 'test')

# --- selected features, normalized ---
SEL_FEAT_TRAIN_NORMALIZED_CLASSIFY = os.path.join(SEL_FEAT, 'normalize', 'classify', 'train')
SEL_FEAT_VAL_NORMALIZED_CLASSIFY = os.path.join(SEL_FEAT, 'normalize', 'classify', 'val')
SEL_FEAT_TRAIN_NORMALIZED_ESTIMATE = os.path.join(SEL_FEAT, 'normalize', 'estimate', 'train')
SEL_FEAT_VAL_NORMALIZED_ESTIMATE = os.path.join(SEL_FEAT, 'normalize', 'estimate', 'val')

# --- result directories ---
RESULTS_CLASSIFY = os.path.join('results', 'grid_search', 'classification')
RESULTS_ESTIMATE = os.path.join('results', 'grid_search', 'regression')
ANOVA_DIR = os.path.join('results', 'anova')
GRID_SEARCH_CLF_DIR = os.path.join('results', 'grid_search', 'classification')
GRID_SEARCH_REG_DIR = os.path.join('results','grid_search','regression') | src/main/config.py | import os
TRAIN_SPLIT_FILE = os.path.join('data','classification_data','training_split.csv')
TEST_SPLIT_FILE = os.path.join('data','classification_data','test_split.csv')
VAL_SPLIT_FILE = os.path.join('data','classification_data','dev_split.csv')
D_ND_DIR = os.path.join('data','disc_nondisc')
POS_NEG_DIR = os.path.join('data','pos_neg')
SEL_FEAT_TRAIN_REGULAR_CLASSIFY = os.path.join('data','selected_features','regular','classify','train')
SEL_FEAT_TEST_REGULAR_CLASSIFY = os.path.join('data','selected_features','regular','classify','test')
SEL_FEAT_VAL_REGULAR_CLASSIFY = os.path.join('data','selected_features','regular','classify','val')
SEL_FEAT_TRAIN_REGULAR_ESTIMATE = os.path.join('data','selected_features','regular','estimate','train')
SEL_FEAT_VAL_REGULAR_ESTIMATE = os.path.join('data','selected_features','regular','estimate','val')
SEL_FEAT_TEST_REGULAR_ESTIMATE = os.path.join('data','selected_features','regular','estimate','test')
ALL_FEAT_TRAIN_REGULAR_CLASSIFY = os.path.join('data','all_features','regular','classify','train')
ALL_FEAT_TEST_REGULAR_CLASSIFY = os.path.join('data','all_features','regular','classify','test')
ALL_FEAT_VAL_REGULAR_CLASSIFY = os.path.join('data','all_features','regular','classify','val')
ALL_FEAT_TRAIN_REGULAR_ESTIMATE = os.path.join('data','all_features','regular','estimate','train')
ALL_FEAT_VAL_REGULAR_ESTIMATE = os.path.join('data','all_features','regular','estimate','val')
ALL_FEAT_TEST_REGULAR_ESTIMATE = os.path.join('data','all_features','regular','estimate','test')
SEL_FEAT_TRAIN_NORMALIZED_CLASSIFY = os.path.join('data','selected_features','normalize','classify','train')
SEL_FEAT_VAL_NORMALIZED_CLASSIFY = os.path.join('data','selected_features','normalize','classify','val')
SEL_FEAT_TRAIN_NORMALIZED_ESTIMATE = os.path.join('data','selected_features','normalize','estimate','train')
SEL_FEAT_VAL_NORMALIZED_ESTIMATE = os.path.join('data','selected_features','normalize','estimate','val')
RESULTS_CLASSIFY = os.path.join('results','grid_search','classification')
RESULTS_ESTIMATE = os.path.join('results','grid_search','regression')
SEL_FEAT = os.path.join('data','selected_features')
ALL_FEAT = os.path.join('data','all_features')
ANOVA_DIR = os.path.join('results','anova')
GRID_SEARCH_CLF_DIR = os.path.join('results','grid_search','classification')
GRID_SEARCH_REG_DIR = os.path.join('results','grid_search','regression') | 0.163112 | 0.103477 |
import numpy as np
from numpy.lib.stride_tricks import as_strided
import scipy.fftpack as fft
import scipy
import scipy.signal
import six
# Upper bound, in bytes (2**8 * 2**10 = 262144), on the batch of STFT
# columns transformed per FFT call in stft(), to limit peak memory use.
MAX_MEM_BLOCK = 2**8 * 2**10
def stft(y, n_fft=2048, hop_length=None, win_length=None, window=None,
         center=True, dtype=np.complex64):
    """Short-time Fourier transform (STFT)

    Returns a complex-valued matrix D such that
        `np.abs(D[f, t])` is the magnitude of frequency bin `f` at frame `t`
        `np.angle(D[f, t])` is the phase of frequency bin `f` at frame `t`

    Parameters
    ----------
    y : np.ndarray [shape=(n,)], real-valued
        the input signal (audio time series)
    n_fft : int > 0 [scalar]
        FFT window size
    hop_length : int > 0 [scalar]
        number of audio samples between STFT columns.
        If unspecified, defaults to `win_length / 4`.
    win_length : int <= n_fft [scalar]
        Each frame of audio is windowed by `window()`.
        The window will be of length `win_length` and then padded
        with zeros to match `n_fft`.
        If unspecified, defaults to ``win_length = n_fft``.
    window : None, function, np.ndarray [shape=(n_fft,)]
        - None (default): use an asymmetric (periodic) Hann window
        - a window function, such as `scipy.signal.windows.hann`
        - a vector or array of length `n_fft`
    center : boolean
        - If `True`, the signal `y` is padded so that frame
          `D[:, t]` is centered at `y[t * hop_length]`.
        - If `False`, then `D[:, t]` begins at `y[t * hop_length]`
    dtype : numeric type
        Complex numeric type for `D`. Default is 64-bit complex.

    Returns
    -------
    D : np.ndarray [shape=(1 + n_fft/2, t), dtype=dtype]
        STFT matrix

    Raises
    ------
    ParameterError
        If `window` is supplied as a vector whose length differs from `n_fft`.

    See Also
    --------
    istft : Inverse STFT
    """
    # By default, use the entire frame
    if win_length is None:
        win_length = n_fft
    # Set the default hop, if it's not already specified
    if hop_length is None:
        hop_length = int(win_length / 4)
    if window is None:
        # Periodic (sym=False) Hann window.  get_window(..., fftbins=True) is
        # used instead of the scipy.signal.hann alias, which was deprecated in
        # scipy 1.1 and removed in scipy 1.13.
        fft_window = scipy.signal.get_window('hann', win_length, fftbins=True)
    elif callable(window):
        # User supplied a window function.  The builtin callable() works on
        # Python 2 and 3.2+, so the six.callable shim is unnecessary.
        fft_window = window(win_length)
    else:
        # User supplied a window vector.
        # Make sure it's an array:
        fft_window = np.asarray(window)
        # validate length compatibility
        if fft_window.size != n_fft:
            raise ParameterError('Size mismatch between n_fft and len(window)')
    # Zero-pad the window out to n_fft samples, centered
    fft_window = pad_center(fft_window, n_fft)
    # Reshape to a column so the window broadcasts over all frames at once
    fft_window = fft_window.reshape((-1, 1))
    # Pad the time series so that frames are centered
    if center:
        valid_audio(y)
        y = np.pad(y, int(n_fft // 2), mode='reflect')
    # Window the time series.
    y_frames = frame(y, frame_length=n_fft, hop_length=hop_length)
    # Pre-allocate the STFT matrix (Fortran order: columns are contiguous)
    stft_matrix = np.empty((int(1 + n_fft // 2), y_frames.shape[1]),
                           dtype=dtype,
                           order='F')
    # how many columns can we fit within MAX_MEM_BLOCK?
    n_columns = int(MAX_MEM_BLOCK / (stft_matrix.shape[0] *
                                     stft_matrix.itemsize))
    for bl_s in range(0, stft_matrix.shape[1], n_columns):
        bl_t = min(bl_s + n_columns, stft_matrix.shape[1])
        # RFFT and Conjugate here to match phase from DPWE code
        stft_matrix[:, bl_s:bl_t] = fft.fft(fft_window *
                                            y_frames[:, bl_s:bl_t],
                                            axis=0)[:stft_matrix.shape[0]].conj()
    return stft_matrix
def istft(stft_matrix, hop_length=None, win_length=None, window=None,
          center=True, dtype=np.float32):
    """
    Inverse short-time Fourier transform.

    Converts a complex-valued spectrogram `stft_matrix` to time-series `y`
    by overlap-adding the windowed inverse transform of each column.

    Parameters
    ----------
    stft_matrix : np.ndarray [shape=(1 + n_fft/2, t)]
        STFT matrix from `stft`
    hop_length : int > 0 [scalar]
        Number of samples between STFT columns.
        If unspecified, defaults to `win_length / 4`.
    win_length : int <= n_fft = 2 * (stft_matrix.shape[0] - 1)
        When reconstructing the time series, each frame is windowed
        according to the `window` function (see below).
        If unspecified, defaults to `n_fft`.
    window : None, function, np.ndarray [shape=(n_fft,)]
        - None (default): use an asymmetric (periodic) Hann window * 2/3
        - a window function, such as `scipy.signal.windows.hann`
        - a user-specified window vector of length `n_fft`
    center : boolean
        - If `True`, `D` is assumed to have centered frames.
        - If `False`, `D` is assumed to have left-aligned frames.
    dtype : numeric type
        Real numeric type for `y`. Default is 32-bit float.

    Returns
    -------
    y : np.ndarray [shape=(n,)]
        time domain signal reconstructed from `stft_matrix`

    Raises
    ------
    ParameterError
        If `window` is supplied as a vector whose length differs from `n_fft`.

    See Also
    --------
    stft : Short-time Fourier Transform
    """
    n_fft = 2 * (stft_matrix.shape[0] - 1)
    # By default, use the entire frame
    if win_length is None:
        win_length = n_fft
    # Set the default hop, if it's not already specified
    if hop_length is None:
        hop_length = int(win_length / 4)
    if window is None:
        # Periodic Hann window; the 2/3 scaling makes stft(istft(.)) an
        # identity for a 25% hop.  get_window(..., fftbins=True) replaces the
        # scipy.signal.hann alias removed in scipy 1.13.
        ifft_window = scipy.signal.get_window('hann', win_length,
                                              fftbins=True) * (2.0 / 3)
    elif callable(window):
        # User supplied a windowing function.  The builtin callable() makes
        # the six.callable compatibility shim unnecessary.
        ifft_window = window(win_length)
    else:
        # User supplied a window vector.
        # Make it into an array
        ifft_window = np.asarray(window)
        # Verify that the shape matches
        if ifft_window.size != n_fft:
            raise ParameterError('Size mismatch between n_fft and window size')
    # Pad out to match n_fft
    ifft_window = pad_center(ifft_window, n_fft)
    n_frames = stft_matrix.shape[1]
    # Output length: one full frame plus one hop per additional frame
    y = np.zeros(n_fft + hop_length * (n_frames - 1), dtype=dtype)
    for i in range(n_frames):
        sample = i * hop_length
        spec = stft_matrix[:, i].flatten()
        # Rebuild the full conjugate-symmetric spectrum from the half kept by stft
        spec = np.concatenate((spec.conj(), spec[-2:0:-1]), 0)
        # Overlap-add the windowed inverse transform of this frame
        ytmp = ifft_window * fft.ifft(spec).real
        y[sample:(sample + n_fft)] = y[sample:(sample + n_fft)] + ytmp
    if center:
        # Trim the reflection padding added by stft(center=True)
        y = y[int(n_fft // 2):-int(n_fft // 2)]
    return y
class LibrosaError(Exception):
    """Base class for all librosa-specific exceptions."""
class ParameterError(LibrosaError):
    """Raised when a caller supplies a mal-formed or invalid parameter."""
def pad_center(data, size, axis=-1, **kwargs):
    '''Pad `data` so it is centered within length `size` along `axis`.

    This is analogous to `str.center()`: the input occupies the middle of
    the output, with (near-)equal padding on either side.  When the total
    padding is odd, the extra element goes on the right.

    Parameters
    ----------
    data : np.ndarray
        Array to be padded and centered
    size : int >= data.shape[axis] [scalar]
        Length to pad `data` to
    axis : int
        Axis along which to pad and center the data
    kwargs : additional keyword arguments
        Passed through to `np.pad()`; `mode` defaults to `'constant'`

    Returns
    -------
    data_padded : np.ndarray
        `data` centered and padded to length `size` along `axis`

    Raises
    ------
    ParameterError
        If `size < data.shape[axis]`

    See Also
    --------
    numpy.pad

    Examples
    --------
    >>> pad_center(np.ones(5), 10).tolist()
    [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0]
    '''
    kwargs.setdefault('mode', 'constant')
    current = data.shape[axis]
    left = int((size - current) // 2)
    # A negative left pad means the target is shorter than the input.
    if left < 0:
        raise ParameterError(('Target size ({:d}) must be '
                              'at least input size ({:d})').format(size,
                                                                   current))
    pad_widths = [(0, 0) for _ in range(data.ndim)]
    pad_widths[axis] = (left, size - current - left)
    return np.pad(data, pad_widths, **kwargs)
def frame(y, frame_length=2048, hop_length=512):
    '''Slice a time series into overlapping frames without copying.

    A strided view onto `y` is returned, so
    `y_frames[i, j] == y[j * hop_length + i]` with no duplicated data.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        Time series to frame.  Must be one-dimensional and contiguous
        in memory.
    frame_length : int > 0 [scalar]
        Length of each frame in samples
    hop_length : int > 0 [scalar]
        Number of samples to advance between frames

    Returns
    -------
    y_frames : np.ndarray [shape=(frame_length, N_FRAMES)]
        Read-through view of `y` arranged as overlapping frames

    Raises
    ------
    ParameterError
        If `y` is not contiguous in memory, if `hop_length < 1`, or if
        `y` is too short to produce even a single frame.
    '''
    if hop_length < 1:
        raise ParameterError('Invalid hop_length: {:d}'.format(hop_length))
    if not y.flags['C_CONTIGUOUS']:
        raise ParameterError('Input buffer must be contiguous.')
    valid_audio(y)
    # Number of complete frames that fit; any tail samples are dropped.
    n_frames = 1 + int((len(y) - frame_length) / hop_length)
    if n_frames < 1:
        raise ParameterError('Buffer is too short (n={:d})'
                             ' for frame_length={:d}'.format(len(y),
                                                             frame_length))
    # One sample per row within a frame, hop_length samples between columns.
    step = y.itemsize
    return as_strided(y, shape=(frame_length, n_frames),
                      strides=(step, hop_length * step))
def valid_audio(y, mono=False):
    '''Validate whether a variable contains valid, mono audio data.

    Parameters
    ----------
    y : np.ndarray
        The input data to validate
    mono : bool
        Whether or not to force monophonic audio

    Returns
    -------
    valid : bool
        True if all tests pass

    Raises
    ------
    ParameterError
        If `y` fails to meet the following criteria:
        - `type(y)` is `np.ndarray`
        - `mono == True` and `y.ndim` is not 1
        - `mono == False` and `y.ndim` is not 1 or 2
        - `np.isfinite(y).all()` is not True
    '''
    if not isinstance(y, np.ndarray):
        raise ParameterError('data must be of type numpy.ndarray')
    # mono=True restricts to 1-D buffers; otherwise 1-D or 2-D is allowed.
    if mono and y.ndim != 1:
        raise ParameterError('Invalid shape for monophonic audio: '
                             'ndim={:d}, shape={}'.format(y.ndim,
                                                          y.shape))
    elif y.ndim > 2:
        raise ParameterError('Invalid shape for audio: '
                             'ndim={:d}, shape={}'.format(y.ndim,
                                                          y.shape))
    # Reject NaN / inf anywhere in the buffer.
    if not np.isfinite(y).all():
        raise ParameterError('Audio buffer is not finite everywhere')
return True | gccNMF/realtime/librosaSTFT.py |
import numpy as np
from numpy.lib.stride_tricks import as_strided
import scipy.fftpack as fft
import scipy
import scipy.signal
import six
# Upper bound, in bytes (2**18 = 256 KiB), on the scratch block processed
# per batch by the column loop in stft().
MAX_MEM_BLOCK = 2**8 * 2**10
def stft(y, n_fft=2048, hop_length=None, win_length=None, window=None,
         center=True, dtype=np.complex64):
    """Short-time Fourier transform (STFT).

    Returns a complex-valued matrix D such that `np.abs(D[f, t])` is the
    magnitude of frequency bin `f` at frame `t`, and `np.angle(D[f, t])`
    is the phase of frequency bin `f` at frame `t`.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)], real-valued
        the input signal (audio time series)
    n_fft : int > 0 [scalar]
        FFT window size
    hop_length : int > 0 [scalar]
        number of audio samples between STFT columns.
        If unspecified, defaults to `win_length / 4`.
    win_length : int <= n_fft [scalar]
        Each frame of audio is windowed by `window()`.
        The window will be of length `win_length` and then padded
        with zeros to match `n_fft`.
        If unspecified, defaults to ``win_length = n_fft``.
    window : None, function, np.ndarray [shape=(n_fft,)]
        - None (default): use an asymmetric Hann window
        - a window function, such as `scipy.signal.hanning`
        - a vector or array of length `n_fft`
    center : boolean
        - If `True`, the signal `y` is padded so that frame
          `D[:, t]` is centered at `y[t * hop_length]`.
        - If `False`, then `D[:, t]` begins at `y[t * hop_length]`
    dtype : numeric type
        Complex numeric type for `D`. Default is 64-bit complex.

    Returns
    -------
    D : np.ndarray [shape=(1 + n_fft/2, t), dtype=dtype]
        STFT matrix

    Raises
    ------
    ParameterError
        If `window` is supplied as a vector whose length does not
        match `n_fft`.

    See Also
    --------
    istft : Inverse STFT
    """
    # By default, use the entire frame
    if win_length is None:
        win_length = n_fft
    # Set the default hop, if it's not already specified
    if hop_length is None:
        hop_length = int(win_length / 4)
    if window is None:
        # Default is an asymmetric Hann window
        fft_window = scipy.signal.hann(win_length, sym=False)
    elif six.callable(window):
        # User supplied a window function
        fft_window = window(win_length)
    else:
        # User supplied a window vector.
        # Make sure it's an array:
        fft_window = np.asarray(window)
        # validate length compatibility
        if fft_window.size != n_fft:
            raise ParameterError('Size mismatch between n_fft and len(window)')
    # Pad the window out to n_fft size (pad_center returns the window
    # zero-padded symmetrically to length n_fft as an np array)
    fft_window = pad_center(fft_window, n_fft)
    # Reshape so that the window can be broadcast over frame columns
    fft_window = fft_window.reshape((-1, 1))
    # Pad the time series so that frames are centered
    if center:
        valid_audio(y)
        y = np.pad(y, int(n_fft // 2), mode='reflect')
    # Window the time series into overlapping frame columns.
    y_frames = frame(y, frame_length=n_fft, hop_length=hop_length)
    # Pre-allocate the STFT matrix. Fortran order: the block loop below
    # writes contiguous column slices.
    stft_matrix = np.empty((int(1 + n_fft // 2), y_frames.shape[1]),
                           dtype=dtype,
                           order='F')
    # how many columns can we fit within MAX_MEM_BLOCK bytes?
    n_columns = int(MAX_MEM_BLOCK / (stft_matrix.shape[0] *
                                     stft_matrix.itemsize))
    # Process the FFT in bounded-size column blocks to limit peak memory.
    for bl_s in range(0, stft_matrix.shape[1], n_columns):
        bl_t = min(bl_s + n_columns, stft_matrix.shape[1])
        # RFFT and Conjugate here to match phase from DPWE code
        stft_matrix[:, bl_s:bl_t] = fft.fft(fft_window *
                                            y_frames[:, bl_s:bl_t],
                                            axis=0)[:stft_matrix.shape[0]].conj()
    return stft_matrix
def istft(stft_matrix, hop_length=None, win_length=None, window=None,
          center=True, dtype=np.float32):
    """Inverse short-time Fourier transform.

    Converts a complex-valued spectrogram `stft_matrix` to time-series `y`
    by windowed overlap-add of the inverse FFT of each column.

    Parameters
    ----------
    stft_matrix : np.ndarray [shape=(1 + n_fft/2, t)]
        STFT matrix from `stft`
    hop_length : int > 0 [scalar]
        Number of frames between STFT columns.
        If unspecified, defaults to `win_length / 4`.
    win_length : int <= n_fft = 2 * (stft_matrix.shape[0] - 1)
        When reconstructing the time series, each frame is windowed
        according to the `window` function (see below).
        If unspecified, defaults to `n_fft`.
    window : None, function, np.ndarray [shape=(n_fft,)]
        - None (default): use an asymmetric Hann window * 2/3
        - a window function, such as `scipy.signal.hanning`
        - a user-specified window vector of length `n_fft`
    center : boolean
        - If `True`, `D` is assumed to have centered frames.
        - If `False`, `D` is assumed to have left-aligned frames.
    dtype : numeric type
        Real numeric type for `y`. Default is 32-bit float.

    Returns
    -------
    y : np.ndarray [shape=(n,)]
        time domain signal reconstructed from `stft_matrix`

    Raises
    ------
    ParameterError
        If `window` is supplied as a vector whose length does not
        match `n_fft`.

    See Also
    --------
    stft : Short-time Fourier Transform
    """
    # The FFT size is implied by the number of frequency bins.
    n_fft = 2 * (stft_matrix.shape[0] - 1)
    # By default, use the entire frame
    if win_length is None:
        win_length = n_fft
    # Set the default hop, if it's not already specified
    if hop_length is None:
        hop_length = int(win_length / 4)
    if window is None:
        # Default is an asymmetric Hann window.
        # 2/3 scaling is to make stft(istft(.)) identity for 25% hop
        ifft_window = scipy.signal.hann(win_length, sym=False) * (2.0 / 3)
    elif six.callable(window):
        # User supplied a windowing function
        ifft_window = window(win_length)
    else:
        # User supplied a window vector.
        # Make it into an array
        ifft_window = np.asarray(window)
        # Verify that the shape matches
        if ifft_window.size != n_fft:
            raise ParameterError('Size mismatch between n_fft and window size')
    # Pad out to match n_fft
    ifft_window = pad_center(ifft_window, n_fft)
    n_frames = stft_matrix.shape[1]
    # Output length covers the final frame plus its full n_fft extent.
    y = np.zeros(n_fft + hop_length * (n_frames - 1), dtype=dtype)
    for i in range(n_frames):
        sample = i * hop_length
        spec = stft_matrix[:, i].flatten()
        # Rebuild the full length-n_fft spectrum from the stored half
        # spectrum (conjugate + mirrored interior bins).
        spec = np.concatenate((spec.conj(), spec[-2:0:-1]), 0)
        # Windowed overlap-add of the inverse transform.
        ytmp = ifft_window * fft.ifft(spec).real
        y[sample:(sample+n_fft)] = y[sample:(sample+n_fft)] + ytmp
    if center:
        # Trim the reflection padding added by stft(center=True).
        y = y[int(n_fft // 2):-int(n_fft // 2)]
    return y
class LibrosaError(Exception):
    """Base class for all librosa-specific exceptions."""


class ParameterError(LibrosaError):
    """Raised when a caller supplies mal-formed or invalid parameters."""
def pad_center(data, size, axis=-1, **kwargs):
    '''Center `data` within a length-`size` span along `axis`, padding
    both sides (analogous to `str.center()`).

    Parameters
    ----------
    data : np.ndarray
        Array to be padded and centered
    size : int >= data.shape[axis] [scalar]
        Target length along `axis`
    axis : int
        Axis along which to pad and center the data
    kwargs : additional keyword arguments
        Passed through to `np.pad()`; `mode` defaults to 'constant'.

    Returns
    -------
    data_padded : np.ndarray
        `data` centered and padded to length `size` along the
        specified axis

    Raises
    ------
    ParameterError
        If `size < data.shape[axis]`

    See Also
    --------
    numpy.pad
    '''
    kwargs.setdefault('mode', 'constant')
    current = data.shape[axis]
    # Leading pad is the floor of half the slack; trailing pad absorbs
    # the remainder when the slack is odd.
    lead = int((size - current) // 2)
    if lead < 0:
        raise ParameterError(('Target size ({:d}) must be '
                              'at least input size ({:d})').format(size,
                                                                   current))
    pad_widths = [(0, 0)] * data.ndim
    pad_widths[axis] = (lead, size - current - lead)
    return np.pad(data, pad_widths, **kwargs)
def frame(y, frame_length=2048, hop_length=512):
    '''Slice a time series into overlapping frames.

    This implementation uses low-level stride manipulation to avoid
    redundant copies of the time series data: each frame is a view into
    the original buffer, not a copy.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        Time series to frame. Must be one-dimensional and contiguous
        in memory.
    frame_length : int > 0 [scalar]
        Length of the frame in samples
    hop_length : int > 0 [scalar]
        Number of samples to hop between frames

    Returns
    -------
    y_frames : np.ndarray [shape=(frame_length, N_FRAMES)]
        An array of frames sampled from `y`:
        `y_frames[i, j] == y[j * hop_length + i]`

    Raises
    ------
    ParameterError
        If `y` is not contiguous in memory, framing is invalid.
        If `hop_length < 1`, frames cannot advance.
        If `y` is too short to yield at least one full frame.
    '''
    if hop_length < 1:
        raise ParameterError('Invalid hop_length: {:d}'.format(hop_length))
    # as_strided assumes a C-contiguous buffer; anything else would alias
    # the wrong samples.
    if not y.flags['C_CONTIGUOUS']:
        raise ParameterError('Input buffer must be contiguous.')
    valid_audio(y)
    # Compute the number of frames that will fit. The end may get truncated.
    # FIX: use floor division instead of int(a / b). int() truncates
    # toward zero, so for frame_length - hop_length < len(y) < frame_length
    # the old expression produced n_frames == 1 and as_strided silently
    # read past the end of the buffer; floor division makes such inputs
    # raise the 'Buffer is too short' error below instead. Results for
    # all valid inputs (len(y) >= frame_length) are unchanged, and exact
    # integer arithmetic also avoids float rounding on very long inputs.
    n_frames = 1 + (len(y) - frame_length) // hop_length
    if n_frames < 1:
        raise ParameterError('Buffer is too short (n={:d})'
                             ' for frame_length={:d}'.format(len(y),
                                                             frame_length))
    # Vertical stride is one sample; horizontal stride is hop_length samples.
    y_frames = as_strided(y, shape=(frame_length, n_frames),
                          strides=(y.itemsize, hop_length * y.itemsize))
    return y_frames
def valid_audio(y, mono=False):
    """Check that `y` is a well-formed audio buffer.

    Parameters
    ----------
    y : np.ndarray
        The data to validate
    mono : bool
        If True, require a monophonic (1-D) buffer

    Raises
    ------
    ParameterError
        If `y` is not an `np.ndarray`; if its dimensionality is wrong
        (must be 1-D when `mono=True`, otherwise 1-D or 2-D); or if it
        contains non-finite (NaN / inf) values.
    """
    if not isinstance(y, np.ndarray):
        raise ParameterError('data must be of type numpy.ndarray')
    if mono:
        # Monophonic mode: only 1-D buffers are acceptable.
        if y.ndim != 1:
            raise ParameterError('Invalid shape for monophonic audio: '
                                 'ndim={:d}, shape={}'.format(y.ndim,
                                                              y.shape))
    elif y.ndim > 2:
        raise ParameterError('Invalid shape for audio: '
                             'ndim={:d}, shape={}'.format(y.ndim,
                                                          y.shape))
    if not np.isfinite(y).all():
        raise ParameterError('Audio buffer is not finite everywhere')
return True | 0.851583 | 0.664078 |
from sfm.routes.work_items import crud
from sfm.dependencies import get_db
from sfm.models import WorkItemRead, WorkItemCreate, WorkItemUpdate
from typing import List, Optional
from sqlmodel import Session
from fastapi import APIRouter, HTTPException, Depends, Path, Header
from opencensus.ext.azure.log_exporter import AzureLogHandler
import logging
from sfm.config import get_settings
from sfm.logger import create_logger
# Module-level singletons shared by every route handler in this file.
app_settings = get_settings()  # application configuration (from sfm.config)
logger = create_logger(__name__)  # module logger (from sfm.logger)
router = APIRouter()  # router holding the work-item endpoints below
@router.get("/", response_model=List[WorkItemRead])
def get_work_items(
    skip: int = 0,
    limit: int = 100,
    project_id: Optional[int] = None,
    project_name: Optional[str] = None,
    db: Session = Depends(get_db),
):
    """
    ## Get WorkItems
    List the WorkItems stored in the database.
    Query Parameters:
    ---
    - **skip**: number of items to skip at the beginning of the listing
    - **limit**: maximum number of items returned
    - **project_id**: restrict the listing to work items of the given project
    - **project_name**: restrict the listing to work items of the given project
    """
    matches = crud.get_all(
        db, skip=skip, limit=limit, project_id=project_id, project_name=project_name
    )
    if matches:
        return matches
    # An empty result is reported as 404 rather than an empty list.
    raise HTTPException(status_code=404, detail="WorkItems not found")  # pragma: no cover
@router.get("/{work_item_id}")
def get_work_item(work_item_id: int, db: Session = Depends(get_db)):
    """
    ## Get WorkItem by ID
    Fetch a single WorkItem by its database id.
    ---
    Path Parameters:
    - **work_item_id**: id of the work item to be requested
    """
    item = crud.get_by_id(db, work_item_id)
    if item:
        return item
    logger.debug("WorkItem not found")
    raise HTTPException(
        status_code=404, detail="WorkItem not found"
    )  # pragma: no cover
@router.post("/")
def create_work_item(
    work_item_data: WorkItemCreate,
    project_auth_token: str = Header(...),
    db: Session = Depends(get_db),
):
    """
    ## Create WorkItem entry in db
    Create a new WorkItem in the database from the request body.
    ---
    Request Headers:
    - **project_auth_token**: authentication key scoped to the WorkItem's project
    ---
    Request Body Parameters:
    - **category**: event category for the work item; one of
      "Deployment", "Issue", "Pull Request", "Production Defect"
    - **issue**: issue number the WorkItem is associated with
    - **start_time**: start time of the WorkItem
    - **end_time**: end time of the WorkItem (could be merged date or closed date depending on metric needs for the specified WorkItem category)
    - **duration_open**: duration of the WorkItem being open
    - **project_id**: project the WorkItem belongs to
    """
    # Create the database row; on success crud returns a truthy value that
    # is echoed back to the caller under "id".
    created = crud.create_work_item(db, work_item_data, project_auth_token)
    if not created:
        logger.error("WorkItem not stored correctly")
        return {"code": "error", "message": "Row Not Created"}  # pragma: no cover
    return {
        "code": "success",
        "id": created,
    }
# WorkItems have no name, so deletion is keyed on the database id.
@router.delete("/{work_item_id}")
def delete_work_item(
    work_item_id: int,
    project_auth_token: str = Header(...),
    db: Session = Depends(get_db),
):
    """
    ## Delete a WorkItem
    Delete the WorkItem with the given database id.
    ---
    Path Parameters:
    - **work_item_id**: database id of the WorkItem to delete
    ---
    Request Headers:
    - **project_auth_token**: authentication key scoped to the WorkItem's project
    """
    deleted = crud.delete_work_item(db, work_item_id, project_auth_token)
    if not deleted:  # pragma: no cover
        logger.error("WorkItem not deleted")
        return {
            "code": "error",
            "message": "WorkItem not deleted or multiple WorkItems with same work_item_id existed.",
        }
    return {
        "code": "success",
        "message": "WorkItem {} Deleted".format(work_item_id),
    }
@router.patch("/{work_item_id}")
def update_work_item(
    work_item_id: int,
    work_item_data: WorkItemUpdate,
    project_auth_token: str = Header(...),
    db: Session = Depends(get_db),
):
    """
    ## Update WorkItem
    Update an existing WorkItem in the database from the data provided in the request.
    ---
    Path Parameters:
    - **work_item_id**: database id of the WorkItem to update
    ---
    Request Headers:
    - **project_auth_token**: authentication key to allow for major changes to occur to project data (specific to the WorkItem's project)
    ---
    Request Body Parameters:
    - **category**: event category for the work item. Must be one of the following options:
        1. "Deployment"
        2. "Issue"
        3. "Pull Request"
        4. "Production Defect"
    - **issue**: sets the issue number that the workItem is associated with
    - **start_time**: sets the start time of the WorkItem
    - **end_time**: sets the end time of the WorkItem (could be merged date or closed date depending on metric needs for the specified WorkItem category)
    - **project_id**: sets project the WorkItem belongs to
    """
    updated_work_item = crud.update_work_item(
        db, work_item_id, work_item_data, project_auth_token
    )
    # BUG FIX: this previously tested `update_work_item` -- the route
    # function object itself, which is always truthy -- so the error branch
    # was unreachable and a failed update crashed with AttributeError on
    # `updated_work_item.id` instead of returning the error payload.
    if updated_work_item:
        return {
            "code": "success",
            "id": updated_work_item.id,
        }
    else:
        logger.error("Updated workitem not stored")
return {"code": "error", "message": "Row not updated"} # pragma: no cover | src/backend/sfm/routes/work_items/routes.py | from sfm.routes.work_items import crud
from sfm.dependencies import get_db
from sfm.models import WorkItemRead, WorkItemCreate, WorkItemUpdate
from typing import List, Optional
from sqlmodel import Session
from fastapi import APIRouter, HTTPException, Depends, Path, Header
from opencensus.ext.azure.log_exporter import AzureLogHandler
import logging
from sfm.config import get_settings
from sfm.logger import create_logger
# Module-level singletons shared by every route handler in this file.
app_settings = get_settings()  # application configuration (from sfm.config)
logger = create_logger(__name__)  # module logger (from sfm.logger)
router = APIRouter()  # router holding the work-item endpoints below
@router.get("/", response_model=List[WorkItemRead])
def get_work_items(
    skip: int = 0,
    limit: int = 100,
    project_id: Optional[int] = None,
    project_name: Optional[str] = None,
    db: Session = Depends(get_db),
):
    """
    ## Get WorkItems
    Get a list of all the WorkItems stored in the database.
    Query Parameters:
    ---
    - **skip**: sets the number of items to skip at the beginning of the listing
    - **limit**: sets the max number of items to be displayed when called
    - **project_id**: specifying **project_id** returns only work items in a given project
    - **project_name**: specifying **project_name** returns only work items in a given project
    """
    work_items = crud.get_all(
        db, skip=skip, limit=limit, project_id=project_id, project_name=project_name
    )
    # An empty result is reported as 404 rather than an empty list.
    if not work_items:
        raise HTTPException(
            status_code=404, detail="WorkItems not found"
        )  # pragma: no cover
    return work_items
@router.get("/{work_item_id}")
def get_work_item(work_item_id: int, db: Session = Depends(get_db)):
    """
    ## Get WorkItem by ID
    Get a specific WorkItem by specifying the ID in the path.
    ---
    Path Parameters:
    - **work_item_id**: id of the work item to be requested
    """
    work_item = crud.get_by_id(db, work_item_id)
    # Missing row -> 404.
    if not work_item:
        logger.debug("WorkItem not found")
        raise HTTPException(
            status_code=404, detail="WorkItem not found"
        )  # pragma: no cover
    return work_item
@router.post("/")
def create_work_item(
    work_item_data: WorkItemCreate,
    project_auth_token: str = Header(...),
    db: Session = Depends(get_db),
):
    """
    ## Create WorkItem entry in db
    Create a new WorkItem in the database by specifying data in the request.
    ---
    Request Headers:
    - **project_auth_token**: authentication key to allow for major changes to occur to project data (specific to the WorkItem's project)
    ---
    Request Body Parameters:
    - **category**: event category for the work item. Must be one of the following options:
        1. "Deployment"
        2. "Issue"
        3. "Pull Request"
        4. "Production Defect"
    - **issue**: sets the issue number that the workItem is associated with
    - **start_time**: sets the start time of the WorkItem
    - **end_time**: sets the end time of the WorkItem (could be merged date or closed date depending on metric needs for the specified WorkItem category)
    - **duration_open**: sets duration of WorkItem being open
    - **project_id**: sets project the WorkItem belongs to
    """
    # Creates the database row and stores it in the table; the truthy
    # return value is echoed back to the caller under "id".
    new_work_item_success = crud.create_work_item(
        db, work_item_data, project_auth_token
    )
    if new_work_item_success:
        return {
            "code": "success",
            "id": new_work_item_success,
        }
    else:
        logger.error("WorkItem not stored correctly")
        return {"code": "error", "message": "Row Not Created"}  # pragma: no cover
# Since WorkItem has no name, use database id to delete item
@router.delete("/{work_item_id}")
def delete_work_item(
    work_item_id: int,
    project_auth_token: str = Header(...),
    db: Session = Depends(get_db),
):
    """
    ## Delete a WorkItem
    Pass a WorkItem database id value in the path and the WorkItem will be deleted from the database.
    ---
    Path Parameters:
    - **work_item_id**: database id of the WorkItem to delete
    ---
    Request Headers:
    - **project_auth_token**: authentication key to allow for major changes to occur to project data (specific to the WorkItem's project)
    """
    response = crud.delete_work_item(db, work_item_id, project_auth_token)
    if response:
        return {
            "code": "success",
            "message": "WorkItem {} Deleted".format(work_item_id),
        }
    else:  # pragma: no cover
        logger.error("WorkItem not deleted")
        return {
            "code": "error",
            "message": "WorkItem not deleted or multiple WorkItems with same work_item_id existed.",
        }
@router.patch("/{work_item_id}")
def update_work_item(
    work_item_id: int,
    work_item_data: WorkItemUpdate,
    project_auth_token: str = Header(...),
    db: Session = Depends(get_db),
):
    """
    ## Update WorkItem
    Update an existing WorkItem in the database from the data provided in the request.
    ---
    Path Parameters:
    - **work_item_id**: database id of the WorkItem to update
    ---
    Request Headers:
    - **project_auth_token**: authentication key to allow for major changes to occur to project data (specific to the WorkItem's project)
    ---
    Request Body Parameters:
    - **category**: event category for the work item. Must be one of the following options:
        1. "Deployment"
        2. "Issue"
        3. "Pull Request"
        4. "Production Defect"
    - **issue**: sets the issue number that the workItem is associated with
    - **start_time**: sets the start time of the WorkItem
    - **end_time**: sets the end time of the WorkItem (could be merged date or closed date depending on metric needs for the specified WorkItem category)
    - **project_id**: sets project the WorkItem belongs to
    """
    updated_work_item = crud.update_work_item(
        db, work_item_id, work_item_data, project_auth_token
    )
    # BUG FIX: this previously tested `update_work_item` -- the route
    # function object itself, which is always truthy -- so the error branch
    # was unreachable and a failed update crashed with AttributeError on
    # `updated_work_item.id` instead of returning the error payload.
    if updated_work_item:
        return {
            "code": "success",
            "id": updated_work_item.id,
        }
    else:
        logger.error("Updated workitem not stored")
return {"code": "error", "message": "Row not updated"} # pragma: no cover | 0.836154 | 0.165728 |
import torch
import torch.nn as nn
from collections import namedtuple
import functools
# Layer specifications consumed by `mobilenet` below -- one namedtuple per layer type.
Conv = namedtuple('Conv', ['stride', 'depth'])  # plain 3x3 conv + BN + ReLU (built as _conv_bn)
DepthSepConv = namedtuple('DepthSepConv', ['stride', 'depth'])  # depthwise-separable block (built as _conv_dw)
InvertedResidual = namedtuple('InvertedResidual', ['stride', 'depth', 'num', 't'])  # t is the expansion factor; num is the repeat count
# MobileNet-V1: stem conv followed by 13 depthwise-separable layers.
V1_CONV_DEFS = [
    Conv(stride=2, depth=32),
    DepthSepConv(stride=1, depth=64),
    DepthSepConv(stride=2, depth=128),
    DepthSepConv(stride=1, depth=128),
    DepthSepConv(stride=2, depth=256),
    DepthSepConv(stride=1, depth=256),
    DepthSepConv(stride=2, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=2, depth=1024),
    DepthSepConv(stride=1, depth=1024)
]
# Variant: like V1, but the first 512-wide layer uses stride 1 (no
# downsampling at that stage).  Naming presumably encodes stride/layer
# indices (S#L#) -- TODO confirm with the training configs.
V1_CONV_DEFS_S1L6 = [
    Conv(stride=2, depth=32),
    DepthSepConv(stride=1, depth=64),
    DepthSepConv(stride=2, depth=128),
    DepthSepConv(stride=1, depth=128),
    DepthSepConv(stride=2, depth=256),
    DepthSepConv(stride=1, depth=256),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=2, depth=1024),
    DepthSepConv(stride=1, depth=1024)
]
# Variant: stride-1 stem conv AND stride-1 first 512-wide layer.
V1_CONV_DEFS_S2L0_6 = [
    Conv(stride=1, depth=32),
    DepthSepConv(stride=1, depth=64),
    DepthSepConv(stride=2, depth=128),
    DepthSepConv(stride=1, depth=128),
    DepthSepConv(stride=2, depth=256),
    DepthSepConv(stride=1, depth=256),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=2, depth=1024),
    DepthSepConv(stride=1, depth=1024)
]
# Variant: the first 256-wide layer uses stride 1.
V1_CONV_DEFS_S1L4 = [
    Conv(stride=2, depth=32),
    DepthSepConv(stride=1, depth=64),
    DepthSepConv(stride=2, depth=128),
    DepthSepConv(stride=1, depth=128),
    DepthSepConv(stride=1, depth=256),
    DepthSepConv(stride=1, depth=256),
    DepthSepConv(stride=2, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=2, depth=1024),
    DepthSepConv(stride=1, depth=1024)
]
# Variant: stride-1 stem conv only.
V1_CONV_DEFS_S1L0 = [
    Conv(stride=1, depth=32),
    DepthSepConv(stride=1, depth=64),
    DepthSepConv(stride=2, depth=128),
    DepthSepConv(stride=1, depth=128),
    DepthSepConv(stride=2, depth=256),
    DepthSepConv(stride=1, depth=256),
    DepthSepConv(stride=2, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=2, depth=1024),
    DepthSepConv(stride=1, depth=1024)
]
# MobileNet-V2: stem conv followed by inverted-residual bottleneck stages.
V2_CONV_DEFS = [
    Conv(stride=2, depth=32),
    InvertedResidual(stride=1, depth=16, num=1, t=1),
    InvertedResidual(stride=2, depth=24, num=2, t=6),
    InvertedResidual(stride=2, depth=32, num=3, t=6),
    InvertedResidual(stride=2, depth=64, num=4, t=6),
    InvertedResidual(stride=1, depth=96, num=3, t=6),
    InvertedResidual(stride=2, depth=160, num=3, t=6),
    InvertedResidual(stride=1, depth=320, num=1, t=6),
]
class _conv_bn(nn.Module):
def __init__(self, inp, oup, stride):
super(_conv_bn, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True),
)
self.depth = oup
def forward(self, x):
return self.conv(x)
class _conv_dw(nn.Module):
def __init__(self, inp, oup, stride):
super(_conv_dw, self).__init__()
self.conv = nn.Sequential(
# dw
nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
nn.ReLU(inplace=True),
# pw
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True),
)
self.depth = oup
def forward(self, x):
return self.conv(x)
class _inverted_residual_bottleneck(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(_inverted_residual_bottleneck, self).__init__()
self.use_res_connect = stride == 1 and inp == oup
self.conv = nn.Sequential(
# pw
nn.Conv2d(inp, inp * expand_ratio, 1, 1, 0, bias=False),
nn.BatchNorm2d(inp * expand_ratio),
nn.ReLU6(inplace=True),
# dw
nn.Conv2d(inp * expand_ratio, inp * expand_ratio, 3, stride, 1, groups=inp * expand_ratio, bias=False),
nn.BatchNorm2d(inp * expand_ratio),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(inp * expand_ratio, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
self.depth = oup
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
def mobilenet(conv_defs, depth_multiplier=1.0, min_depth=8):
    """Materialize a list of layer modules from a conv_defs specification.

    Parameters
    ----------
    conv_defs : sequence of Conv / DepthSepConv / InvertedResidual
        Architecture specification; entries of any other type are skipped.
    depth_multiplier : float
        Width multiplier applied to every layer's channel count.
    min_depth : int
        Lower bound on any layer's channel count after scaling.

    Returns
    -------
    list of nn.Module
        The layers in order; input is assumed to have 3 channels.
    """
    def scaled(channels):
        # Width-multiplier rule: scale, truncate, clamp from below.
        return max(int(channels * depth_multiplier), min_depth)

    layers = []
    in_channels = 3
    for spec in conv_defs:
        if isinstance(spec, Conv):
            out_channels = scaled(spec.depth)
            layers.append(_conv_bn(in_channels, out_channels, spec.stride))
            in_channels = out_channels
        elif isinstance(spec, DepthSepConv):
            out_channels = scaled(spec.depth)
            layers.append(_conv_dw(in_channels, out_channels, spec.stride))
            in_channels = out_channels
        elif isinstance(spec, InvertedResidual):
            out_channels = scaled(spec.depth)
            # Only the first block of a repeated group applies the stride.
            for idx in range(spec.num):
                block_stride = spec.stride if idx == 0 else 1
                layers.append(_inverted_residual_bottleneck(
                    in_channels, out_channels, block_stride, spec.t))
                in_channels = out_channels
    return layers
def wrapped_partial(func, *args, **kwargs):
    """Like functools.partial, but copies `func`'s metadata
    (__name__, __doc__, ...) onto the returned callable."""
    wrapper = functools.partial(func, *args, **kwargs)
    functools.update_wrapper(wrapper, func)
    return wrapper
# Width-multiplier presets for MobileNet-V1.
mobilenet_v1 = wrapped_partial(mobilenet, conv_defs=V1_CONV_DEFS, depth_multiplier=1.0)
mobilenet_v1_075 = wrapped_partial(mobilenet, conv_defs=V1_CONV_DEFS, depth_multiplier=0.75)
mobilenet_v1_050 = wrapped_partial(mobilenet, conv_defs=V1_CONV_DEFS, depth_multiplier=0.50)
mobilenet_v1_025 = wrapped_partial(mobilenet, conv_defs=V1_CONV_DEFS, depth_multiplier=0.25)
# Width-multiplier presets for MobileNet-V2.
mobilenet_v2 = wrapped_partial(mobilenet, conv_defs=V2_CONV_DEFS, depth_multiplier=1.0)
mobilenet_v2_075 = wrapped_partial(mobilenet, conv_defs=V2_CONV_DEFS, depth_multiplier=0.75)
mobilenet_v2_050 = wrapped_partial(mobilenet, conv_defs=V2_CONV_DEFS, depth_multiplier=0.50)
mobilenet_v2_025 = wrapped_partial(mobilenet, conv_defs=V2_CONV_DEFS, depth_multiplier=0.25)
### Added definitions for expanded mobilenet (reduced-stride V1 variants).
mobilenet_v1_S1L6 = wrapped_partial(mobilenet, conv_defs=V1_CONV_DEFS_S1L6, depth_multiplier=1.0)
mobilenet_v1_S1L4 = wrapped_partial(mobilenet, conv_defs=V1_CONV_DEFS_S1L4, depth_multiplier=1.0)
mobilenet_v1_S1L0 = wrapped_partial(mobilenet, conv_defs=V1_CONV_DEFS_S1L0, depth_multiplier=1.0)
mobilenet_v1_S2L0_6 = wrapped_partial(mobilenet, conv_defs=V1_CONV_DEFS_S2L0_6, depth_multiplier=1.0) | lib/modeling/nets/mobilenet.py | import torch
import torch.nn as nn
from collections import namedtuple
import functools
# Layer-spec records consumed by mobilenet() when assembling the body:
Conv = namedtuple('Conv', ['stride', 'depth'])  # plain 3x3 conv + BN + ReLU
DepthSepConv = namedtuple('DepthSepConv', ['stride', 'depth'])  # depthwise-separable conv
InvertedResidual = namedtuple('InvertedResidual', ['stride', 'depth', 'num', 't'])  # t is the expansion factor; num = repeats
# Standard MobileNet-v1 schedule: conv stem followed by 13 depthwise-separable
# convolutions, downsampling (stride 2) at layers 0, 2, 4, 6 and 12.
V1_CONV_DEFS = [
    Conv(stride=2, depth=32),
    DepthSepConv(stride=1, depth=64),
    DepthSepConv(stride=2, depth=128),
    DepthSepConv(stride=1, depth=128),
    DepthSepConv(stride=2, depth=256),
    DepthSepConv(stride=1, depth=256),
    DepthSepConv(stride=2, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=2, depth=1024),
    DepthSepConv(stride=1, depth=1024)
]
# Variant of V1_CONV_DEFS: layer 6 uses stride 1 instead of 2 (one less
# downsampling stage, larger feature maps from layer 6 onward).
V1_CONV_DEFS_S1L6 = [
    Conv(stride=2, depth=32),
    DepthSepConv(stride=1, depth=64),
    DepthSepConv(stride=2, depth=128),
    DepthSepConv(stride=1, depth=128),
    DepthSepConv(stride=2, depth=256),
    DepthSepConv(stride=1, depth=256),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=2, depth=1024),
    DepthSepConv(stride=1, depth=1024)
]
# Variant of V1_CONV_DEFS: both the stem (layer 0) and layer 6 use stride 1.
V1_CONV_DEFS_S2L0_6 = [
    Conv(stride=1, depth=32),
    DepthSepConv(stride=1, depth=64),
    DepthSepConv(stride=2, depth=128),
    DepthSepConv(stride=1, depth=128),
    DepthSepConv(stride=2, depth=256),
    DepthSepConv(stride=1, depth=256),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=2, depth=1024),
    DepthSepConv(stride=1, depth=1024)
]
# Variant of V1_CONV_DEFS: layer 4 uses stride 1 instead of 2.
V1_CONV_DEFS_S1L4 = [
    Conv(stride=2, depth=32),
    DepthSepConv(stride=1, depth=64),
    DepthSepConv(stride=2, depth=128),
    DepthSepConv(stride=1, depth=128),
    DepthSepConv(stride=1, depth=256),
    DepthSepConv(stride=1, depth=256),
    DepthSepConv(stride=2, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=2, depth=1024),
    DepthSepConv(stride=1, depth=1024)
]
# Variant of V1_CONV_DEFS: the stem (layer 0) uses stride 1 instead of 2.
V1_CONV_DEFS_S1L0 = [
    Conv(stride=1, depth=32),
    DepthSepConv(stride=1, depth=64),
    DepthSepConv(stride=2, depth=128),
    DepthSepConv(stride=1, depth=128),
    DepthSepConv(stride=2, depth=256),
    DepthSepConv(stride=1, depth=256),
    DepthSepConv(stride=2, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=1, depth=512),
    DepthSepConv(stride=2, depth=1024),
    DepthSepConv(stride=1, depth=1024)
]
# MobileNet-v2 schedule: conv stem followed by inverted-residual bottleneck
# stages, each repeated `num` times with expansion factor `t`.
V2_CONV_DEFS = [
    Conv(stride=2, depth=32),
    InvertedResidual(stride=1, depth=16, num=1, t=1),
    InvertedResidual(stride=2, depth=24, num=2, t=6),
    InvertedResidual(stride=2, depth=32, num=3, t=6),
    InvertedResidual(stride=2, depth=64, num=4, t=6),
    InvertedResidual(stride=1, depth=96, num=3, t=6),
    InvertedResidual(stride=2, depth=160, num=3, t=6),
    InvertedResidual(stride=1, depth=320, num=1, t=6),
]
class _conv_bn(nn.Module):
def __init__(self, inp, oup, stride):
super(_conv_bn, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True),
)
self.depth = oup
def forward(self, x):
return self.conv(x)
class _conv_dw(nn.Module):
def __init__(self, inp, oup, stride):
super(_conv_dw, self).__init__()
self.conv = nn.Sequential(
# dw
nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
nn.ReLU(inplace=True),
# pw
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True),
)
self.depth = oup
def forward(self, x):
return self.conv(x)
class _inverted_residual_bottleneck(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(_inverted_residual_bottleneck, self).__init__()
self.use_res_connect = stride == 1 and inp == oup
self.conv = nn.Sequential(
# pw
nn.Conv2d(inp, inp * expand_ratio, 1, 1, 0, bias=False),
nn.BatchNorm2d(inp * expand_ratio),
nn.ReLU6(inplace=True),
# dw
nn.Conv2d(inp * expand_ratio, inp * expand_ratio, 3, stride, 1, groups=inp * expand_ratio, bias=False),
nn.BatchNorm2d(inp * expand_ratio),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(inp * expand_ratio, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
self.depth = oup
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
def mobilenet(conv_defs, depth_multiplier=1.0, min_depth=8):
    """Build a MobileNet body as a plain list of modules.

    Walks `conv_defs` (Conv / DepthSepConv / InvertedResidual records),
    instantiating the matching block for each entry while threading the
    channel count from one layer to the next.  Channel widths are scaled by
    `depth_multiplier` but never drop below `min_depth`.  Entries of any
    other type are silently skipped, matching the original dispatch.
    """
    def scaled(d):
        # Apply the width multiplier with a floor of `min_depth` channels.
        return max(int(d * depth_multiplier), min_depth)

    layers = []
    in_channels = 3  # RGB input
    for spec in conv_defs:
        if isinstance(spec, Conv):
            out_channels = scaled(spec.depth)
            layers.append(_conv_bn(in_channels, out_channels, spec.stride))
            in_channels = out_channels
        elif isinstance(spec, DepthSepConv):
            out_channels = scaled(spec.depth)
            layers.append(_conv_dw(in_channels, out_channels, spec.stride))
            in_channels = out_channels
        elif isinstance(spec, InvertedResidual):
            out_channels = scaled(spec.depth)
            # Only the first repeat of a stage carries the stage stride.
            for repeat in range(spec.num):
                layers.append(_inverted_residual_bottleneck(
                    in_channels, out_channels,
                    spec.stride if repeat == 0 else 1, spec.t))
                in_channels = out_channels
    return layers
def wrapped_partial(func, *args, **kwargs):
    """Return a ``functools.partial`` of *func* that keeps *func*'s metadata.

    Unlike a bare partial, the returned callable carries ``__name__``,
    ``__doc__`` etc. copied from the wrapped function, so the factories built
    from it introspect nicely.
    """
    # update_wrapper returns the wrapper it just populated, so the whole
    # wrap-and-copy can be a single expression.
    return functools.update_wrapper(functools.partial(func, *args, **kwargs), func)
# Ready-made network factories: mobilenet() pre-bound to a conv-def table and
# a width (depth) multiplier.  wrapped_partial preserves mobilenet's metadata
# on each factory.
mobilenet_v1 = wrapped_partial(mobilenet, conv_defs=V1_CONV_DEFS, depth_multiplier=1.0)
mobilenet_v1_075 = wrapped_partial(mobilenet, conv_defs=V1_CONV_DEFS, depth_multiplier=0.75)
mobilenet_v1_050 = wrapped_partial(mobilenet, conv_defs=V1_CONV_DEFS, depth_multiplier=0.50)
mobilenet_v1_025 = wrapped_partial(mobilenet, conv_defs=V1_CONV_DEFS, depth_multiplier=0.25)
mobilenet_v2 = wrapped_partial(mobilenet, conv_defs=V2_CONV_DEFS, depth_multiplier=1.0)
mobilenet_v2_075 = wrapped_partial(mobilenet, conv_defs=V2_CONV_DEFS, depth_multiplier=0.75)
mobilenet_v2_050 = wrapped_partial(mobilenet, conv_defs=V2_CONV_DEFS, depth_multiplier=0.50)
mobilenet_v2_025 = wrapped_partial(mobilenet, conv_defs=V2_CONV_DEFS, depth_multiplier=0.25)
### Added definitions for expanded mobilenet
# Stride-reduced v1 variants (see the V1_CONV_DEFS_* tables); naming appears
# to encode which layer's stride was set to 1 -- confirm with the author.
mobilenet_v1_S1L6 = wrapped_partial(mobilenet, conv_defs=V1_CONV_DEFS_S1L6, depth_multiplier=1.0)
mobilenet_v1_S1L4 = wrapped_partial(mobilenet, conv_defs=V1_CONV_DEFS_S1L4, depth_multiplier=1.0)
mobilenet_v1_S1L0 = wrapped_partial(mobilenet, conv_defs=V1_CONV_DEFS_S1L0, depth_multiplier=1.0)
mobilenet_v1_S2L0_6 = wrapped_partial(mobilenet, conv_defs=V1_CONV_DEFS_S2L0_6, depth_multiplier=1.0) | 0.919681 | 0.600247 |
from __future__ import absolute_import
import json
import sys
from functools import partial
from io import BytesIO
from unittest import mock
from urllib.parse import urlencode
from falcon import HTTP_METHODS
from falcon.testing import StartResponseMock, create_environ
from hug import output_format
from hug.api import API
def call(method, api_or_module, url, body='', headers=None, **params):
    """Simulates a round-trip call against the given API / URL.

    Builds a WSGI environ for `url`/`method`/`params`, runs it through the
    hug/falcon server for `api_or_module` and returns the StartResponseMock
    with `.data` (decoded, and JSON-parsed when the response is JSON) and
    `.content_type` attached.
    """
    api = API(api_or_module).http.server()
    response = StartResponseMock()
    headers = {} if headers is None else headers
    # Non-string bodies are serialized to JSON unless the caller explicitly
    # declared a non-JSON content type.
    if not isinstance(body, str) and 'json' in headers.get('content-type', 'application/json'):
        body = output_format.json(body)
        headers.setdefault('content-type', 'application/json')
    result = api(create_environ(path=url, method=method, headers=headers, query_string=urlencode(params, True),
                                body=body),
                 response)
    if result:
        try:
            response.data = result[0].decode('utf8')
        except TypeError:
            # Chunked/streamed response: decode each chunk and join them.
            response.data = []
            for chunk in result:
                response.data.append(chunk.decode('utf8'))
            response.data = "".join(response.data)
        except UnicodeDecodeError:
            # Binary payload: hand back the raw bytes untouched.
            response.data = result[0]
        response.content_type = response.headers_dict['content-type']
        if response.content_type == 'application/json':
            response.data = json.loads(response.data)
    return response
# Expose one helper per HTTP verb (get, post, ...) at module level; each is
# call() with the method pre-bound and a verb-specific docstring.
for method in HTTP_METHODS:
    tester = partial(call, method)
    tester.__doc__ = """Simulates a round-trip HTTP {0} against the given API / URL""".format(method.upper())
    globals()[method.lower()] = tester
def cli(method, *kargs, **arguments):
    """Simulates testing a hug cli method from the command line.

    Positional `kargs` become positional CLI arguments; keyword `arguments`
    become ``--name value`` options (bare ``--name`` for booleans).  Returns
    the first collected output, or the raised exception when the command
    failed, or None when nothing was produced.
    """
    collect_output = arguments.pop('collect_output', True)
    command_args = [method.__name__] + list(kargs)
    for name, values in arguments.items():
        if not isinstance(values, (tuple, list)):
            values = (values, )
        for value in values:
            command_args.append('--{0}'.format(name))
            # Booleans are flag-style: the option's presence carries the value.
            if value not in (True, False):
                command_args.append('{0}'.format(value))
    old_sys_argv = sys.argv
    sys.argv = [str(part) for part in command_args]
    # BUG FIX: the attribute being monkey-patched below is `outputs`, so that
    # is what must be saved and restored.  The old code saved `.output`,
    # which meant the collecting lambda (and this call's result list) stayed
    # installed on the interface forever.
    old_outputs = method.interface.cli.outputs
    to_return = []
    if collect_output:
        method.interface.cli.outputs = lambda data: to_return.append(data)
    try:
        method.interface.cli()
    except Exception as e:
        to_return = (e, )
    finally:
        # Always undo the patching, even when the command raised.
        method.interface.cli.outputs = old_outputs
        sys.argv = old_sys_argv
    return to_return and to_return[0] or None
import json
import sys
from functools import partial
from io import BytesIO
from unittest import mock
from urllib.parse import urlencode
from falcon import HTTP_METHODS
from falcon.testing import StartResponseMock, create_environ
from hug import output_format
from hug.api import API
def call(method, api_or_module, url, body='', headers=None, **params):
"""Simulates a round-trip call against the given API / URL"""
api = API(api_or_module).http.server()
response = StartResponseMock()
headers = {} if headers is None else headers
if not isinstance(body, str) and 'json' in headers.get('content-type', 'application/json'):
body = output_format.json(body)
headers.setdefault('content-type', 'application/json')
result = api(create_environ(path=url, method=method, headers=headers, query_string=urlencode(params, True),
body=body),
response)
if result:
try:
response.data = result[0].decode('utf8')
except TypeError:
response.data = []
for chunk in result:
response.data.append(chunk.decode('utf8'))
response.data = "".join(response.data)
except UnicodeDecodeError:
response.data = result[0]
response.content_type = response.headers_dict['content-type']
if response.content_type == 'application/json':
response.data = json.loads(response.data)
return response
# Expose one helper per HTTP verb (get, post, ...) at module level; each is
# call() with the method pre-bound and a verb-specific docstring.
for method in HTTP_METHODS:
    tester = partial(call, method)
    tester.__doc__ = """Simulates a round-trip HTTP {0} against the given API / URL""".format(method.upper())
    globals()[method.lower()] = tester
def cli(method, *kargs, **arguments):
    """Simulates testing a hug cli method from the command line.

    Positional `kargs` become positional CLI arguments; keyword `arguments`
    become ``--name value`` options (bare ``--name`` for booleans).  Returns
    the first collected output, or the raised exception when the command
    failed, or None when nothing was produced.
    """
    collect_output = arguments.pop('collect_output', True)
    command_args = [method.__name__] + list(kargs)
    for name, values in arguments.items():
        if not isinstance(values, (tuple, list)):
            values = (values, )
        for value in values:
            command_args.append('--{0}'.format(name))
            # Booleans are flag-style: the option's presence carries the value.
            if value not in (True, False):
                command_args.append('{0}'.format(value))
    old_sys_argv = sys.argv
    sys.argv = [str(part) for part in command_args]
    # BUG FIX: the attribute being monkey-patched below is `outputs`, so that
    # is what must be saved and restored.  The old code saved `.output`,
    # which meant the collecting lambda (and this call's result list) stayed
    # installed on the interface forever.
    old_outputs = method.interface.cli.outputs
    to_return = []
    if collect_output:
        method.interface.cli.outputs = lambda data: to_return.append(data)
    try:
        method.interface.cli()
    except Exception as e:
        to_return = (e, )
    finally:
        # Always undo the patching, even when the command raised.
        method.interface.cli.outputs = old_outputs
        sys.argv = old_sys_argv
    return to_return and to_return[0] or None
from __future__ import print_function
import numpy as np
import unittest
import time
import argparse
import os
import sys
import subprocess
import traceback
import functools
import pickle
from contextlib import closing
import paddle.fluid as fluid
import paddle.fluid.unique_name as nameGen
from paddle.fluid import core
def DataTypeCast(date_type):
    """Map a Paddle data-type name (e.g. ``"float32"``) to the numpy type.

    Raises ValueError for names outside the supported set.
    """
    supported = {
        "float16": np.float16,
        "float32": np.float32,
        "float64": np.float64,
        "int8": np.int8,
        "int16": np.int16,
        "int32": np.int32,
        "uint8": np.uint8,
    }
    if date_type not in supported:
        raise ValueError("This data type is not support!")
    return supported[date_type]
class TestCollectiveRunnerBase(object):
    """Base class for a single trainer process in an MLU collective test.

    Subclasses implement get_model() to build the program under test;
    run_trainer() wires up the CNCL communicator, runs the program on random
    input and pickles the fetch results to stdout for the parent process.
    NOTE(review): wait_server_ready uses `socket`/`closing`, which this file
    imports only *after* this class definition -- fine at call time, but
    fragile; confirm import ordering if refactoring.
    """
    def get_model(self, train_prog, startup_prog):
        # Must be overridden: builds the network/op under test and returns
        # the variable to fetch.
        raise NotImplementedError(
            "get model should be implemented by child class.")
    def wait_server_ready(self, endpoints):
        # Block until a TCP connect succeeds for every "ip:port" endpoint,
        # retrying every 3 seconds.
        while True:
            all_ok = True
            not_ready_endpoints = []
            for ep in endpoints:
                ip_port = ep.split(":")
                with closing(
                        socket.socket(socket.AF_INET,
                                      socket.SOCK_STREAM)) as sock:
                    sock.settimeout(2)
                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                    if hasattr(socket, 'SO_REUSEPORT'):
                        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT,
                                        1)
                    result = sock.connect_ex((ip_port[0], int(ip_port[1])))
                    if result != 0:
                        all_ok = False
                        not_ready_endpoints.append(ep)
            if not all_ok:
                sys.stderr.write("server not ready, wait 3 sec to retry...\n")
                sys.stderr.write("not ready endpoints:" + str(
                    not_ready_endpoints) + "\n")
                sys.stderr.flush()
                time.sleep(3)
            else:
                break
    #endpoints should be ["ip1:port1","ip2:port2"]
    def initCommunicator(self, program, rank, nranks, wait_port,
                         current_endpoint, endpoints):
        # Append the CNCL id-generation and comm-init ops to `program` so the
        # communicator is established when the program runs.  Rank 0
        # optionally waits for the peer endpoints to come up first.
        # NOTE(review): relies on self.global_ring_id being set by the
        # subclass -- confirm.
        other_endpoints = endpoints[:]
        other_endpoints.remove(current_endpoint)
        if rank == 0 and wait_port:
            self.wait_server_ready(other_endpoints)
        block = program.global_block()
        cncl_id_var = block.create_var(
            name=nameGen.generate('cncl_id'),
            persistable=True,
            type=core.VarDesc.VarType.RAW)
        block.append_op(
            type='c_gen_cncl_id',
            inputs={},
            outputs={'Out': cncl_id_var},
            attrs={
                'rank': rank,
                'endpoint': current_endpoint,
                'other_endpoints': other_endpoints
            })
        block.append_op(
            type='c_comm_init',
            inputs={'X': cncl_id_var},
            outputs={},
            attrs={
                'nranks': nranks,
                'rank': rank,
                'ring_id': self.global_ring_id
            })
    def run_trainer(self, args):
        """Run one trainer: init comm, execute the model on a random
        (10, 1000) input seeded with this pid, and pickle results to stdout."""
        train_prog = fluid.Program()
        startup_prog = fluid.Program()
        endpoints = args["endpoints"].split(",")
        rank = args["trainerid"]
        current_endpoint = args["currentendpoint"]
        nranks = 2
        self.initCommunicator(startup_prog, rank, nranks, True,
                              current_endpoint, endpoints)
        self.rank = rank
        result = self.get_model(train_prog, startup_prog)
        device_id = int(os.getenv("FLAGS_selected_mlus", "0"))
        place = fluid.MLUPlace(device_id)
        exe = fluid.Executor(place)
        exe.run(startup_prog)
        # Seed with the pid so the parent can reproduce each trainer's input
        # (see TestDistBase.check_with_place).
        np.random.seed(os.getpid())
        np_data_type = DataTypeCast(args["data_type"])
        indata = np.random.random((10, 1000)).astype(np_data_type)
        out = exe.run(train_prog,
                      feed={'tindata': indata},
                      fetch_list=[result.name])
        # Binary pickle over stdout; the parent unpickles it in _run_cluster.
        sys.stdout.buffer.write(pickle.dumps(out))
def runtime_main(test_class, col_type, sub_type):
    """Entry point for a spawned trainer process.

    Reads the distributed-launch settings from the environment, packs them
    into an args dict and hands control to test_class().run_trainer.
    (`sub_type` is accepted for signature compatibility but unused.)
    """
    model = test_class()
    args = {
        "deviceid": os.getenv("FLAGS_selected_mlus"),
        "trainerid": int(os.getenv("PADDLE_TRAINER_ID")),
        "trainernum": int(os.getenv("PADDLE_TRAINERS_NUM")),
        "endpoints": os.getenv('PADDLE_TRAINER_ENDPOINTS'),
        "currentendpoint": os.getenv("PADDLE_CURRENT_ENDPOINT"),
        "col_type": col_type,
        "data_type": os.getenv("DATA_TYPE"),
    }
    model.run_trainer(args)
import paddle.compat as cpt
import socket
from contextlib import closing
class TestDistBase(unittest.TestCase):
    """Base for two-trainer MLU collective-communication tests.

    Spawns two trainer subprocesses running a given model file (each seeded
    with its own pid), unpickles their stdout and compares the results with
    the numpy-computed expectation for the requested collective op.
    """

    def setUp(self):
        self._port_set = set()
        self._trainers = 2
        # Two distinct localhost endpoints, one per trainer.
        self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
            self._find_free_port(), self._find_free_port())
        self._python_interp = sys.executable

    def _find_free_port(self):
        """Return a free TCP port, never repeating within this instance."""

        def __free_port():
            with closing(socket.socket(socket.AF_INET,
                                       socket.SOCK_STREAM)) as s:
                s.bind(('', 0))  # port 0: let the OS pick any free port
                return s.getsockname()[1]

        while True:
            port = __free_port()
            if port not in self._port_set:
                self._port_set.add(port)
                return port

    def _run_cluster(self, model_file, envs):
        """Run `model_file` in two trainer subprocesses.

        Returns (tr0_out, tr1_out, pid0, pid1), where the outputs are the
        unpickled stdout of each trainer.  Stderr goes to /tmp/tr{0,1}_err.log.
        """
        w0_ep, w1_ep = self._ps_endpoints.split(",")
        env0 = {
            "FLAGS_selected_mlus": "0",
            "PADDLE_TRAINER_ID": "0",
            "PADDLE_TRAINERS_NUM": "2",
            "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints,
            "PADDLE_CURRENT_ENDPOINT": w0_ep
        }
        env1 = {
            "FLAGS_selected_mlus": "1",
            "PADDLE_TRAINER_ID": "1",
            "PADDLE_TRAINERS_NUM": "2",
            "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints,
            "PADDLE_CURRENT_ENDPOINT": w1_ep
        }
        env0.update(envs)
        env1.update(envs)
        tr_cmd = "%s %s"
        tr0_cmd = tr_cmd % (self._python_interp, model_file)
        tr1_cmd = tr_cmd % (self._python_interp, model_file)
        tr0_pipe = open("/tmp/tr0_err.log", "wb")
        tr1_pipe = open("/tmp/tr1_err.log", "wb")
        tr0_proc = subprocess.Popen(
            tr0_cmd.strip().split(),
            stdout=subprocess.PIPE,
            stderr=tr0_pipe,
            env=env0)
        # BUG FIX: trainer 1 used to be launched with tr0_cmd.  The two
        # command strings happen to be identical today, so behavior was
        # unchanged, but using the wrong one would silently break as soon as
        # the commands diverge.
        tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(),
            stdout=subprocess.PIPE,
            stderr=tr1_pipe,
            env=env1)
        tr0_out, tr0_err = tr0_proc.communicate()
        tr1_out, tr1_err = tr1_proc.communicate()
        # stderr is redirected to the log files above, so tr*_err is None
        # here; these writes only mark that the trainers finished.
        sys.stderr.write('trainer 0 stderr: %s\n' % tr0_err)
        sys.stderr.write('trainer 1 stderr: %s\n' % tr1_err)
        # close trainer stderr log files
        tr0_pipe.close()
        tr1_pipe.close()
        return pickle.loads(tr0_out), pickle.loads(
            tr1_out), tr0_proc.pid, tr1_proc.pid

    def check_with_place(self,
                         model_file,
                         col_type,
                         data_type,
                         check_error_log=False,
                         need_envs={}):
        """Run the two-trainer cluster and validate the collective result.

        `col_type` selects the expected-value computation ("broadcast",
        "allreduce" or "allgather"); any other value skips validation.
        `need_envs` is never mutated, so the mutable default is safe.
        """
        required_envs = {
            "FLAGS_eager_delete_tensor_gb": "0.0",
            "PATH": os.getenv("PATH"),
            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
            "LD_PRELOAD": os.getenv("LD_PRELOAD", ""),
            "GLOG_v": "3",
            "DATA_TYPE": data_type,
        }
        required_envs.update(need_envs)
        if check_error_log:
            # Force verbose glog output to stderr (overriding any need_envs
            # value for GLOG_v, as the original code did).
            required_envs["GLOG_v"] = "3"
            required_envs["GLOG_logtostderr"] = "1"
        tr0_out, tr1_out, pid0, pid1 = self._run_cluster(model_file,
                                                         required_envs)
        np_data_type = DataTypeCast(data_type)
        # Reproduce each trainer's input: run_trainer seeds numpy with its pid.
        np.random.seed(pid0)
        input1 = np.random.random((10, 1000)).astype(np_data_type)
        np.random.seed(pid1)
        input2 = np.random.random((10, 1000)).astype(np_data_type)
        if col_type == "broadcast":
            # Both ranks should end up with rank 1's tensor.
            need_result = input2
            self.assertTrue(np.allclose(tr0_out, need_result))
            self.assertTrue(np.allclose(tr1_out, need_result))
        elif col_type == "allreduce":
            need_result = input1 + input2
            self.assertTrue(
                np.allclose(
                    tr0_out, need_result, rtol=1e-05, atol=1e-05))
            self.assertTrue(
                np.allclose(
                    tr1_out, need_result, rtol=1e-05, atol=1e-05))
        elif col_type == "allgather":
            need_result = np.vstack((input1, input2))
            self.assertTrue(np.allclose(tr0_out, need_result))
            self.assertTrue(np.allclose(tr1_out, need_result))
        else:
            pass
from __future__ import print_function
import numpy as np
import unittest
import time
import argparse
import os
import sys
import subprocess
import traceback
import functools
import pickle
from contextlib import closing
import paddle.fluid as fluid
import paddle.fluid.unique_name as nameGen
from paddle.fluid import core
def DataTypeCast(date_type):
    """Map a Paddle data-type name (e.g. ``"float32"``) to the numpy type.

    Raises ValueError for names outside the supported set.
    """
    supported = {
        "float16": np.float16,
        "float32": np.float32,
        "float64": np.float64,
        "int8": np.int8,
        "int16": np.int16,
        "int32": np.int32,
        "uint8": np.uint8,
    }
    if date_type not in supported:
        raise ValueError("This data type is not support!")
    return supported[date_type]
class TestCollectiveRunnerBase(object):
    """Base class for a single trainer process in an MLU collective test.

    Subclasses implement get_model() to build the program under test;
    run_trainer() wires up the CNCL communicator, runs the program on random
    input and pickles the fetch results to stdout for the parent process.
    NOTE(review): wait_server_ready uses `socket`/`closing`, which this file
    imports only *after* this class definition -- fine at call time, but
    fragile; confirm import ordering if refactoring.
    """
    def get_model(self, train_prog, startup_prog):
        # Must be overridden: builds the network/op under test and returns
        # the variable to fetch.
        raise NotImplementedError(
            "get model should be implemented by child class.")
    def wait_server_ready(self, endpoints):
        # Block until a TCP connect succeeds for every "ip:port" endpoint,
        # retrying every 3 seconds.
        while True:
            all_ok = True
            not_ready_endpoints = []
            for ep in endpoints:
                ip_port = ep.split(":")
                with closing(
                        socket.socket(socket.AF_INET,
                                      socket.SOCK_STREAM)) as sock:
                    sock.settimeout(2)
                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                    if hasattr(socket, 'SO_REUSEPORT'):
                        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT,
                                        1)
                    result = sock.connect_ex((ip_port[0], int(ip_port[1])))
                    if result != 0:
                        all_ok = False
                        not_ready_endpoints.append(ep)
            if not all_ok:
                sys.stderr.write("server not ready, wait 3 sec to retry...\n")
                sys.stderr.write("not ready endpoints:" + str(
                    not_ready_endpoints) + "\n")
                sys.stderr.flush()
                time.sleep(3)
            else:
                break
    #endpoints should be ["ip1:port1","ip2:port2"]
    def initCommunicator(self, program, rank, nranks, wait_port,
                         current_endpoint, endpoints):
        # Append the CNCL id-generation and comm-init ops to `program` so the
        # communicator is established when the program runs.  Rank 0
        # optionally waits for the peer endpoints to come up first.
        # NOTE(review): relies on self.global_ring_id being set by the
        # subclass -- confirm.
        other_endpoints = endpoints[:]
        other_endpoints.remove(current_endpoint)
        if rank == 0 and wait_port:
            self.wait_server_ready(other_endpoints)
        block = program.global_block()
        cncl_id_var = block.create_var(
            name=nameGen.generate('cncl_id'),
            persistable=True,
            type=core.VarDesc.VarType.RAW)
        block.append_op(
            type='c_gen_cncl_id',
            inputs={},
            outputs={'Out': cncl_id_var},
            attrs={
                'rank': rank,
                'endpoint': current_endpoint,
                'other_endpoints': other_endpoints
            })
        block.append_op(
            type='c_comm_init',
            inputs={'X': cncl_id_var},
            outputs={},
            attrs={
                'nranks': nranks,
                'rank': rank,
                'ring_id': self.global_ring_id
            })
    def run_trainer(self, args):
        """Run one trainer: init comm, execute the model on a random
        (10, 1000) input seeded with this pid, and pickle results to stdout."""
        train_prog = fluid.Program()
        startup_prog = fluid.Program()
        endpoints = args["endpoints"].split(",")
        rank = args["trainerid"]
        current_endpoint = args["currentendpoint"]
        nranks = 2
        self.initCommunicator(startup_prog, rank, nranks, True,
                              current_endpoint, endpoints)
        self.rank = rank
        result = self.get_model(train_prog, startup_prog)
        device_id = int(os.getenv("FLAGS_selected_mlus", "0"))
        place = fluid.MLUPlace(device_id)
        exe = fluid.Executor(place)
        exe.run(startup_prog)
        # Seed with the pid so the parent can reproduce each trainer's input
        # (see TestDistBase.check_with_place).
        np.random.seed(os.getpid())
        np_data_type = DataTypeCast(args["data_type"])
        indata = np.random.random((10, 1000)).astype(np_data_type)
        out = exe.run(train_prog,
                      feed={'tindata': indata},
                      fetch_list=[result.name])
        # Binary pickle over stdout; the parent unpickles it in _run_cluster.
        sys.stdout.buffer.write(pickle.dumps(out))
def runtime_main(test_class, col_type, sub_type):
    """Entry point for a spawned trainer process.

    Reads the distributed-launch settings from the environment, packs them
    into an args dict and hands control to test_class().run_trainer.
    (`sub_type` is accepted for signature compatibility but unused.)
    """
    model = test_class()
    args = {
        "deviceid": os.getenv("FLAGS_selected_mlus"),
        "trainerid": int(os.getenv("PADDLE_TRAINER_ID")),
        "trainernum": int(os.getenv("PADDLE_TRAINERS_NUM")),
        "endpoints": os.getenv('PADDLE_TRAINER_ENDPOINTS'),
        "currentendpoint": os.getenv("PADDLE_CURRENT_ENDPOINT"),
        "col_type": col_type,
        "data_type": os.getenv("DATA_TYPE"),
    }
    model.run_trainer(args)
import paddle.compat as cpt
import socket
from contextlib import closing
class TestDistBase(unittest.TestCase):
    """Base for two-trainer MLU collective-communication tests.

    Spawns two trainer subprocesses running a given model file (each seeded
    with its own pid), unpickles their stdout and compares the results with
    the numpy-computed expectation for the requested collective op.
    """

    def setUp(self):
        self._port_set = set()
        self._trainers = 2
        # Two distinct localhost endpoints, one per trainer.
        self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
            self._find_free_port(), self._find_free_port())
        self._python_interp = sys.executable

    def _find_free_port(self):
        """Return a free TCP port, never repeating within this instance."""

        def __free_port():
            with closing(socket.socket(socket.AF_INET,
                                       socket.SOCK_STREAM)) as s:
                s.bind(('', 0))  # port 0: let the OS pick any free port
                return s.getsockname()[1]

        while True:
            port = __free_port()
            if port not in self._port_set:
                self._port_set.add(port)
                return port

    def _run_cluster(self, model_file, envs):
        """Run `model_file` in two trainer subprocesses.

        Returns (tr0_out, tr1_out, pid0, pid1), where the outputs are the
        unpickled stdout of each trainer.  Stderr goes to /tmp/tr{0,1}_err.log.
        """
        w0_ep, w1_ep = self._ps_endpoints.split(",")
        env0 = {
            "FLAGS_selected_mlus": "0",
            "PADDLE_TRAINER_ID": "0",
            "PADDLE_TRAINERS_NUM": "2",
            "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints,
            "PADDLE_CURRENT_ENDPOINT": w0_ep
        }
        env1 = {
            "FLAGS_selected_mlus": "1",
            "PADDLE_TRAINER_ID": "1",
            "PADDLE_TRAINERS_NUM": "2",
            "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints,
            "PADDLE_CURRENT_ENDPOINT": w1_ep
        }
        env0.update(envs)
        env1.update(envs)
        tr_cmd = "%s %s"
        tr0_cmd = tr_cmd % (self._python_interp, model_file)
        tr1_cmd = tr_cmd % (self._python_interp, model_file)
        tr0_pipe = open("/tmp/tr0_err.log", "wb")
        tr1_pipe = open("/tmp/tr1_err.log", "wb")
        tr0_proc = subprocess.Popen(
            tr0_cmd.strip().split(),
            stdout=subprocess.PIPE,
            stderr=tr0_pipe,
            env=env0)
        # BUG FIX: trainer 1 used to be launched with tr0_cmd.  The two
        # command strings happen to be identical today, so behavior was
        # unchanged, but using the wrong one would silently break as soon as
        # the commands diverge.
        tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(),
            stdout=subprocess.PIPE,
            stderr=tr1_pipe,
            env=env1)
        tr0_out, tr0_err = tr0_proc.communicate()
        tr1_out, tr1_err = tr1_proc.communicate()
        # stderr is redirected to the log files above, so tr*_err is None
        # here; these writes only mark that the trainers finished.
        sys.stderr.write('trainer 0 stderr: %s\n' % tr0_err)
        sys.stderr.write('trainer 1 stderr: %s\n' % tr1_err)
        # close trainer stderr log files
        tr0_pipe.close()
        tr1_pipe.close()
        return pickle.loads(tr0_out), pickle.loads(
            tr1_out), tr0_proc.pid, tr1_proc.pid

    def check_with_place(self,
                         model_file,
                         col_type,
                         data_type,
                         check_error_log=False,
                         need_envs={}):
        """Run the two-trainer cluster and validate the collective result.

        `col_type` selects the expected-value computation ("broadcast",
        "allreduce" or "allgather"); any other value skips validation.
        `need_envs` is never mutated, so the mutable default is safe.
        """
        required_envs = {
            "FLAGS_eager_delete_tensor_gb": "0.0",
            "PATH": os.getenv("PATH"),
            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
            "LD_PRELOAD": os.getenv("LD_PRELOAD", ""),
            "GLOG_v": "3",
            "DATA_TYPE": data_type,
        }
        required_envs.update(need_envs)
        if check_error_log:
            # Force verbose glog output to stderr (overriding any need_envs
            # value for GLOG_v, as the original code did).
            required_envs["GLOG_v"] = "3"
            required_envs["GLOG_logtostderr"] = "1"
        tr0_out, tr1_out, pid0, pid1 = self._run_cluster(model_file,
                                                         required_envs)
        np_data_type = DataTypeCast(data_type)
        # Reproduce each trainer's input: run_trainer seeds numpy with its pid.
        np.random.seed(pid0)
        input1 = np.random.random((10, 1000)).astype(np_data_type)
        np.random.seed(pid1)
        input2 = np.random.random((10, 1000)).astype(np_data_type)
        if col_type == "broadcast":
            # Both ranks should end up with rank 1's tensor.
            need_result = input2
            self.assertTrue(np.allclose(tr0_out, need_result))
            self.assertTrue(np.allclose(tr1_out, need_result))
        elif col_type == "allreduce":
            need_result = input1 + input2
            self.assertTrue(
                np.allclose(
                    tr0_out, need_result, rtol=1e-05, atol=1e-05))
            self.assertTrue(
                np.allclose(
                    tr1_out, need_result, rtol=1e-05, atol=1e-05))
        elif col_type == "allgather":
            need_result = np.vstack((input1, input2))
            self.assertTrue(np.allclose(tr0_out, need_result))
            self.assertTrue(np.allclose(tr1_out, need_result))
        else:
            pass
import os
import sqlite3
from datetime import datetime
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import pytz
from feast import Entity, FeatureTable
from feast.feature_view import FeatureView
from feast.infra.key_encoding_utils import serialize_entity_key
from feast.infra.online_stores.online_store import OnlineStore
from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
from feast.protos.feast.types.Value_pb2 import Value as ValueProto
from feast.repo_config import RepoConfig
class SqliteOnlineStore(OnlineStore):
    """SQLite-backed implementation of the Feast online feature store.

    Each feature view gets one table per project; every row holds a single
    (entity_key, feature_name) pair with a protobuf-serialized value and its
    event/created timestamps.
    """

    # Lazily opened connection shared by all calls (see _get_conn).
    _conn: Optional[sqlite3.Connection] = None

    @staticmethod
    def _get_db_path(config: RepoConfig) -> str:
        """Resolve the sqlite file path; relative paths are anchored at repo_path."""
        assert config.online_store.type == "sqlite"
        if config.repo_path and not Path(config.online_store.path).is_absolute():
            db_path = str(config.repo_path / config.online_store.path)
        else:
            db_path = config.online_store.path
        return db_path

    def _get_conn(self, config: RepoConfig):
        """Open (once) and return the cached sqlite connection."""
        if not self._conn:
            db_path = self._get_db_path(config)
            # NOTE(review): only the immediate parent dir is created; a
            # deeper missing hierarchy still raises (no parents=True).
            Path(db_path).parent.mkdir(exist_ok=True)
            self._conn = sqlite3.connect(
                db_path, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES,
            )
        return self._conn

    def online_write_batch(
        self,
        config: RepoConfig,
        table: Union[FeatureTable, FeatureView],
        data: List[
            Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]
        ],
        progress: Optional[Callable[[int], Any]],
    ) -> None:
        """Upsert a batch of feature rows for `table`.

        `data` holds (entity_key, {feature_name: value}, event_ts, created_ts)
        tuples; timestamps are normalized to naive UTC before storage.
        `progress`, if given, is invoked with 1 after each entity is written.
        """
        conn = self._get_conn(config)
        project = config.project
        with conn:  # one transaction for the whole batch
            for entity_key, values, timestamp, created_ts in data:
                entity_key_bin = serialize_entity_key(entity_key)
                timestamp = _to_naive_utc(timestamp)
                if created_ts is not None:
                    created_ts = _to_naive_utc(created_ts)
                for feature_name, val in values.items():
                    # Upsert emulation: UPDATE the existing row, then
                    # INSERT OR IGNORE to cover the row-did-not-exist case.
                    conn.execute(
                        f"""
                        UPDATE {_table_id(project, table)}
                        SET value = ?, event_ts = ?, created_ts = ?
                        WHERE (entity_key = ? AND feature_name = ?)
                    """,
                        (
                            # SET
                            val.SerializeToString(),
                            timestamp,
                            created_ts,
                            # WHERE
                            entity_key_bin,
                            feature_name,
                        ),
                    )
                    conn.execute(
                        f"""INSERT OR IGNORE INTO {_table_id(project, table)}
                            (entity_key, feature_name, value, event_ts, created_ts)
                            VALUES (?, ?, ?, ?, ?)""",
                        (
                            entity_key_bin,
                            feature_name,
                            val.SerializeToString(),
                            timestamp,
                            created_ts,
                        ),
                    )
                if progress:
                    progress(1)

    def online_read(
        self,
        config: RepoConfig,
        table: Union[FeatureTable, FeatureView],
        entity_keys: List[EntityKeyProto],
        requested_features: Optional[List[str]] = None,
    ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:
        """Read the stored values for each entity key.

        Returns one (event_ts, {feature_name: value}) pair per entity key, in
        the same order as `entity_keys`; (None, None) when nothing is stored.
        NOTE(review): `requested_features` is currently ignored -- all stored
        features for the entity are returned.
        """
        # (a stray dead `pass` statement that used to sit here was removed)
        conn = self._get_conn(config)
        cur = conn.cursor()
        result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []
        project = config.project
        for entity_key in entity_keys:
            entity_key_bin = serialize_entity_key(entity_key)
            cur.execute(
                f"SELECT feature_name, value, event_ts FROM {_table_id(project, table)} WHERE entity_key = ?",
                (entity_key_bin,),
            )
            res = {}
            res_ts = None
            for feature_name, val_bin, ts in cur.fetchall():
                val = ValueProto()
                val.ParseFromString(val_bin)
                res[feature_name] = val
                res_ts = ts
            if not res:
                result.append((None, None))
            else:
                result.append((res_ts, res))
        return result

    def update(
        self,
        config: RepoConfig,
        tables_to_delete: Sequence[Union[FeatureTable, FeatureView]],
        tables_to_keep: Sequence[Union[FeatureTable, FeatureView]],
        entities_to_delete: Sequence[Entity],
        entities_to_keep: Sequence[Entity],
        partial: bool,
    ):
        """Create tables/indexes for kept views; drop tables for deleted ones.

        The entity arguments and `partial` are accepted for interface
        compatibility but not used by the sqlite backend.
        """
        conn = self._get_conn(config)
        project = config.project
        for table in tables_to_keep:
            conn.execute(
                f"CREATE TABLE IF NOT EXISTS {_table_id(project, table)} (entity_key BLOB, feature_name TEXT, value BLOB, event_ts timestamp, created_ts timestamp, PRIMARY KEY(entity_key, feature_name))"
            )
            conn.execute(
                f"CREATE INDEX IF NOT EXISTS {_table_id(project, table)}_ek ON {_table_id(project, table)} (entity_key);"
            )
        for table in tables_to_delete:
            conn.execute(f"DROP TABLE IF EXISTS {_table_id(project, table)}")

    def teardown(
        self,
        config: RepoConfig,
        tables: Sequence[Union[FeatureTable, FeatureView]],
        entities: Sequence[Entity],
    ):
        """Delete the sqlite database file (raises FileNotFoundError if absent)."""
        os.unlink(self._get_db_path(config))
def _table_id(project: str, table: Union[FeatureTable, FeatureView]) -> str:
    """Return the per-project sqlite table name for `table`."""
    return "{0}_{1}".format(project, table.name)
def _to_naive_utc(ts: datetime):
if ts.tzinfo is None:
return ts
else:
return ts.astimezone(pytz.utc).replace(tzinfo=None) | sdk/python/feast/infra/online_stores/sqlite.py |
import os
import sqlite3
from datetime import datetime
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import pytz
from feast import Entity, FeatureTable
from feast.feature_view import FeatureView
from feast.infra.key_encoding_utils import serialize_entity_key
from feast.infra.online_stores.online_store import OnlineStore
from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
from feast.protos.feast.types.Value_pb2 import Value as ValueProto
from feast.repo_config import RepoConfig
class SqliteOnlineStore(OnlineStore):
    """SQLite-backed implementation of the Feast online feature store.

    Each feature view gets one table per project; every row holds a single
    (entity_key, feature_name) pair with a protobuf-serialized value and its
    event/created timestamps.
    """

    # Lazily opened connection shared by all calls (see _get_conn).
    _conn: Optional[sqlite3.Connection] = None

    @staticmethod
    def _get_db_path(config: RepoConfig) -> str:
        """Resolve the sqlite file path; relative paths are anchored at repo_path."""
        assert config.online_store.type == "sqlite"
        if config.repo_path and not Path(config.online_store.path).is_absolute():
            db_path = str(config.repo_path / config.online_store.path)
        else:
            db_path = config.online_store.path
        return db_path

    def _get_conn(self, config: RepoConfig):
        """Open (once) and return the cached sqlite connection."""
        if not self._conn:
            db_path = self._get_db_path(config)
            # NOTE(review): only the immediate parent dir is created; a
            # deeper missing hierarchy still raises (no parents=True).
            Path(db_path).parent.mkdir(exist_ok=True)
            self._conn = sqlite3.connect(
                db_path, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES,
            )
        return self._conn

    def online_write_batch(
        self,
        config: RepoConfig,
        table: Union[FeatureTable, FeatureView],
        data: List[
            Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]
        ],
        progress: Optional[Callable[[int], Any]],
    ) -> None:
        """Upsert a batch of feature rows for `table`.

        `data` holds (entity_key, {feature_name: value}, event_ts, created_ts)
        tuples; timestamps are normalized to naive UTC before storage.
        `progress`, if given, is invoked with 1 after each entity is written.
        """
        conn = self._get_conn(config)
        project = config.project
        with conn:  # one transaction for the whole batch
            for entity_key, values, timestamp, created_ts in data:
                entity_key_bin = serialize_entity_key(entity_key)
                timestamp = _to_naive_utc(timestamp)
                if created_ts is not None:
                    created_ts = _to_naive_utc(created_ts)
                for feature_name, val in values.items():
                    # Upsert emulation: UPDATE the existing row, then
                    # INSERT OR IGNORE to cover the row-did-not-exist case.
                    conn.execute(
                        f"""
                        UPDATE {_table_id(project, table)}
                        SET value = ?, event_ts = ?, created_ts = ?
                        WHERE (entity_key = ? AND feature_name = ?)
                    """,
                        (
                            # SET
                            val.SerializeToString(),
                            timestamp,
                            created_ts,
                            # WHERE
                            entity_key_bin,
                            feature_name,
                        ),
                    )
                    conn.execute(
                        f"""INSERT OR IGNORE INTO {_table_id(project, table)}
                            (entity_key, feature_name, value, event_ts, created_ts)
                            VALUES (?, ?, ?, ?, ?)""",
                        (
                            entity_key_bin,
                            feature_name,
                            val.SerializeToString(),
                            timestamp,
                            created_ts,
                        ),
                    )
                if progress:
                    progress(1)

    def online_read(
        self,
        config: RepoConfig,
        table: Union[FeatureTable, FeatureView],
        entity_keys: List[EntityKeyProto],
        requested_features: Optional[List[str]] = None,
    ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:
        """Read the stored values for each entity key.

        Returns one (event_ts, {feature_name: value}) pair per entity key, in
        the same order as `entity_keys`; (None, None) when nothing is stored.
        NOTE(review): `requested_features` is currently ignored -- all stored
        features for the entity are returned.
        """
        # (a stray dead `pass` statement that used to sit here was removed)
        conn = self._get_conn(config)
        cur = conn.cursor()
        result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []
        project = config.project
        for entity_key in entity_keys:
            entity_key_bin = serialize_entity_key(entity_key)
            cur.execute(
                f"SELECT feature_name, value, event_ts FROM {_table_id(project, table)} WHERE entity_key = ?",
                (entity_key_bin,),
            )
            res = {}
            res_ts = None
            for feature_name, val_bin, ts in cur.fetchall():
                val = ValueProto()
                val.ParseFromString(val_bin)
                res[feature_name] = val
                res_ts = ts
            if not res:
                result.append((None, None))
            else:
                result.append((res_ts, res))
        return result

    def update(
        self,
        config: RepoConfig,
        tables_to_delete: Sequence[Union[FeatureTable, FeatureView]],
        tables_to_keep: Sequence[Union[FeatureTable, FeatureView]],
        entities_to_delete: Sequence[Entity],
        entities_to_keep: Sequence[Entity],
        partial: bool,
    ):
        """Create tables/indexes for kept views; drop tables for deleted ones.

        The entity arguments and `partial` are accepted for interface
        compatibility but not used by the sqlite backend.
        """
        conn = self._get_conn(config)
        project = config.project
        for table in tables_to_keep:
            conn.execute(
                f"CREATE TABLE IF NOT EXISTS {_table_id(project, table)} (entity_key BLOB, feature_name TEXT, value BLOB, event_ts timestamp, created_ts timestamp, PRIMARY KEY(entity_key, feature_name))"
            )
            conn.execute(
                f"CREATE INDEX IF NOT EXISTS {_table_id(project, table)}_ek ON {_table_id(project, table)} (entity_key);"
            )
        for table in tables_to_delete:
            conn.execute(f"DROP TABLE IF EXISTS {_table_id(project, table)}")

    def teardown(
        self,
        config: RepoConfig,
        tables: Sequence[Union[FeatureTable, FeatureView]],
        entities: Sequence[Entity],
    ):
        """Delete the sqlite database file (raises FileNotFoundError if absent)."""
        os.unlink(self._get_db_path(config))
def _table_id(project: str, table: Union[FeatureTable, FeatureView]) -> str:
return f"{project}_{table.name}"
def _to_naive_utc(ts: datetime):
if ts.tzinfo is None:
return ts
else:
return ts.astimezone(pytz.utc).replace(tzinfo=None) | 0.795857 | 0.271522 |
import aifc
import random
def monoChannelWrapper(gen):
for s in gen:
yield [s]
def saveM(gen,t,name = "save.aiff"):
saveSample(monoChannelWrapper(extent(gen,t)),name,1,1)
def saveSample(gen,name = "out.aiff",channels=2,prec=3,rate=48000,dither=1):
f = aifc.open(name,'w')
f.setnchannels(channels)
f.setsampwidth(prec)
f.setframerate(rate)
for s in gen:
resv = (int(s[i]*(1<<(prec*8 - 2))+random.random()*dither) for i in range(channels))
f.writeframes(bytes(((v>>(8*i))%256 for i in range(prec) for v in resv)))
#print(resv,end='\r')
f.close()
#some sample generation things
def forceGen(x=0,y=0,vx=-1,vy=1,forceFunc = lambda x,y,vx,vy: (-x,-y), damping = 0.1,dt= 1/96000):
while 1:
x += vx*dt
y += vy*dt
tmp = forceFunc(x,y,vx,vy)
vx += tmp[0]*dt
vy += tmp[1]*dt
vx *= 1-damping
vy *= 1-damping
yield [x,y]
def makeMono(gen,ind = 0):
for i in gen:
yield i[ind]
def makeGen(n):
if type(n) != type((1 for i in ())):
while 1:
yield n
else:
for i in n:
yield i
def shift(g,d):
d = makeGen(d)
for i in g:
yield i+next(d)
def step(g,d):
for i in g:
d -= 1;
if d <= 0:
break
for i in g:
yield i
def fof(g,l = lambda x: x):
for i in g:
yield l(i)
def prod(g1,g2):
g1 = makeGen(g1)
g2 = makeGen(g2)
for i in g1:
yield i*next(g2)
def sum(g1,g2):
g1 = makeGen(g1)
g2 = makeGen(g2)
for i in g1:
yield i+next(g2)
def integrate(g,k=1,d = 0):
v =0
d = makeGen(d)
k = makeGen(k)
for i in g:
v += next(k)*i - next(d)*v
yield v
def derivitive(g):
prev = 0
for i in g:
yield i-prev
prev = i
def coupledOscilators(xs,ks,k0s,damp=0,dt = 1/96000):
vs = [0 for i in xs]
while 1:
for i in range(len(xs)):
xs[i] += vs[i]*dt
for i in range(len(xs)):
vs[i] = (vs[i]+((xs[i-1]-xs[i])*ks[i]
-(xs[i]-xs[(i+1)%len(xs)])*ks[(i+1)%len(xs)]
-xs[i]*k0s[i])*dt)*(1-damp)
yield xs
def magnetField(mx,my,mz,mf,g=1):#immitates the magnet pendulum physics
#https://www.math.hmc.edu/~dyong/math164/2006/win/finalreport.pdf
def force(x,y,vx,vy):
fx = -x*g
fy = -y*g
for i in range(len(mx)):
r = ((mx[i]-x)**2+(my[i]-y)**2+mz[i]**2)**(3/2)
fx += mf[i]*(mx[i]-x)/r
fy += mf[i]*(my[i]-y)/r
return (fx,fy)
return force
def movingMagnetField(mx,my,mz,mf,mov = lambda t,mx,my,mz,mf:(mx,my,mz,mf),dt=1/48000,g=1):#immitates the magnet pendulum physics
#https://www.math.hmc.edu/~dyong/math164/2006/win/finalreport.pdf
def force(x,y,vx,vy,t=[0],mx=[mx],my=[my],mz=[mz],mf=[mf]):
fx = -x*g
fy = -y*g
for i in range(len(mx)):
r = ((mx[0][i]-x)**2+(my[0][i]-y)**2+mz[0][i]**2)**(3/2)
fx += mf[0][i]*(mx[0][i]-x)/r
fy += mf[0][i]*(my[0][i]-y)/r
mx[0],my[0],mz[0],mf[0] = mov(t[0],mx[0],my[0],mz[0],mf[0])
t[0] += dt
return (fx,fy)
return force
def ext(g,n):
for i in g:
yield i
n -= 1
if n<0:
return
def td(l,dt=1/48000):
def lt(x,y,vx,vy,t=[0]):
t[0] += dt
return l(x,y,vx,vy,t[0])
return lt
def lin(s,d=48000):
s /= d
x = 0
while 1:
yield x
x += s
def foldMod(v,n):
return abs((v-n)%(4*n)-2*n)-n
def fm1(v):
return abs((v-1)%4-2)-1 | sampleGen.py | import aifc
import random
def monoChannelWrapper(gen):
for s in gen:
yield [s]
def saveM(gen,t,name = "save.aiff"):
saveSample(monoChannelWrapper(extent(gen,t)),name,1,1)
def saveSample(gen,name = "out.aiff",channels=2,prec=3,rate=48000,dither=1):
f = aifc.open(name,'w')
f.setnchannels(channels)
f.setsampwidth(prec)
f.setframerate(rate)
for s in gen:
resv = (int(s[i]*(1<<(prec*8 - 2))+random.random()*dither) for i in range(channels))
f.writeframes(bytes(((v>>(8*i))%256 for i in range(prec) for v in resv)))
#print(resv,end='\r')
f.close()
#some sample generation things
def forceGen(x=0,y=0,vx=-1,vy=1,forceFunc = lambda x,y,vx,vy: (-x,-y), damping = 0.1,dt= 1/96000):
while 1:
x += vx*dt
y += vy*dt
tmp = forceFunc(x,y,vx,vy)
vx += tmp[0]*dt
vy += tmp[1]*dt
vx *= 1-damping
vy *= 1-damping
yield [x,y]
def makeMono(gen,ind = 0):
for i in gen:
yield i[ind]
def makeGen(n):
if type(n) != type((1 for i in ())):
while 1:
yield n
else:
for i in n:
yield i
def shift(g,d):
d = makeGen(d)
for i in g:
yield i+next(d)
def step(g,d):
for i in g:
d -= 1;
if d <= 0:
break
for i in g:
yield i
def fof(g,l = lambda x: x):
for i in g:
yield l(i)
def prod(g1,g2):
g1 = makeGen(g1)
g2 = makeGen(g2)
for i in g1:
yield i*next(g2)
def sum(g1,g2):
g1 = makeGen(g1)
g2 = makeGen(g2)
for i in g1:
yield i+next(g2)
def integrate(g,k=1,d = 0):
v =0
d = makeGen(d)
k = makeGen(k)
for i in g:
v += next(k)*i - next(d)*v
yield v
def derivitive(g):
prev = 0
for i in g:
yield i-prev
prev = i
def coupledOscilators(xs,ks,k0s,damp=0,dt = 1/96000):
vs = [0 for i in xs]
while 1:
for i in range(len(xs)):
xs[i] += vs[i]*dt
for i in range(len(xs)):
vs[i] = (vs[i]+((xs[i-1]-xs[i])*ks[i]
-(xs[i]-xs[(i+1)%len(xs)])*ks[(i+1)%len(xs)]
-xs[i]*k0s[i])*dt)*(1-damp)
yield xs
def magnetField(mx,my,mz,mf,g=1):#immitates the magnet pendulum physics
#https://www.math.hmc.edu/~dyong/math164/2006/win/finalreport.pdf
def force(x,y,vx,vy):
fx = -x*g
fy = -y*g
for i in range(len(mx)):
r = ((mx[i]-x)**2+(my[i]-y)**2+mz[i]**2)**(3/2)
fx += mf[i]*(mx[i]-x)/r
fy += mf[i]*(my[i]-y)/r
return (fx,fy)
return force
def movingMagnetField(mx,my,mz,mf,mov = lambda t,mx,my,mz,mf:(mx,my,mz,mf),dt=1/48000,g=1):#immitates the magnet pendulum physics
#https://www.math.hmc.edu/~dyong/math164/2006/win/finalreport.pdf
def force(x,y,vx,vy,t=[0],mx=[mx],my=[my],mz=[mz],mf=[mf]):
fx = -x*g
fy = -y*g
for i in range(len(mx)):
r = ((mx[0][i]-x)**2+(my[0][i]-y)**2+mz[0][i]**2)**(3/2)
fx += mf[0][i]*(mx[0][i]-x)/r
fy += mf[0][i]*(my[0][i]-y)/r
mx[0],my[0],mz[0],mf[0] = mov(t[0],mx[0],my[0],mz[0],mf[0])
t[0] += dt
return (fx,fy)
return force
def ext(g,n):
for i in g:
yield i
n -= 1
if n<0:
return
def td(l,dt=1/48000):
def lt(x,y,vx,vy,t=[0]):
t[0] += dt
return l(x,y,vx,vy,t[0])
return lt
def lin(s,d=48000):
s /= d
x = 0
while 1:
yield x
x += s
def foldMod(v,n):
return abs((v-n)%(4*n)-2*n)-n
def fm1(v):
return abs((v-1)%4-2)-1 | 0.096211 | 0.199327 |
import argparse
import sys
import json
from .utils import as_obj
def command_arg(*args, **kwargs):
"""
Small tweak to build argparse.add_argument() arguments through a command
declaration (see `command` decorator and `Command.add_arguments` method)
"""
class CommandArg:
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def get_name(self):
name = self.args[1] if len(self.args) > 1 else self.args[0]
if name.startswith('--'):
name = name[2:]
return name
return CommandArg(*args, **kwargs)
class Command(object):
"""
The Command object represent a given action. In fine, its goal is to make
the argparse.ArgumentParser parsing result in the calling of the method that
used the @command decorator.
"""
def __init__(self, name, help, args):
self.help = help
self.name = name
self.args = args
def has_arguments(self):
return len(self.args) > 0
def add_arguments(self, parser):
for arg in self.args:
parser.add_argument(*arg.args, **arg.kwargs)
# Takes args produced by argparse.parse_args() and outputs the proper kwargs
# dict for the bound api method.
def process_kwargs(self, args):
kwargs = {}
for arg in self.args:
argname = arg.get_name()
if getattr(args, argname, False):
kwargs[argname] = getattr(args, argname)
return kwargs
def command(name='', help=None, args=list()):
"""
@command() decorator. Used to register a sub command for argparse.
To register arguments just use the command_arg() helper as you would with
parser.add_argument() arguments.
"""
command = Command(name=name, help=help, args=args)
def decorated(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
setattr(wrapper, 'command', command)
return wrapper
return decorated
def with_commands(description):
"""
Register a class as a commanded class. All methods marked with the @command()
decorator will be be piloted from here.
"""
def wrapped(Cls):
class CommandedCls:
def __init__(self, *args, **kwargs):
self.instance = Cls(*args, **kwargs)
self.description = description
self.bindings = self.list_bindings()
self.parser = argparse.ArgumentParser(description=description)
self.add_commands()
self.parse_args()
def __getattribute__(self, attr):
_attr = None
try: _attr = super(CommandedCls, self).__getattribute__(attr)
except: pass
if _attr is None:
_attr = self.instance.__getattribute__(self.instance, attr)
return _attr
def add_commands(self):
bindings = self.bindings.items()
subparsers = self.parser.add_subparsers()
for key, binding in bindings:
command = binding.command
command_parser = subparsers.add_parser(key, help=command.help)
command_parser.set_defaults(command=key)
command.add_arguments(command_parser)
def parse_args(self):
args = self.parser.parse_args()
if args.command:
binding = self.bindings.get(args.command)
command = binding.command
method = binding.method
kwargs = command.process_kwargs(args)
# method execution
getattr(self.instance, args.command)(**kwargs)
else:
self.parser.print_help()
def is_valid_name(self, name): return not name.startswith('__')
def is_decorated(self, name):
"""
Check if a given command name is a decorated method
"""
method = getattr(self.instance, name)
return getattr(method, 'command', None) is not None
def list_bindings(self):
"""
Will return all decorated methods as a dict:
{
<command name>: object(method=<bound api method>, command=<bound command object>),
...
}
"""
all_command_names = filter(
self.is_decorated, filter(
self.is_valid_name,
dir(self.instance)
)
)
def _getcommand(name):
method = getattr(self.instance, name)
return [name, method, method.command]
return {
name: as_obj({ "command": command, "method": method }) for [
name, method, command
] in map(_getcommand, all_command_names)
}
return CommandedCls
return wrapped | lib/decorators.py | import argparse
import sys
import json
from .utils import as_obj
def command_arg(*args, **kwargs):
"""
Small tweak to build argparse.add_argument() arguments through a command
declaration (see `command` decorator and `Command.add_arguments` method)
"""
class CommandArg:
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def get_name(self):
name = self.args[1] if len(self.args) > 1 else self.args[0]
if name.startswith('--'):
name = name[2:]
return name
return CommandArg(*args, **kwargs)
class Command(object):
"""
The Command object represent a given action. In fine, its goal is to make
the argparse.ArgumentParser parsing result in the calling of the method that
used the @command decorator.
"""
def __init__(self, name, help, args):
self.help = help
self.name = name
self.args = args
def has_arguments(self):
return len(self.args) > 0
def add_arguments(self, parser):
for arg in self.args:
parser.add_argument(*arg.args, **arg.kwargs)
# Takes args produced by argparse.parse_args() and outputs the proper kwargs
# dict for the bound api method.
def process_kwargs(self, args):
kwargs = {}
for arg in self.args:
argname = arg.get_name()
if getattr(args, argname, False):
kwargs[argname] = getattr(args, argname)
return kwargs
def command(name='', help=None, args=list()):
"""
@command() decorator. Used to register a sub command for argparse.
To register arguments just use the command_arg() helper as you would with
parser.add_argument() arguments.
"""
command = Command(name=name, help=help, args=args)
def decorated(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
setattr(wrapper, 'command', command)
return wrapper
return decorated
def with_commands(description):
"""
Register a class as a commanded class. All methods marked with the @command()
decorator will be be piloted from here.
"""
def wrapped(Cls):
class CommandedCls:
def __init__(self, *args, **kwargs):
self.instance = Cls(*args, **kwargs)
self.description = description
self.bindings = self.list_bindings()
self.parser = argparse.ArgumentParser(description=description)
self.add_commands()
self.parse_args()
def __getattribute__(self, attr):
_attr = None
try: _attr = super(CommandedCls, self).__getattribute__(attr)
except: pass
if _attr is None:
_attr = self.instance.__getattribute__(self.instance, attr)
return _attr
def add_commands(self):
bindings = self.bindings.items()
subparsers = self.parser.add_subparsers()
for key, binding in bindings:
command = binding.command
command_parser = subparsers.add_parser(key, help=command.help)
command_parser.set_defaults(command=key)
command.add_arguments(command_parser)
def parse_args(self):
args = self.parser.parse_args()
if args.command:
binding = self.bindings.get(args.command)
command = binding.command
method = binding.method
kwargs = command.process_kwargs(args)
# method execution
getattr(self.instance, args.command)(**kwargs)
else:
self.parser.print_help()
def is_valid_name(self, name): return not name.startswith('__')
def is_decorated(self, name):
"""
Check if a given command name is a decorated method
"""
method = getattr(self.instance, name)
return getattr(method, 'command', None) is not None
def list_bindings(self):
"""
Will return all decorated methods as a dict:
{
<command name>: object(method=<bound api method>, command=<bound command object>),
...
}
"""
all_command_names = filter(
self.is_decorated, filter(
self.is_valid_name,
dir(self.instance)
)
)
def _getcommand(name):
method = getattr(self.instance, name)
return [name, method, method.command]
return {
name: as_obj({ "command": command, "method": method }) for [
name, method, command
] in map(_getcommand, all_command_names)
}
return CommandedCls
return wrapped | 0.378229 | 0.102709 |
import csv
import sys
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Accepts exactly one parameter - the filepath to a Funding Circle CSV export.")
sys.exit(1)
entries = {}
with open('input.csv', 'r') as f:
csv_reader = csv.reader(f)
next(csv_reader) # Remove the headers.
for row in csv_reader:
loan_part = ""
if ("Interest repayment for loan part " in row[1]) or ("Early interest repayment for loan part " in row[1]):
loan_part = row[1].split(" ")[-1]
if row[0]+loan_part in entries:
entries[row[0]+loan_part]["interest"] = row[2]
else:
entries[row[0] + loan_part] = {"interest": row[2]}
elif ("Principal repayment for loan part " in row[1]) or ("Early principal repayment for loan part " in row[1]):
loan_part = row[1].split(" ")[-1]
if row[0]+loan_part in entries:
entries[row[0]+loan_part]["principal"] = row[2]
else:
entries[row[0] + loan_part] = {"principal": row[2]}
elif "Servicing fee for Loan ID " in row[1]:
loan_part = row[1].split(";")[1].split(" ")[-1]
if row[0]+loan_part in entries:
entries[row[0]+loan_part]["fee"] = row[3]
else:
entries[row[0] + loan_part] = {"fee": row[3]}
elif "Servicing fee for loan part " in row[1]:
loan_part = row[1].split(" ")[-1]
if row[0]+loan_part in entries:
entries[row[0]+loan_part]["fee"] = row[3]
else:
entries[row[0] + loan_part] = {"fee": row[3]}
elif "Loan offer on " in row[1]:
loan_part = row[1].split(" ")[-1]
if row[0]+loan_part in entries:
entries[row[0]+loan_part]["cost"] = row[3]
else:
entries[row[0] + loan_part] = {"cost": row[3]}
else:
raise IOError("Unrecognised row: "+row[1])
delta = 0
print("{: ^10} {: ^9} {: ^5} {: ^5} {: ^5} {: ^5} {: ^6}".format("Date", "ID", "Prin.", "Int", "Fee", "Cost", "Total"))
for k in sorted(entries.keys()):
principal = float(entries[k].get("principal", "0"))
interest = float(entries[k].get("interest", "0"))
fee = float(entries[k].get("fee", "0"))
cost = float(entries[k].get("cost", "0"))
total = principal + interest - fee - cost
delta += total
print("{: >10} {: >9} {: >5.2f} {: >5.2f} {: >5.2f} {: >5.2f} {: >6.2f}".format(k[0:10],
k[10:],
principal,
interest,
fee,
cost,
total
))
print()
print("Difference between last month and this month: £{:.2f}".format(delta)) | modules/app_minis/files/fundingcircle_gnucash_summariser/fundingcircle_gnucash_summariser.py |
import csv
import sys
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Accepts exactly one parameter - the filepath to a Funding Circle CSV export.")
sys.exit(1)
entries = {}
with open('input.csv', 'r') as f:
csv_reader = csv.reader(f)
next(csv_reader) # Remove the headers.
for row in csv_reader:
loan_part = ""
if ("Interest repayment for loan part " in row[1]) or ("Early interest repayment for loan part " in row[1]):
loan_part = row[1].split(" ")[-1]
if row[0]+loan_part in entries:
entries[row[0]+loan_part]["interest"] = row[2]
else:
entries[row[0] + loan_part] = {"interest": row[2]}
elif ("Principal repayment for loan part " in row[1]) or ("Early principal repayment for loan part " in row[1]):
loan_part = row[1].split(" ")[-1]
if row[0]+loan_part in entries:
entries[row[0]+loan_part]["principal"] = row[2]
else:
entries[row[0] + loan_part] = {"principal": row[2]}
elif "Servicing fee for Loan ID " in row[1]:
loan_part = row[1].split(";")[1].split(" ")[-1]
if row[0]+loan_part in entries:
entries[row[0]+loan_part]["fee"] = row[3]
else:
entries[row[0] + loan_part] = {"fee": row[3]}
elif "Servicing fee for loan part " in row[1]:
loan_part = row[1].split(" ")[-1]
if row[0]+loan_part in entries:
entries[row[0]+loan_part]["fee"] = row[3]
else:
entries[row[0] + loan_part] = {"fee": row[3]}
elif "Loan offer on " in row[1]:
loan_part = row[1].split(" ")[-1]
if row[0]+loan_part in entries:
entries[row[0]+loan_part]["cost"] = row[3]
else:
entries[row[0] + loan_part] = {"cost": row[3]}
else:
raise IOError("Unrecognised row: "+row[1])
delta = 0
print("{: ^10} {: ^9} {: ^5} {: ^5} {: ^5} {: ^5} {: ^6}".format("Date", "ID", "Prin.", "Int", "Fee", "Cost", "Total"))
for k in sorted(entries.keys()):
principal = float(entries[k].get("principal", "0"))
interest = float(entries[k].get("interest", "0"))
fee = float(entries[k].get("fee", "0"))
cost = float(entries[k].get("cost", "0"))
total = principal + interest - fee - cost
delta += total
print("{: >10} {: >9} {: >5.2f} {: >5.2f} {: >5.2f} {: >5.2f} {: >6.2f}".format(k[0:10],
k[10:],
principal,
interest,
fee,
cost,
total
))
print()
print("Difference between last month and this month: £{:.2f}".format(delta)) | 0.265785 | 0.328435 |
import grpc
import model_service_pb2 as model__service__pb2
# NOTE(review): machine-generated gRPC client stub (grpcio-tools output).
# Do not hand-edit the logic; regenerate from the .proto instead.
class ModelServiceStub(object):
    # missing associated documentation comment in .proto file
    pass
    def __init__(self, channel):
        """Constructor.
        Args:
          channel: A grpc.Channel.
        """
        # Unary-request / unary-response stub for ModelService.publishLoad.
        self.publishLoad = channel.unary_unary(
            '/com.webank.ai.fate.api.mlmodel.manager.ModelService/publishLoad',
            request_serializer=model__service__pb2.PublishRequest.SerializeToString,
            response_deserializer=model__service__pb2.PublishResponse.FromString,
        )
        # Unary-request / unary-response stub for ModelService.publishBind.
        self.publishBind = channel.unary_unary(
            '/com.webank.ai.fate.api.mlmodel.manager.ModelService/publishBind',
            request_serializer=model__service__pb2.PublishRequest.SerializeToString,
            response_deserializer=model__service__pb2.PublishResponse.FromString,
        )
# NOTE(review): machine-generated gRPC servicer base class. Concrete
# servicers subclass this and override the RPC methods.
class ModelServiceServicer(object):
    # missing associated documentation comment in .proto file
    pass
    def publishLoad(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        # Default generated behavior: report UNIMPLEMENTED until overridden.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def publishBind(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        # Default generated behavior: report UNIMPLEMENTED until overridden.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_ModelServiceServicer_to_server(servicer, server):
    """Register *servicer*'s publishLoad/publishBind handlers on *server*."""
    rpc_method_handlers = {
        'publishLoad': grpc.unary_unary_rpc_method_handler(
            servicer.publishLoad,
            request_deserializer=model__service__pb2.PublishRequest.FromString,
            response_serializer=model__service__pb2.PublishResponse.SerializeToString,
        ),
        'publishBind': grpc.unary_unary_rpc_method_handler(
            servicer.publishBind,
            request_deserializer=model__service__pb2.PublishRequest.FromString,
            response_serializer=model__service__pb2.PublishResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'com.webank.ai.fate.api.mlmodel.manager.ModelService', rpc_method_handlers)
    # NOTE(review): the trailing "| arch/... | import grpc" text below is
    # dataset-extraction residue fused onto this line, not program code.
    server.add_generic_rpc_handlers((generic_handler,)) | arch/api/proto/model_service_pb2_grpc.py | import grpc
import model_service_pb2 as model__service__pb2
# NOTE(review): duplicate copy of the generated gRPC client stub above
# (this file is a dataset dump storing both raw and parsed columns).
class ModelServiceStub(object):
    # missing associated documentation comment in .proto file
    pass
    def __init__(self, channel):
        """Constructor.
        Args:
          channel: A grpc.Channel.
        """
        # Unary-unary stub for ModelService.publishLoad.
        self.publishLoad = channel.unary_unary(
            '/com.webank.ai.fate.api.mlmodel.manager.ModelService/publishLoad',
            request_serializer=model__service__pb2.PublishRequest.SerializeToString,
            response_deserializer=model__service__pb2.PublishResponse.FromString,
        )
        # Unary-unary stub for ModelService.publishBind.
        self.publishBind = channel.unary_unary(
            '/com.webank.ai.fate.api.mlmodel.manager.ModelService/publishBind',
            request_serializer=model__service__pb2.PublishRequest.SerializeToString,
            response_deserializer=model__service__pb2.PublishResponse.FromString,
        )
# NOTE(review): duplicate copy of the generated servicer base class above.
class ModelServiceServicer(object):
    # missing associated documentation comment in .proto file
    pass
    def publishLoad(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        # Generated default: UNIMPLEMENTED until a subclass overrides.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def publishBind(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        # Generated default: UNIMPLEMENTED until a subclass overrides.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_ModelServiceServicer_to_server(servicer, server):
    """Register *servicer*'s publishLoad/publishBind handlers on *server*."""
    rpc_method_handlers = {
        'publishLoad': grpc.unary_unary_rpc_method_handler(
            servicer.publishLoad,
            request_deserializer=model__service__pb2.PublishRequest.FromString,
            response_serializer=model__service__pb2.PublishResponse.SerializeToString,
        ),
        'publishBind': grpc.unary_unary_rpc_method_handler(
            servicer.publishBind,
            request_deserializer=model__service__pb2.PublishRequest.FromString,
            response_serializer=model__service__pb2.PublishResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'com.webank.ai.fate.api.mlmodel.manager.ModelService', rpc_method_handlers)
    # NOTE(review): the trailing "| 0.61682 | ..." text below is dataset
    # extraction residue fused onto this line, not program code.
    server.add_generic_rpc_handlers((generic_handler,)) | 0.61682 | 0.048586 |
import os
import sys
import ray
import numpy as np
import pytest
import time
from ray._private.test_utils import SignalActor
@pytest.mark.parametrize(
    "ray_start_cluster_head", [{
        "num_cpus": 5,
        "object_store_memory": 10**8,
    }],
    indirect=True)
def test_parallel_actor_fill_plasma_retry(ray_start_cluster_head):
    """Five actors each repeatedly produce a payload that is half the
    configured object store; draining the results one at a time must
    succeed (exercises plasma-full retry)."""
    @ray.remote
    class LargeMemoryActor:
        def some_expensive_task(self):
            # ~50 MB: half of the 10**8-byte object store configured above.
            return np.zeros(10**8 // 2, dtype=np.uint8)
    workers = [LargeMemoryActor.remote() for _ in range(5)]
    for _round in range(5):
        in_flight = [w.some_expensive_task.remote() for w in workers]
        # Drain one result at a time until every task has completed.
        while in_flight:
            _done, in_flight = ray.wait(in_flight, num_returns=1)
@pytest.mark.parametrize(
    "ray_start_regular", [{
        "_system_config": {
            "task_retry_delay_ms": 500
        }
    }],
    indirect=True)
def test_async_actor_task_retries(ray_start_regular):
    # Regression test for out-of-order async actor task retries.
    # https://github.com/ray-project/ray/issues/11683
    signal = SignalActor.remote()
    @ray.remote
    class DyingActor:
        def __init__(self):
            print("DyingActor init called")
            self.should_exit = False
        def set_should_exit(self):
            print("DyingActor.set_should_exit called")
            self.should_exit = True
        async def get(self, x, wait=False):
            print(f"DyingActor.get called with x={x}, wait={wait}")
            if self.should_exit:
                # Hard-kill the actor process so pending tasks must retry.
                os._exit(0)
            if wait:
                await signal.wait.remote()
            return x
    # Normal in order actor task retries should work
    dying = DyingActor.options(
        max_restarts=-1,
        max_task_retries=-1,
    ).remote()
    assert ray.get(dying.get.remote(1)) == 1
    ray.get(dying.set_should_exit.remote())
    assert ray.get(dying.get.remote(42)) == 42
    # Now let's try out of order retries:
    # Task seqno 0 will return
    # Task seqno 1 will be pending and retried later
    # Task seqno 2 will return
    # Task seqno 3 will crash the actor and retried later
    dying = DyingActor.options(
        max_restarts=-1,
        max_task_retries=-1,
    ).remote()
    # seqno 0
    ref_0 = dying.get.remote(0)
    assert ray.get(ref_0) == 0
    # seqno 1
    ref_1 = dying.get.remote(1, wait=True)
    # Need a barrier here to ensure ordering between the async and sync call.
    # Otherwise ref2 could be executed prior to ref1.
    for i in range(100):
        if ray.get(signal.cur_num_waiters.remote()) > 0:
            break
        time.sleep(.1)
    assert ray.get(signal.cur_num_waiters.remote()) > 0
    # seqno 2
    ref_2 = dying.set_should_exit.remote()
    assert ray.get(ref_2) is None
    # seqno 3, this will crash the actor because previous task set should exit
    # to true.
    ref_3 = dying.get.remote(3)
    # At this point the actor should be restarted. The two pending tasks
    # [ref_1, ref_3] should be retried, but not the completed tasks [ref_0,
    # ref_2]. Critically, if ref_2 was retried, ref_3 can never return.
    ray.get(signal.send.remote())
    assert ray.get(ref_1) == 1
    assert ray.get(ref_3) == 3
if __name__ == "__main__":
    # NOTE(review): pytest is already imported at module scope above; this
    # local re-import is redundant but harmless.
    import pytest
    # NOTE(review): the trailing "| python/ray/... | import os" text below
    # is dataset-extraction residue fused onto this line, not program code.
    sys.exit(pytest.main(["-v", __file__])) | python/ray/tests/test_failure_3.py | import os
import sys
import ray
import numpy as np
import pytest
import time
from ray._private.test_utils import SignalActor
# NOTE(review): duplicate copy of the test above (dataset dump stores both
# raw and parsed columns of the same file).
@pytest.mark.parametrize(
    "ray_start_cluster_head", [{
        "num_cpus": 5,
        "object_store_memory": 10**8,
    }],
    indirect=True)
def test_parallel_actor_fill_plasma_retry(ray_start_cluster_head):
    @ray.remote
    class LargeMemoryActor:
        def some_expensive_task(self):
            # ~50 MB payload: half of the configured object store.
            return np.zeros(10**8 // 2, dtype=np.uint8)
    actors = [LargeMemoryActor.remote() for _ in range(5)]
    for _ in range(5):
        pending = [a.some_expensive_task.remote() for a in actors]
        # Drain results one at a time until all tasks finish.
        while pending:
            [done], pending = ray.wait(pending, num_returns=1)
# NOTE(review): duplicate copy of the test above.
@pytest.mark.parametrize(
    "ray_start_regular", [{
        "_system_config": {
            "task_retry_delay_ms": 500
        }
    }],
    indirect=True)
def test_async_actor_task_retries(ray_start_regular):
    # https://github.com/ray-project/ray/issues/11683
    signal = SignalActor.remote()
    @ray.remote
    class DyingActor:
        def __init__(self):
            print("DyingActor init called")
            self.should_exit = False
        def set_should_exit(self):
            print("DyingActor.set_should_exit called")
            self.should_exit = True
        async def get(self, x, wait=False):
            print(f"DyingActor.get called with x={x}, wait={wait}")
            if self.should_exit:
                # Hard-kill the actor process so pending tasks must retry.
                os._exit(0)
            if wait:
                await signal.wait.remote()
            return x
    # Normal in order actor task retries should work
    dying = DyingActor.options(
        max_restarts=-1,
        max_task_retries=-1,
    ).remote()
    assert ray.get(dying.get.remote(1)) == 1
    ray.get(dying.set_should_exit.remote())
    assert ray.get(dying.get.remote(42)) == 42
    # Now let's try out of order retries:
    # Task seqno 0 will return
    # Task seqno 1 will be pending and retried later
    # Task seqno 2 will return
    # Task seqno 3 will crash the actor and retried later
    dying = DyingActor.options(
        max_restarts=-1,
        max_task_retries=-1,
    ).remote()
    # seqno 0
    ref_0 = dying.get.remote(0)
    assert ray.get(ref_0) == 0
    # seqno 1
    ref_1 = dying.get.remote(1, wait=True)
    # Need a barrier here to ensure ordering between the async and sync call.
    # Otherwise ref2 could be executed prior to ref1.
    for i in range(100):
        if ray.get(signal.cur_num_waiters.remote()) > 0:
            break
        time.sleep(.1)
    assert ray.get(signal.cur_num_waiters.remote()) > 0
    # seqno 2
    ref_2 = dying.set_should_exit.remote()
    assert ray.get(ref_2) is None
    # seqno 3, this will crash the actor because previous task set should exit
    # to true.
    ref_3 = dying.get.remote(3)
    # At this point the actor should be restarted. The two pending tasks
    # [ref_1, ref_3] should be retried, but not the completed tasks [ref_0,
    # ref_2]. Critically, if ref_2 was retried, ref_3 can never return.
    ray.get(signal.send.remote())
    assert ray.get(ref_1) == 1
    assert ray.get(ref_3) == 3
if __name__ == "__main__":
    # NOTE(review): redundant local re-import of pytest (already imported
    # at module scope); harmless.
    import pytest
    # NOTE(review): the trailing "| 0.501465 | ..." text below is dataset
    # extraction residue fused onto this line, not program code.
    sys.exit(pytest.main(["-v", __file__])) | 0.501465 | 0.355999 |
from rdkit import Chem
import cv2
import os
import numpy as np
import os.path
from rdkit import Chem
from rdkit.Chem import AllChem
class Compound:
    """One dataset compound: identifier, SMILES string, mutagenicity label
    and the parsed RDKit molecule."""
    # File extension used by fileExist()/image() to locate the rendered image.
    extension = 'png'
    #compound identifier in the dataset
    id=""
    #compound SMILE
    _SMILE=""
    #mutagen
    mutagen=False
    #rdk model
    rdkMolecule = None
    def __init__(self,id,SMILE,mut):
        self.id=id
        self._SMILE=SMILE
        self.description = self.id + ": "+ self._SMILE
        self.mutagen = mut
        # Parsed molecule; MolFromSmiles returns None for unparsable SMILES.
        self.rdkMolecule = Chem.MolFromSmiles(self._SMILE)
    def __repr__(self):
        return self.description
    def __str__(self):
        return self.description
    def fileExist(self,path):
        # True if "<path><id>.png" exists. Assumes *path* ends with a
        # separator — TODO confirm against callers.
        img = path+self.id+'.'+ Compound.extension
        return os.path.isfile(img)
    def image(self,path):
        # Load the compound's image via OpenCV (returns None if unreadable).
        img = path+self.id+'.'+Compound.extension
        return cv2.imread(str(img))
    def input(self, path='',t='image'):
        # Return a (features, label) pair; label is 1 when mutagen else 0.
        # t == 'image' yields the image array, anything else the SMILES.
        if t == 'image':
            return self.image(path),1 if self.mutagen else 0
        else:
            return self._SMILE,1 if self.mutagen else 0
    def InitialiseNeutralisationReactions(self):
        # (SMARTS pattern, neutral replacement) pairs used to neutralise
        # charged functional groups in a SMILES.
        patts= (
            # Imidazoles
            ('[n+;H]','n'),
            # Amines
            ('[N+;!H0]','N'),
            # Carboxylic acids and alcohols
            ('[$([O-]);!$([O-][#7])]','O'),
            # Thiols
            ('[S-;X1]','S'),
            # Sulfonamides
            ('[$([N-;X2]S(=O)=O)]','N'),
            # Enamines
            ('[$([N-;X2][C,N]=C)]','N'),
            # Tetrazoles
            ('[n-]','[nH]'),
            # Sulfoxides
            ('[$([S-]=O)]','S'),
            # Amides
            ('[$([N-]C=O)]','N'),
        )
        return [(Chem.MolFromSmarts(x),Chem.MolFromSmiles(y,False)) for x,y in patts]
    # Lazily-built cache of neutralisation reactions (see NeutraliseCharges).
    reac=None
    def NeutraliseCharges(self, reactions=None):
        # Return (smiles, changed): neutralised SMILES when any pattern
        # matched, otherwise the original SMILES unchanged.
        if reactions is None:
            if self.reac is None:
                self.reac=self.InitialiseNeutralisationReactions()
            reactions=self.reac
        mol = Chem.MolFromSmiles(self._SMILE)
        replaced = False
        for i,(reactant, product) in enumerate(reactions):
            # Replace repeatedly until no match remains for this pattern.
            while mol.HasSubstructMatch(reactant):
                replaced = True
                rms = AllChem.ReplaceSubstructs(mol, reactant, product)
                mol = rms[0]
        if replaced:
            return (Chem.MolToSmiles(mol,True), True)
        else:
            # NOTE(review): the trailing "| chemception/... | from rdkit ..."
            # text below is dataset-extraction residue, not program code.
            return (self._SMILE, False) | chemception/models/compound.py | from rdkit import Chem
import cv2
import os
import numpy as np
import os.path
from rdkit import Chem
from rdkit.Chem import AllChem
# NOTE(review): duplicate copy of the Compound class above (dataset dump
# stores both raw and parsed columns of the same file).
class Compound:
    """One dataset compound: identifier, SMILES string, mutagenicity label
    and the parsed RDKit molecule."""
    # File extension used by fileExist()/image().
    extension = 'png'
    #compound identifier in the dataset
    id=""
    #compound SMILE
    _SMILE=""
    #mutagen
    mutagen=False
    #rdk model
    rdkMolecule = None
    def __init__(self,id,SMILE,mut):
        self.id=id
        self._SMILE=SMILE
        self.description = self.id + ": "+ self._SMILE
        self.mutagen = mut
        # MolFromSmiles returns None for unparsable SMILES.
        self.rdkMolecule = Chem.MolFromSmiles(self._SMILE)
    def __repr__(self):
        return self.description
    def __str__(self):
        return self.description
    def fileExist(self,path):
        # True if "<path><id>.png" exists; *path* must end with a separator.
        img = path+self.id+'.'+ Compound.extension
        return os.path.isfile(img)
    def image(self,path):
        # Load the compound's image via OpenCV.
        img = path+self.id+'.'+Compound.extension
        return cv2.imread(str(img))
    def input(self, path='',t='image'):
        # (features, label) pair; label is 1 when mutagen else 0.
        if t == 'image':
            return self.image(path),1 if self.mutagen else 0
        else:
            return self._SMILE,1 if self.mutagen else 0
    def InitialiseNeutralisationReactions(self):
        # (SMARTS pattern, neutral replacement) pairs for charge removal.
        patts= (
            # Imidazoles
            ('[n+;H]','n'),
            # Amines
            ('[N+;!H0]','N'),
            # Carboxylic acids and alcohols
            ('[$([O-]);!$([O-][#7])]','O'),
            # Thiols
            ('[S-;X1]','S'),
            # Sulfonamides
            ('[$([N-;X2]S(=O)=O)]','N'),
            # Enamines
            ('[$([N-;X2][C,N]=C)]','N'),
            # Tetrazoles
            ('[n-]','[nH]'),
            # Sulfoxides
            ('[$([S-]=O)]','S'),
            # Amides
            ('[$([N-]C=O)]','N'),
        )
        return [(Chem.MolFromSmarts(x),Chem.MolFromSmiles(y,False)) for x,y in patts]
    # Lazily-built cache of neutralisation reactions.
    reac=None
    def NeutraliseCharges(self, reactions=None):
        # Return (smiles, changed) after neutralising charged groups.
        if reactions is None:
            if self.reac is None:
                self.reac=self.InitialiseNeutralisationReactions()
            reactions=self.reac
        mol = Chem.MolFromSmiles(self._SMILE)
        replaced = False
        for i,(reactant, product) in enumerate(reactions):
            while mol.HasSubstructMatch(reactant):
                replaced = True
                rms = AllChem.ReplaceSubstructs(mol, reactant, product)
                mol = rms[0]
        if replaced:
            return (Chem.MolToSmiles(mol,True), True)
        else:
            # NOTE(review): the trailing "| 0.144722 | ..." text below is
            # dataset-extraction residue, not program code.
            return (self._SMILE, False) | 0.144722 | 0.159283 |
from html.parser import HTMLParser
from goose.text import innerTrim
import html
class OutputFormatter(object):
def __init__(self, config):
self.top_node = None
self.config = config
# parser
self.parser = self.config.get_parser()
self.stopwords_class = config.stopwords_class
def get_language(self, article):
"""\
Returns the language is by the article or
the configuration language
"""
# we don't want to force the target laguage
# so we use the article.meta_lang
if self.config.use_meta_language == True:
if article.meta_lang:
return article.meta_lang[:2]
return self.config.target_language
def get_top_node(self):
return self.top_node
def get_formatted_text(self, article):
self.top_node = article.top_node
self.remove_negativescores_nodes()
self.links_to_text()
self.add_newline_to_br()
self.replace_with_text()
self.remove_fewwords_paragraphs(article)
return self.convert_to_text()
def convert_to_text(self):
txts = []
for node in list(self.get_top_node()):
txt = self.parser.getText(node)
if txt:
txt = html.unescape(txt)
txt_lis = innerTrim(txt).split(r'\n')
txts.extend(txt_lis)
return '\n\n'.join(txts)
def add_newline_to_br(self):
for e in self.parser.getElementsByTag(self.top_node, tag='br'):
e.text = r'\n'
def links_to_text(self):
"""\
cleans up and converts any nodes that
should be considered text into text
"""
self.parser.stripTags(self.get_top_node(), 'a')
def remove_negativescores_nodes(self):
"""\
if there are elements inside our top node
that have a negative gravity score,
let's give em the boot
"""
gravity_items = self.parser.css_select(self.top_node, "*[gravityScore]")
for item in gravity_items:
score = self.parser.getAttribute(item, 'gravityScore')
score = int(score, 0)
if score < 1:
item.getparent().remove(item)
def replace_with_text(self):
"""\
replace common tags with just
text so we don't have any crazy formatting issues
so replace <br>, <i>, <strong>, etc....
with whatever text is inside them
code : http://lxml.de/api/lxml.etree-module.html#strip_tags
"""
self.parser.stripTags(self.get_top_node(), 'b', 'strong', 'i', 'br', 'sup')
def remove_fewwords_paragraphs(self, article):
"""\
remove paragraphs that have less than x number of words,
would indicate that it's some sort of link
"""
all_nodes = self.parser.getElementsByTags(self.get_top_node(), ['*'])
all_nodes.reverse()
for el in all_nodes:
tag = self.parser.getTag(el)
text = self.parser.getText(el)
stop_words = self.stopwords_class(language=self.get_language(article)).get_stopword_count(text)
if (tag != 'br' or text != '\\r') and stop_words.get_stopword_count() < 3 \
and len(self.parser.getElementsByTag(el, tag='object')) == 0 \
and len(self.parser.getElementsByTag(el, tag='embed')) == 0:
self.parser.remove(el)
# TODO
# check if it is in the right place
else:
trimmed = self.parser.getText(el)
if trimmed.startswith("(") and trimmed.endswith(")"):
self.parser.remove(el)
class StandardOutputFormatter(OutputFormatter):
pass | goose/outputformatters.py | from html.parser import HTMLParser
from goose.text import innerTrim
import html
class OutputFormatter(object):
def __init__(self, config):
self.top_node = None
self.config = config
# parser
self.parser = self.config.get_parser()
self.stopwords_class = config.stopwords_class
def get_language(self, article):
"""\
Returns the language is by the article or
the configuration language
"""
# we don't want to force the target laguage
# so we use the article.meta_lang
if self.config.use_meta_language == True:
if article.meta_lang:
return article.meta_lang[:2]
return self.config.target_language
def get_top_node(self):
return self.top_node
def get_formatted_text(self, article):
self.top_node = article.top_node
self.remove_negativescores_nodes()
self.links_to_text()
self.add_newline_to_br()
self.replace_with_text()
self.remove_fewwords_paragraphs(article)
return self.convert_to_text()
def convert_to_text(self):
txts = []
for node in list(self.get_top_node()):
txt = self.parser.getText(node)
if txt:
txt = html.unescape(txt)
txt_lis = innerTrim(txt).split(r'\n')
txts.extend(txt_lis)
return '\n\n'.join(txts)
def add_newline_to_br(self):
for e in self.parser.getElementsByTag(self.top_node, tag='br'):
e.text = r'\n'
def links_to_text(self):
"""\
cleans up and converts any nodes that
should be considered text into text
"""
self.parser.stripTags(self.get_top_node(), 'a')
def remove_negativescores_nodes(self):
"""\
if there are elements inside our top node
that have a negative gravity score,
let's give em the boot
"""
gravity_items = self.parser.css_select(self.top_node, "*[gravityScore]")
for item in gravity_items:
score = self.parser.getAttribute(item, 'gravityScore')
score = int(score, 0)
if score < 1:
item.getparent().remove(item)
def replace_with_text(self):
"""\
replace common tags with just
text so we don't have any crazy formatting issues
so replace <br>, <i>, <strong>, etc....
with whatever text is inside them
code : http://lxml.de/api/lxml.etree-module.html#strip_tags
"""
self.parser.stripTags(self.get_top_node(), 'b', 'strong', 'i', 'br', 'sup')
def remove_fewwords_paragraphs(self, article):
"""\
remove paragraphs that have less than x number of words,
would indicate that it's some sort of link
"""
all_nodes = self.parser.getElementsByTags(self.get_top_node(), ['*'])
all_nodes.reverse()
for el in all_nodes:
tag = self.parser.getTag(el)
text = self.parser.getText(el)
stop_words = self.stopwords_class(language=self.get_language(article)).get_stopword_count(text)
if (tag != 'br' or text != '\\r') and stop_words.get_stopword_count() < 3 \
and len(self.parser.getElementsByTag(el, tag='object')) == 0 \
and len(self.parser.getElementsByTag(el, tag='embed')) == 0:
self.parser.remove(el)
# TODO
# check if it is in the right place
else:
trimmed = self.parser.getText(el)
if trimmed.startswith("(") and trimmed.endswith(")"):
self.parser.remove(el)
class StandardOutputFormatter(OutputFormatter):
pass | 0.567937 | 0.097219 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
from .util import (
display,
)
from .data import (
data_context,
)
def get_powershell_module_utils_imports(powershell_targets):
"""Return a dictionary of module_utils names mapped to sets of powershell file paths.
:type powershell_targets: list[TestTarget]
:rtype: dict[str, set[str]]
"""
module_utils = enumerate_module_utils()
imports_by_target_path = {}
for target in powershell_targets:
imports_by_target_path[target.path] = extract_powershell_module_utils_imports(target.path, module_utils)
imports = dict([(module_util, set()) for module_util in module_utils])
for target_path in imports_by_target_path:
for module_util in imports_by_target_path[target_path]:
imports[module_util].add(target_path)
for module_util in sorted(imports):
if not imports[module_util]:
display.warning('No imports found which use the "%s" module_util.' % module_util)
return imports
def get_powershell_module_utils_name(path): # type: (str) -> str
"""Return a namespace and name from the given module_utils path."""
base_path = data_context().content.module_utils_powershell_path
if data_context().content.collection:
prefix = 'ansible_collections.' + data_context().content.collection.prefix + '.plugins.module_utils.'
else:
prefix = ''
name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.sep, '.')
return name
def enumerate_module_utils():
"""Return a list of available module_utils imports.
:rtype: set[str]
"""
return set(get_powershell_module_utils_name(p)
for p in data_context().content.walk_files(data_context().content.module_utils_powershell_path)
if os.path.splitext(p)[1] == '.psm1')
def extract_powershell_module_utils_imports(path, module_utils):
"""Return a list of module_utils imports found in the specified source file.
:type path: str
:type module_utils: set[str]
:rtype: set[str]
"""
imports = set()
with open(path, 'r') as module_fd:
code = module_fd.read()
if '# POWERSHELL_COMMON' in code:
imports.add('Ansible.ModuleUtils.Legacy')
lines = code.splitlines()
line_number = 0
for line in lines:
line_number += 1
match = re.search(r'(?i)^#\s*(?:requires\s+-module(?:s?)|ansiblerequires\s+-powershell)\s*((?:Ansible|ansible_collections)\..+)', line)
if not match:
continue
import_name = match.group(1)
if import_name in module_utils:
imports.add(import_name)
else:
display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name))
return imports | ansible/venv/lib/python2.7/site-packages/ansible_test/_internal/powershell_import_analysis.py | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
from .util import (
display,
)
from .data import (
data_context,
)
def get_powershell_module_utils_imports(powershell_targets):
"""Return a dictionary of module_utils names mapped to sets of powershell file paths.
:type powershell_targets: list[TestTarget]
:rtype: dict[str, set[str]]
"""
module_utils = enumerate_module_utils()
imports_by_target_path = {}
for target in powershell_targets:
imports_by_target_path[target.path] = extract_powershell_module_utils_imports(target.path, module_utils)
imports = dict([(module_util, set()) for module_util in module_utils])
for target_path in imports_by_target_path:
for module_util in imports_by_target_path[target_path]:
imports[module_util].add(target_path)
for module_util in sorted(imports):
if not imports[module_util]:
display.warning('No imports found which use the "%s" module_util.' % module_util)
return imports
def get_powershell_module_utils_name(path): # type: (str) -> str
"""Return a namespace and name from the given module_utils path."""
base_path = data_context().content.module_utils_powershell_path
if data_context().content.collection:
prefix = 'ansible_collections.' + data_context().content.collection.prefix + '.plugins.module_utils.'
else:
prefix = ''
name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.sep, '.')
return name
def enumerate_module_utils():
"""Return a list of available module_utils imports.
:rtype: set[str]
"""
return set(get_powershell_module_utils_name(p)
for p in data_context().content.walk_files(data_context().content.module_utils_powershell_path)
if os.path.splitext(p)[1] == '.psm1')
def extract_powershell_module_utils_imports(path, module_utils):
"""Return a list of module_utils imports found in the specified source file.
:type path: str
:type module_utils: set[str]
:rtype: set[str]
"""
imports = set()
with open(path, 'r') as module_fd:
code = module_fd.read()
if '# POWERSHELL_COMMON' in code:
imports.add('Ansible.ModuleUtils.Legacy')
lines = code.splitlines()
line_number = 0
for line in lines:
line_number += 1
match = re.search(r'(?i)^#\s*(?:requires\s+-module(?:s?)|ansiblerequires\s+-powershell)\s*((?:Ansible|ansible_collections)\..+)', line)
if not match:
continue
import_name = match.group(1)
if import_name in module_utils:
imports.add(import_name)
else:
display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name))
return imports | 0.655005 | 0.162812 |
from django.test import TestCase
from .models import Image, Location, Category
class LocationTestClass(TestCase):
def setUp(self):
self.location = Location(location = 'Mombasa')
def test_instance(self):
self.assertTrue(isinstance(self.location, Location))
def test_save_location(self):
self.location.save_location()
locations = Location.objects.all()
self.assertTrue(len(locations) > 0)
def test_delete_location(self):
self.location.save_location()
Location.delete_location(self.location.id)
locations = Location.objects.all()
self.assertEqual(len(locations), 0)
def test_update_location(self):
Location.update_location(self.location.id, 'Paris')
self.assertEqual(self.location.location, 'Paris')
class CategoryTestClass(TestCase):
def setUp(self):
self.category = Category(category = 'Things')
def test_instance(self):
self.assertTrue(isinstance(self.category, Category))
def test_save_category(self):
self.category.save_category()
categories = Category.objects.all()
self.assertTrue(len(categories)>0)
def test_delete_category(self):
Category.delete_category(self.category.id)
categories = Category.objects.all()
self.assertEqual(len(categories), 0)
def test_update_category(self):
Category.update_category(self.category.id, 'Animals')
self.assertEqual(self.category.category, 'Animals')
class ImageTestClass(TestCase):
def setUp(self):
self.location = Location(location = 'Paris')
self.location.save_location()
self.category = Category(category = 'Animals')
self.category.save_category()
self.img = Image(img_path = 'darolle.png', img_name = 'passport photo', img_desc ='passport sized photo of Joan', img_location= self.location, img_category = self.category)
def test_instance(self):
self.assertTrue(isinstance(self.img, Image))
def test_save_image(self):
self.img.save_image()
images = Image.objects.all()
self.assertTrue(len(images)> 0)
def test_delete_image(self):
self.img.save_image()
Image.delete_image(self.img.id)
images = Image.objects.all()
self.assertEqual(len(images), 0)
def test_get_image_by_id(self):
self.img.save_image()
image = Image.get_image_by_id(self.img.id)
self.assertEqual(self.img.img_location, image)
def test_search_image(self):
self.img.save_image()
image = Image.search_image(self.img.img_category)
self.assertEqual(self.img, image)
def test_update_image(self):
self.img.save_image()
Image.update_image(self.img.id, 'mark.jpg')
self.assertEqual(self.img.img_path, 'mark.jpg')
def test_update_description(self):
self.img.save_image()
Image.update_desc(self.img.id, 'passport sized photo of Mark')
self.assertEqual(self.img.img_desc, 'passport sized photo of Mark')
def tearDown(self):
Location.objects.all().delete()
Category.objects.all().delete()
Image.objects.all().delete() | galleria/tests.py | from django.test import TestCase
from .models import Image, Location, Category
class LocationTestClass(TestCase):
def setUp(self):
self.location = Location(location = 'Mombasa')
def test_instance(self):
self.assertTrue(isinstance(self.location, Location))
def test_save_location(self):
self.location.save_location()
locations = Location.objects.all()
self.assertTrue(len(locations) > 0)
def test_delete_location(self):
self.location.save_location()
Location.delete_location(self.location.id)
locations = Location.objects.all()
self.assertEqual(len(locations), 0)
def test_update_location(self):
Location.update_location(self.location.id, 'Paris')
self.assertEqual(self.location.location, 'Paris')
class CategoryTestClass(TestCase):
def setUp(self):
self.category = Category(category = 'Things')
def test_instance(self):
self.assertTrue(isinstance(self.category, Category))
def test_save_category(self):
self.category.save_category()
categories = Category.objects.all()
self.assertTrue(len(categories)>0)
def test_delete_category(self):
Category.delete_category(self.category.id)
categories = Category.objects.all()
self.assertEqual(len(categories), 0)
def test_update_category(self):
Category.update_category(self.category.id, 'Animals')
self.assertEqual(self.category.category, 'Animals')
class ImageTestClass(TestCase):
def setUp(self):
self.location = Location(location = 'Paris')
self.location.save_location()
self.category = Category(category = 'Animals')
self.category.save_category()
self.img = Image(img_path = 'darolle.png', img_name = 'passport photo', img_desc ='passport sized photo of Joan', img_location= self.location, img_category = self.category)
def test_instance(self):
self.assertTrue(isinstance(self.img, Image))
def test_save_image(self):
self.img.save_image()
images = Image.objects.all()
self.assertTrue(len(images)> 0)
def test_delete_image(self):
self.img.save_image()
Image.delete_image(self.img.id)
images = Image.objects.all()
self.assertEqual(len(images), 0)
def test_get_image_by_id(self):
self.img.save_image()
image = Image.get_image_by_id(self.img.id)
self.assertEqual(self.img.img_location, image)
def test_search_image(self):
self.img.save_image()
image = Image.search_image(self.img.img_category)
self.assertEqual(self.img, image)
def test_update_image(self):
self.img.save_image()
Image.update_image(self.img.id, 'mark.jpg')
self.assertEqual(self.img.img_path, 'mark.jpg')
def test_update_description(self):
self.img.save_image()
Image.update_desc(self.img.id, 'passport sized photo of Mark')
self.assertEqual(self.img.img_desc, 'passport sized photo of Mark')
def tearDown(self):
Location.objects.all().delete()
Category.objects.all().delete()
Image.objects.all().delete() | 0.520009 | 0.459015 |
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect
from data_gen import *
from marks import ignore_order
from pyspark.sql.types import *
import pyspark.sql.functions as f
def four_op_df(spark, gen, length=2048, seed=0):
return gen_df(spark, StructGen([
('a', gen),
('b', gen),
('c', gen),
('d', gen)], nullable=False), length=length, seed=seed)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
def test_explode_makearray(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : four_op_df(spark, data_gen).selectExpr('a', 'explode(array(b, c, d))'))
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
def test_explode_litarray(data_gen):
array_lit = gen_scalar(ArrayGen(data_gen, min_length=3, max_length=3, nullable=False))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : four_op_df(spark, data_gen).select(f.col('a'), f.col('b'), f.col('c'),
f.explode(array_lit)))
# use a small `spark.rapids.sql.batchSizeBytes` to enforce input batches splitting up during explode
conf_to_enforce_split_input = {'spark.rapids.sql.batchSizeBytes': '8192',
'spark.sql.legacy.allowNegativeScaleOfDecimal': 'true'}
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen + struct_gens_sample + array_gens_sample + map_gens_sample, ids=idfn)
def test_explode_array_data(spark_tmp_path, data_gen):
data_gen = [int_gen, ArrayGen(data_gen)]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'explode(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('map_gen', map_gens_sample, ids=idfn)
def test_explode_map_data(spark_tmp_path, map_gen):
data_gen = [int_gen, map_gen]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'explode(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
def test_explode_nested_array_data(spark_tmp_path, data_gen):
data_gen = [int_gen, ArrayGen(ArrayGen(data_gen))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr(
'a', 'explode(b) as c').selectExpr('a', 'explode(c)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen + struct_gens_sample + array_gens_sample + map_gens_sample, ids=idfn)
def test_explode_outer_array_data(spark_tmp_path, data_gen):
data_gen = [int_gen, ArrayGen(data_gen)]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'explode_outer(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('map_gen', map_gens_sample, ids=idfn)
def test_explode_outer_map_data(spark_tmp_path, map_gen):
data_gen = [int_gen, map_gen]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'explode_outer(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
def test_explode_outer_nested_array_data(spark_tmp_path, data_gen):
data_gen = [int_gen, ArrayGen(ArrayGen(data_gen))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr(
'a', 'explode_outer(b) as c').selectExpr('a', 'explode_outer(c)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
def test_posexplode_makearray(data_gen):
assert_gpu_and_cpu_are_equal_collect(
lambda spark : four_op_df(spark, data_gen).selectExpr('posexplode(array(b, c, d))', 'a'))
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
def test_posexplode_litarray(data_gen):
array_lit = gen_scalar(ArrayGen(data_gen, min_length=3, max_length=3, nullable=False))
assert_gpu_and_cpu_are_equal_collect(
lambda spark : four_op_df(spark, data_gen).select(f.col('a'), f.col('b'), f.col('c'),
f.posexplode(array_lit)))
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen + struct_gens_sample + array_gens_sample + map_gens_sample, ids=idfn)
def test_posexplode_array_data(spark_tmp_path, data_gen):
data_gen = [int_gen, ArrayGen(data_gen)]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'posexplode(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('map_gen', map_gens_sample, ids=idfn)
def test_posexplode_map_data(spark_tmp_path, map_gen):
data_gen = [int_gen, map_gen]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'posexplode(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
def test_posexplode_nested_array_data(spark_tmp_path, data_gen):
data_gen = [int_gen, ArrayGen(ArrayGen(data_gen))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr(
'a', 'posexplode(b) as (pos, c)').selectExpr('a', 'pos', 'posexplode(c)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen + struct_gens_sample + array_gens_sample + map_gens_sample, ids=idfn)
def test_posexplode_outer_array_data(spark_tmp_path, data_gen):
data_gen = [int_gen, ArrayGen(data_gen)]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'posexplode_outer(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('map_gen', map_gens_sample, ids=idfn)
def test_posexplode_outer_map_data(spark_tmp_path, map_gen):
data_gen = [int_gen, map_gen]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr('a', 'posexplode_outer(b)'),
conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
def test_posexplode_nested_outer_array_data(spark_tmp_path, data_gen):
data_gen = [int_gen, ArrayGen(ArrayGen(data_gen))]
assert_gpu_and_cpu_are_equal_collect(
lambda spark: two_col_df(spark, *data_gen).selectExpr(
'a', 'posexplode_outer(b) as (pos, c)').selectExpr(
'a', 'pos', 'posexplode_outer(c)'),
conf=conf_to_enforce_split_input) | integration_tests/src/main/python/generate_expr_test.py |
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect
from data_gen import *
from marks import ignore_order
from pyspark.sql.types import *
import pyspark.sql.functions as f
def four_op_df(spark, gen, length=2048, seed=0):
    """Build a four-column DataFrame ('a'..'d') where every column uses the same generator.

    Columns are generated non-nullable so the explode expressions under test
    always have real array/struct inputs to work with.
    """
    columns = [(name, gen) for name in 'abcd']
    return gen_df(spark, StructGen(columns, nullable=False), length=length, seed=seed)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
def test_explode_makearray(data_gen):
    """explode() over an array assembled inline from three columns."""
    def build(spark):
        # keep 'a' alongside the exploded values so rows stay distinguishable
        return four_op_df(spark, data_gen).selectExpr('a', 'explode(array(b, c, d))')
    assert_gpu_and_cpu_are_equal_collect(build)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
def test_explode_litarray(data_gen):
    """explode() over a literal (scalar) three-element array."""
    array_lit = gen_scalar(ArrayGen(data_gen, min_length=3, max_length=3, nullable=False))
    def build(spark):
        df = four_op_df(spark, data_gen)
        return df.select(f.col('a'), f.col('b'), f.col('c'), f.explode(array_lit))
    assert_gpu_and_cpu_are_equal_collect(build)
# use a small `spark.rapids.sql.batchSizeBytes` to enforce input batches splitting up during explode
conf_to_enforce_split_input = {
    'spark.rapids.sql.batchSizeBytes': '8192',
    'spark.sql.legacy.allowNegativeScaleOfDecimal': 'true',
}
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen + struct_gens_sample + array_gens_sample + map_gens_sample, ids=idfn)
def test_explode_array_data(data_gen):
    """explode() over an array column; tiny batch size forces input batches to split."""
    # dropped the unused `spark_tmp_path` fixture: this test writes no files
    gens = [int_gen, ArrayGen(data_gen)]
    assert_gpu_and_cpu_are_equal_collect(
        lambda spark: two_col_df(spark, *gens).selectExpr('a', 'explode(b)'),
        conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('map_gen', map_gens_sample, ids=idfn)
def test_explode_map_data(map_gen):
    """explode() over a map column; tiny batch size forces input batches to split."""
    # dropped the unused `spark_tmp_path` fixture: this test writes no files
    gens = [int_gen, map_gen]
    assert_gpu_and_cpu_are_equal_collect(
        lambda spark: two_col_df(spark, *gens).selectExpr('a', 'explode(b)'),
        conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
def test_explode_nested_array_data(data_gen):
    """Two chained explode()s over an array-of-arrays column."""
    # dropped the unused `spark_tmp_path` fixture: this test writes no files
    gens = [int_gen, ArrayGen(ArrayGen(data_gen))]
    assert_gpu_and_cpu_are_equal_collect(
        lambda spark: two_col_df(spark, *gens).selectExpr(
            'a', 'explode(b) as c').selectExpr('a', 'explode(c)'),
        conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen + struct_gens_sample + array_gens_sample + map_gens_sample, ids=idfn)
def test_explode_outer_array_data(data_gen):
    """explode_outer() over an array column (null/empty arrays still yield a row)."""
    # dropped the unused `spark_tmp_path` fixture: this test writes no files
    gens = [int_gen, ArrayGen(data_gen)]
    assert_gpu_and_cpu_are_equal_collect(
        lambda spark: two_col_df(spark, *gens).selectExpr('a', 'explode_outer(b)'),
        conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('map_gen', map_gens_sample, ids=idfn)
def test_explode_outer_map_data(map_gen):
    """explode_outer() over a map column (null/empty maps still yield a row)."""
    # dropped the unused `spark_tmp_path` fixture: this test writes no files
    gens = [int_gen, map_gen]
    assert_gpu_and_cpu_are_equal_collect(
        lambda spark: two_col_df(spark, *gens).selectExpr('a', 'explode_outer(b)'),
        conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
def test_explode_outer_nested_array_data(data_gen):
    """Two chained explode_outer()s over an array-of-arrays column."""
    # dropped the unused `spark_tmp_path` fixture: this test writes no files
    gens = [int_gen, ArrayGen(ArrayGen(data_gen))]
    assert_gpu_and_cpu_are_equal_collect(
        lambda spark: two_col_df(spark, *gens).selectExpr(
            'a', 'explode_outer(b) as c').selectExpr('a', 'explode_outer(c)'),
        conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
def test_posexplode_makearray(data_gen):
    """posexplode() over an array assembled inline from three columns."""
    def build(spark):
        # 'a' is projected after the generated (pos, col) pair
        return four_op_df(spark, data_gen).selectExpr('posexplode(array(b, c, d))', 'a')
    assert_gpu_and_cpu_are_equal_collect(build)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
def test_posexplode_litarray(data_gen):
    """posexplode() over a literal (scalar) three-element array."""
    array_lit = gen_scalar(ArrayGen(data_gen, min_length=3, max_length=3, nullable=False))
    def build(spark):
        df = four_op_df(spark, data_gen)
        return df.select(f.col('a'), f.col('b'), f.col('c'), f.posexplode(array_lit))
    assert_gpu_and_cpu_are_equal_collect(build)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen + struct_gens_sample + array_gens_sample + map_gens_sample, ids=idfn)
def test_posexplode_array_data(data_gen):
    """posexplode() over an array column; tiny batch size forces input batches to split."""
    # dropped the unused `spark_tmp_path` fixture: this test writes no files
    gens = [int_gen, ArrayGen(data_gen)]
    assert_gpu_and_cpu_are_equal_collect(
        lambda spark: two_col_df(spark, *gens).selectExpr('a', 'posexplode(b)'),
        conf=conf_to_enforce_split_input)
#sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# After 3.1.0 is the min spark version we can drop this
@ignore_order(local=True)
@pytest.mark.parametrize('map_gen', map_gens_sample, ids=idfn)
def test_posexplode_map_data(map_gen):
    """posexplode() over a map column; tiny batch size forces input batches to split."""
    # dropped the unused `spark_tmp_path` fixture: this test writes no files
    gens = [int_gen, map_gen]
    assert_gpu_and_cpu_are_equal_collect(
        lambda spark: two_col_df(spark, *gens).selectExpr('a', 'posexplode(b)'),
        conf=conf_to_enforce_split_input)
# Sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# (can be removed once Spark 3.1.0 is the minimum supported version).
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
def test_posexplode_nested_array_data(spark_tmp_path, data_gen):
    """posexplode applied twice to unnest a doubly-nested array column."""
    gens = [int_gen, ArrayGen(ArrayGen(data_gen))]

    def build(spark):
        unnested_once = two_col_df(spark, *gens).selectExpr(
            'a', 'posexplode(b) as (pos, c)')
        return unnested_once.selectExpr('a', 'pos', 'posexplode(c)')

    assert_gpu_and_cpu_are_equal_collect(build, conf=conf_to_enforce_split_input)
# Sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# (can be removed once Spark 3.1.0 is the minimum supported version).
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen + struct_gens_sample + array_gens_sample + map_gens_sample, ids=idfn)
def test_posexplode_outer_array_data(spark_tmp_path, data_gen):
    """posexplode_outer of an array column, forcing the input to be split into batches."""
    gens = [int_gen, ArrayGen(data_gen)]

    def build(spark):
        return two_col_df(spark, *gens).selectExpr('a', 'posexplode_outer(b)')

    assert_gpu_and_cpu_are_equal_collect(build, conf=conf_to_enforce_split_input)
# Sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# (can be removed once Spark 3.1.0 is the minimum supported version).
@ignore_order(local=True)
@pytest.mark.parametrize('map_gen', map_gens_sample, ids=idfn)
def test_posexplode_outer_map_data(spark_tmp_path, map_gen):
    """posexplode_outer of a map column, forcing the input to be split into batches."""
    gens = [int_gen, map_gen]

    def build(spark):
        return two_col_df(spark, *gens).selectExpr('a', 'posexplode_outer(b)')

    assert_gpu_and_cpu_are_equal_collect(build, conf=conf_to_enforce_split_input)
# Sort locally because of https://github.com/NVIDIA/spark-rapids/issues/84
# (can be removed once Spark 3.1.0 is the minimum supported version).
@ignore_order(local=True)
@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
def test_posexplode_nested_outer_array_data(spark_tmp_path, data_gen):
    """posexplode_outer applied twice to unnest a doubly-nested array column."""
    gens = [int_gen, ArrayGen(ArrayGen(data_gen))]

    def build(spark):
        unnested_once = two_col_df(spark, *gens).selectExpr(
            'a', 'posexplode_outer(b) as (pos, c)')
        return unnested_once.selectExpr('a', 'pos', 'posexplode_outer(c)')

    assert_gpu_and_cpu_are_equal_collect(build, conf=conf_to_enforce_split_input)
from defoe import query_utils
from defoe.nlsArticles.query_utils import clean_headers_page_as_string, get_articles_eb, filter_terms_page
from pyspark.sql import Row, SparkSession, SQLContext
import yaml, os
def comp(o):
    """Sort key for page identifiers of the form "<page_number>_<rest>".

    :param o: identifier string whose text before the first "_" is an integer
    :type o: str or unicode
    :return: the leading page number
    :rtype: int
    """
    # Leftover '-----> ROSA ---->' debug print removed: it wrote to stdout on
    # every call, which is pure noise when this is used as a sort key.
    return int(o.split("_")[0])
def do_query(archives, config_file=None, logger=None, context=None):
    """
    Ingest NLS pages, clean and extract the articles of each to each page, and save them to HDFS, with some metadata associated with each article.

    Metadata collected: "title", "edition", "year", "place", "archive_filename", "source_text_filename", "text_unit",
    "text_unit_id", "num_text_unit", "type_archive", "model", "type_page", "header", "term", "definition",
    "num_articles", "num_page_words", "num_article_words",

    Data is saved as Dataframes into ElasticSearch: Index:'nls_articles' Type:'Encyclopaedia_Britannica'

    Example:
    'Encyclopaedia Britannica: or, A dictionary of arts and sciences':
    - archive_name: /home/tdm/datasets/eb_test/144850366
      articles:
        ACQUEST:
        - or Acquist, in law, signifies goods got by purchase or donation. See CoNtiUEST.
        ACQUI:
        - "a town of Italy, in the Dutchy of Montferrat, with a biihop\u2019s see, and\
          \ commodious baths. It was taken by the Spaniards in 1745, and retaken by the\
          \ Piedmontese in 1746; but after this, it was taken again and difrcantled by\
          \ the French, who afterwards forsook it. It is seated on the river Bormio, 25\
          \ miles N.W. of Genoa, and 30 S. of Cafal, 8. 30. E. long. 44. 40. lat."
        ACQUIESCENCE:
        - in commerce, is the consent that a person gives to the determination given either
          by arbitration, orbyaconful

    :param archives: RDD of defoe.nls.archive.Archive
    :type archives: pyspark.rdd.PipelinedRDD
    :param config_file: query configuration file
    :type config_file: str or unicode
    :param logger: logger (unused)
    :type logger: py4j.java_gateway.JavaObject
    :return: per-edition article records, sorted by edition:
        [(edition, [(page_number, article_metadata_dict), ...]), ...]
    :rtype: list
    """
    with open(config_file, "r") as f:
        # NOTE(review): yaml.load() without an explicit Loader is deprecated and
        # unsafe on untrusted input; yaml.safe_load(f) is the usual fix — TODO
        # confirm the config files are trusted before changing.
        config = yaml.load(f)
    # Platform-specific tool-directory suffix consumed by filter_terms_page.
    if "os_type" in config:
        if config["os_type"] == "linux":
            os_type = "sys-i386-64"
        else:
            os_type = "sys-i386-snow-leopard"
    else:
        os_type = "sys-i386-64"
    if "defoe_path" in config:
        defoe_path = config["defoe_path"]
    else:
        defoe_path = "./"
    text_unit = "page"
    # [(title, edition, year, place, archive filename,
    #   num pages, type of archive, model, document)]
    documents = archives.flatMap(
        lambda archive: [(document.title, document.edition, document.year,
            document.place, document.archive.filename, document.num_pages,
            document.document_type, document.model, document) for document in list(archive)])
    # One row per page: document metadata + page id + the cleaned/filtered
    # articles structure returned by filter_terms_page + the page word count.
    pages_clean = documents.flatMap(
        lambda year_document: [(year_document[0], year_document[1], year_document[2],
            year_document[3], year_document[4], page.code, text_unit, page.page_id,
            year_document[5], year_document[6], year_document[7],
            filter_terms_page(page, defoe_path, os_type), len(page.words)) for page in year_document[8]])
    # One row per article on the page. filter_terms_page(...) is indexed here as
    # [0]=page type, [1]=header, [2]={term: (definition, term_id_in_page,
    # last_term_in_page, related_terms)}, [3]=num_articles — inferred from the
    # metadata keys assigned in matching_pages below; TODO confirm against
    # defoe.nlsArticles.query_utils.filter_terms_page.
    pages_articles = pages_clean.flatMap(
        lambda articles_page: [(articles_page[0], articles_page[1], articles_page[2],
            articles_page[3], articles_page[4], articles_page[5], articles_page[6], articles_page[7],
            articles_page[8], articles_page[9], articles_page[10],
            articles_page[11][0], articles_page[11][1], key, articles_page[11][2][key][0], articles_page[11][2][key][1],
            articles_page[11][2][key][2], articles_page[11][2][key][3], articles_page[11][3],
            articles_page[12], len(articles_page[11][2][key][0].split(" "))) for key in articles_page[11][2]])
    # Re-key every article row as (edition, (page_number, metadata_dict)) so the
    # rows can be grouped per edition and ordered downstream. The page number is
    # parsed from a text_unit_id of the form "Page<N>".
    matching_pages = pages_articles.flatMap(
        lambda row_page:
            [(row_page[1], (int(row_page[7].split("Page")[1]),
                {"title": row_page[0],
                 "edition": row_page[1],
                 "year": row_page[2],
                 "place": row_page[3],
                 "archive_filename": row_page[4],
                 "source_text_file": row_page[5],
                 "text_unit": row_page[6],
                 "text_unit_id": row_page[7],
                 "num_text_unit": row_page[8],
                 "type_archive": row_page[9],
                 "model": row_page[10],
                 "type_page": row_page[11],
                 "header": row_page[12],
                 "term": row_page[13],
                 "definition": row_page[14],
                 "term_id_in_page": row_page[15],
                 "last_term_in_page": row_page[16],
                 "related_terms": row_page[17],
                 "num_articles": row_page[18],
                 "num_page_words": row_page[19],
                 "num_article_words": row_page[20]}))])
    # Group per edition, materialize each group, sort by edition, and collect
    # everything to the driver.
    result = matching_pages \
        .groupByKey() \
        .map(lambda date_context:
            (date_context[0], list(date_context[1]))).sortByKey() \
        .collect()
return result | defoe/nlsArticles/queries/write_articles_pages_df_yaml.py | from defoe import query_utils
from defoe.nlsArticles.query_utils import clean_headers_page_as_string, get_articles_eb, filter_terms_page
from pyspark.sql import Row, SparkSession, SQLContext
import yaml, os
def comp(o):
num_page=o.split("_")[0]
print("-----> ROSA ----> %s" %num_page)
return(int(num_page))
def do_query(archives, config_file=None, logger=None, context=None):
"""
Ingest NLS pages, clean and extract the articles of each to each page, and save them to HDFS, with some metadata associated with each article.
Metadata collected: "title", "edition", "year", "place", "archive_filename", "source_text_filename", "text_unit",
"text_unit_id", "num_text_unit", "type_archive", "model", "type_page", "header", "term", "definition",
"num_articles", "num_page_words", "num_article_words",
Data is saved as Dataframes into ElasticSearch: Index:'nls_articles' Type:'Encyclopaedia_Britannica'
Example:
'Encyclopaedia Britannica: or, A dictionary of arts and sciences':
- archive_name: /home/tdm/datasets/eb_test/144850366
articles:
ACQUEST:
- or Acquist, in law, signifies goods got by purchase or donation. See CoNtiUEST.
ACQUI:
- "a town of Italy, in the Dutchy of Montferrat, with a biihop\u2019s see, and\
\ commodious baths. It was taken by the Spaniards in 1745, and retaken by the\
\ Piedmontese in 1746; but after this, it was taken again and difrcantled by\
\ the French, who afterwards forsook it. It is seated on the river Bormio, 25\
\ miles N.W. of Genoa, and 30 S. of Cafal, 8. 30. E. long. 44. 40. lat."
ACQUIESCENCE:
- in commerce, is the consent that a person gives to the determination given either
by arbitration, orbyaconful
:param archives: RDD of defoe.nls.archive.Archive
:type archives: pyspark.rdd.PipelinedRDD
:param config_file: query configuration file
:type config_file: str or unicode
:param logger: logger (unused)
:type logger: py4j.java_gateway.JavaObject
:return: "0"
:rtype: string
"""
with open(config_file, "r") as f:
config = yaml.load(f)
if "os_type" in config:
if config["os_type"] == "linux":
os_type = "sys-i386-64"
else:
os_type= "sys-i386-snow-leopard"
else:
os_type = "sys-i386-64"
if "defoe_path" in config :
defoe_path = config["defoe_path"]
else:
defoe_path = "./"
text_unit = "page"
# [(tittle, edition, year, place, archive filename,
# num pages, type of archive, type of disribution, model)]
documents = archives.flatMap(
lambda archive: [(document.title, document.edition, document.year, \
document.place, document.archive.filename, document.num_pages, \
document.document_type, document.model, document) for document in list(archive)])
# [(tittle, edition, year, place, archive filename, page filename, text_unit, tex_unit_id, num_pages,
# type of archive, type of disribution, model, page_type, header, articles_page_dictionary, num_articles_page, num_page_words)]
pages_clean = documents.flatMap(
lambda year_document: [(year_document[0], year_document[1], year_document[2],\
year_document[3], year_document[4], page.code, text_unit, page.page_id, \
year_document[5], year_document[6], year_document[7], \
filter_terms_page(page, defoe_path, os_type), len(page.words)) for page in year_document[8]])
# [(tittle, edition, year, place, archive filename, page filename , text_unit, tex_unit_id, num_pages,
# type of archive, type of disribution, model, page_type, header, term, (definition, num_article_page), num_articles_per_page, num_page_words, num_artciles_words)]
pages_articles = pages_clean.flatMap(
lambda articles_page: [(articles_page[0], articles_page[1], articles_page[2],\
articles_page[3], articles_page[4], articles_page[5], articles_page[6], articles_page[7], \
articles_page[8], articles_page[9], articles_page[10], \
articles_page[11][0], articles_page[11][1], key, articles_page[11][2][key][0], articles_page[11][2][key][1], \
articles_page[11][2][key][2], articles_page[11][2][key][3], articles_page[11][3],\
articles_page[12], len(articles_page[11][2][key][0].split(" "))) for key in articles_page[11][2]])
matching_pages = pages_articles.flatMap(
lambda row_page:
[(row_page[1], (int(row_page[7].split("Page")[1]),
{"title": row_page[0],
"edition": row_page[1],
"year": row_page[2],
"place": row_page[3],
"archive_filename": row_page[4],
"source_text_file": row_page[5],
"text_unit": row_page[6],
"text_unit_id": row_page[7],
"num_text_unit": row_page[8],
"type_archive": row_page[9],
"model": row_page[10],
"type_page": row_page[11],
"header": row_page[12],
"term": row_page[13],
"definition": row_page[14],
"term_id_in_page": row_page[15],
"last_term_in_page": row_page[16],
"related_terms": row_page[17],
"num_articles": row_page[18],
"num_page_words": row_page[19],
"num_article_words": row_page[20]}))])
result = matching_pages \
.groupByKey() \
.map(lambda date_context:
(date_context[0], list(date_context[1]))).sortByKey() \
.collect()
return result | 0.521959 | 0.335895 |
"""Pooling layers definitions."""
import tensorflow as tf
class MAC(tf.keras.layers.Layer):
  """Maximum Activations of Convolutions (MAC) pooling layer.

  MAC is a global max-pool over every spatial position of each feature map.
  Reference: https://arxiv.org/abs/1511.05879
  """

  def call(self, x, axis=None):
    """Applies MAC pooling to a batch of feature maps.

    Args:
      x: [B, H, W, D] A float32 Tensor.
      axis: Dimensions to reduce; defaults to the spatial dims [1, 2].

    Returns:
      [B, D] A float32 Tensor.
    """
    reduce_axes = [1, 2] if axis is None else axis
    return mac(x, axis=reduce_axes)
class SPoC(tf.keras.layers.Layer):
  """Sum-pooled convolutional features (SPoC) layer.

  SPoC averages each feature map over its spatial positions.
  Reference: https://arxiv.org/pdf/1510.07493.pdf
  """

  def call(self, x, axis=None):
    """Applies SPoC pooling to a batch of feature maps.

    Args:
      x: [B, H, W, D] A float32 Tensor.
      axis: Dimensions to reduce; defaults to the spatial dims [1, 2].

    Returns:
      [B, D] A float32 Tensor.
    """
    reduce_axes = [1, 2] if axis is None else axis
    return spoc(x, reduce_axes)
class GeM(tf.keras.layers.Layer):
  """Generalized mean (GeM) pooling layer.

  Computes the generalized mean of each channel of a feature map.
  Reference: https://arxiv.org/abs/1711.02512
  """

  def __init__(self, power=3.):
    """Creates the layer.

    Args:
      power: Float > 0, the GeM inverse exponent. power = 1 recovers average
        pooling and power -> inf approaches spatial max-pooling; power > 1
        sharpens the pooled map toward the most salient activations.
    """
    super(GeM, self).__init__()
    self.power = power
    self.eps = 1e-6

  def call(self, x, axis=None):
    """Applies GeM pooling to a batch of feature maps.

    Args:
      x: [B, H, W, D] A float32 Tensor.
      axis: Dimensions to reduce; defaults to the spatial dims [1, 2].

    Returns:
      [B, D] A float32 Tensor.
    """
    reduce_axes = [1, 2] if axis is None else axis
    return gem(x, power=self.power, eps=self.eps, axis=reduce_axes)
class GeMPooling2D(tf.keras.layers.Layer):
  """Windowed generalized mean (GeM) pooling for spatial data."""

  def __init__(self, power=20., pool_size=(2, 2), strides=None,
               padding='valid', data_format='channels_last'):
    """Creates the layer.

    Args:
      power: Float > 0, the GeM inverse exponent.
      pool_size: Integer or tuple of 2 integers, downscale factors
        (vertical, horizontal).
      strides: Integer, tuple of 2 integers, or None. If None, it will
        default to `pool_size`.
      padding: 'valid' (no padding) or 'same' (pad evenly so the output keeps
        the input's height/width).
      data_format: 'channels_last' for (batch, height, width, channels) inputs
        or 'channels_first' for (batch, channels, height, width) inputs.
    """
    super(GeMPooling2D, self).__init__()
    self.power = power
    self.eps = 1e-6
    self.pool_size = pool_size
    self.strides = strides
    self.padding = padding.upper()
    # tf.nn.avg_pool expects 'NHWC'/'NCHW' rather than the Keras-style names.
    conv_formats = {'channels_last': 'NHWC',
                    'channels_first': 'NCHW',
                    }
    self.data_format = conv_formats[data_format]

  def call(self, x):
    # GeM over each window: avg-pool of x**p, then the 1/p root.
    powered = tf.pow(x, self.power)
    pooled = tf.nn.avg_pool(powered, self.pool_size, self.strides,
                            self.padding, self.data_format)
    return tf.pow(pooled, 1. / self.power)
def mac(x, axis=None):
  """Performs global max pooling (MAC).

  Args:
    x: [B, H, W, D] A float32 Tensor.
    axis: Dimensions to reduce; defaults to the spatial dims [1, 2].

  Returns:
    [B, D] A float32 Tensor.
  """
  reduce_axes = [1, 2] if axis is None else axis
  return tf.reduce_max(x, axis=reduce_axes, keepdims=False)
def spoc(x, axis=None):
  """Performs average pooling (SPoC).

  Args:
    x: [B, H, W, D] A float32 Tensor.
    axis: Dimensions to reduce; defaults to the spatial dims [1, 2].

  Returns:
    [B, D] A float32 Tensor.
  """
  reduce_axes = [1, 2] if axis is None else axis
  return tf.reduce_mean(x, axis=reduce_axes, keepdims=False)
def gem(x, axis=None, power=3., eps=1e-6):
  """Performs generalized mean pooling (GeM).

  Args:
    x: [B, H, W, D] A float32 Tensor.
    axis: Dimensions to reduce. By default, dimensions [1, 2] are reduced.
    power: Float, power > 0 is an inverse exponent parameter (GeM power).
    eps: Float, parameter for numerical stability.

  Returns:
    output: [B, D] A float32 Tensor.
  """
  if axis is None:
    axis = [1, 2]
  # Clamp to eps before exponentiation: keeps x**power well-defined for
  # non-positive activations and avoids a zero base for the 1/power root.
  tmp = tf.pow(tf.maximum(x, eps), power)
  out = tf.pow(tf.reduce_mean(tmp, axis=axis, keepdims=False), 1. / power)
return out | research/delf/delf/python/pooling_layers/pooling.py | """Pooling layers definitions."""
import tensorflow as tf
class MAC(tf.keras.layers.Layer):
"""Global max pooling (MAC) layer.
Maximum Activations of Convolutions (MAC) is simply constructed by
max-pooling over all dimensions per feature map. See
https://arxiv.org/abs/1511.05879 for a reference.
"""
def call(self, x, axis=None):
"""Invokes the MAC pooling instance.
Args:
x: [B, H, W, D] A float32 Tensor.
axis: Dimensions to reduce. By default, dimensions [1, 2] are reduced.
Returns:
output: [B, D] A float32 Tensor.
"""
if axis is None:
axis = [1, 2]
return mac(x, axis=axis)
class SPoC(tf.keras.layers.Layer):
"""Average pooling (SPoC) layer.
Sum-pooled convolutional features (SPoC) is based on the sum pooling of the
deep features. See https://arxiv.org/pdf/1510.07493.pdf for a reference.
"""
def call(self, x, axis=None):
"""Invokes the SPoC instance.
Args:
x: [B, H, W, D] A float32 Tensor.
axis: Dimensions to reduce. By default, dimensions [1, 2] are reduced.
Returns:
output: [B, D] A float32 Tensor.
"""
if axis is None:
axis = [1, 2]
return spoc(x, axis)
class GeM(tf.keras.layers.Layer):
"""Generalized mean pooling (GeM) layer.
Generalized Mean Pooling (GeM) computes the generalized mean of each
channel in a tensor. See https://arxiv.org/abs/1711.02512 for a reference.
"""
def __init__(self, power=3.):
"""Initialization of the generalized mean pooling (GeM) layer.
Args:
power: Float power > 0 is an inverse exponent parameter, used during the
generalized mean pooling computation. Setting this exponent as power > 1
increases the contrast of the pooled feature map and focuses on the
salient features of the image. GeM is a generalization of the average
pooling commonly used in classification networks (power = 1) and of
spatial max-pooling layer (power = inf).
"""
super(GeM, self).__init__()
self.power = power
self.eps = 1e-6
def call(self, x, axis=None):
"""Invokes the GeM instance.
Args:
x: [B, H, W, D] A float32 Tensor.
axis: Dimensions to reduce. By default, dimensions [1, 2] are reduced.
Returns:
output: [B, D] A float32 Tensor.
"""
if axis is None:
axis = [1, 2]
return gem(x, power=self.power, eps=self.eps, axis=axis)
class GeMPooling2D(tf.keras.layers.Layer):
def __init__(self, power=20., pool_size=(2, 2), strides=None,
padding='valid', data_format='channels_last'):
"""Generalized mean pooling (GeM) pooling operation for spatial data.
Args:
power: Float, power > 0. is an inverse exponent parameter (GeM power).
pool_size: Integer or tuple of 2 integers, factors by which to
downscale (vertical, horizontal)
strides: Integer, tuple of 2 integers, or None. Strides values.
If None, it will default to `pool_size`.
padding: One of `valid` or `same`. `valid` means no padding.
`same` results in padding evenly to the left/right or up/down of
the input such that output has the same height/width dimension as the
input.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape `(batch, height,
width, channels)` while `channels_first` corresponds to inputs with
shape `(batch, channels, height, width)`.
"""
super(GeMPooling2D, self).__init__()
self.power = power
self.eps = 1e-6
self.pool_size = pool_size
self.strides = strides
self.padding = padding.upper()
data_format_conv = {'channels_last': 'NHWC',
'channels_first': 'NCHW',
}
self.data_format = data_format_conv[data_format]
def call(self, x):
tmp = tf.pow(x, self.power)
tmp = tf.nn.avg_pool(tmp, self.pool_size, self.strides,
self.padding, self.data_format)
out = tf.pow(tmp, 1. / self.power)
return out
def mac(x, axis=None):
"""Performs global max pooling (MAC).
Args:
x: [B, H, W, D] A float32 Tensor.
axis: Dimensions to reduce. By default, dimensions [1, 2] are reduced.
Returns:
output: [B, D] A float32 Tensor.
"""
if axis is None:
axis = [1, 2]
return tf.reduce_max(x, axis=axis, keepdims=False)
def spoc(x, axis=None):
"""Performs average pooling (SPoC).
Args:
x: [B, H, W, D] A float32 Tensor.
axis: Dimensions to reduce. By default, dimensions [1, 2] are reduced.
Returns:
output: [B, D] A float32 Tensor.
"""
if axis is None:
axis = [1, 2]
return tf.reduce_mean(x, axis=axis, keepdims=False)
def gem(x, axis=None, power=3., eps=1e-6):
"""Performs generalized mean pooling (GeM).
Args:
x: [B, H, W, D] A float32 Tensor.
axis: Dimensions to reduce. By default, dimensions [1, 2] are reduced.
power: Float, power > 0 is an inverse exponent parameter (GeM power).
eps: Float, parameter for numerical stability.
Returns:
output: [B, D] A float32 Tensor.
"""
if axis is None:
axis = [1, 2]
tmp = tf.pow(tf.maximum(x, eps), power)
out = tf.pow(tf.reduce_mean(tmp, axis=axis, keepdims=False), 1. / power)
return out | 0.978385 | 0.843154 |
import re
import xml.etree.cElementTree as ElementTree
from hashlib import md5
import pytest
from html5lib import getTreeBuilder, HTMLParser
from test_build_html import flat_dict, tail_check, check_xpath
from sphinx.util.docutils import is_html5_writer_available
TREE_BUILDER = getTreeBuilder('etree', implementation=ElementTree)
HTML_PARSER = HTMLParser(TREE_BUILDER, namespaceHTMLElements=False)
etree_cache = {}
# NOTE(review): a pytest.mark applied to a fixture function has no effect
# (marks only act on test items) — this skipif guard presumably belongs on the
# tests that use the fixture. TODO confirm intent before removing.
@pytest.mark.skipif(not is_html5_writer_available(), reason='HTML5 writer is not available')
@pytest.fixture(scope='module')
def cached_etree_parse():
    """Module-scoped fixture yielding an HTML-file parser with a 1-entry cache."""
    def parse(fname):
        # Reuse the tree if this exact file is the one cached last.
        if fname in etree_cache:
            return etree_cache[fname]
        with (fname).open('rb') as fp:
            etree = HTML_PARSER.parse(fp)
            # Deliberately keep at most one entry: clear before inserting so
            # only the most recently parsed file is retained.
            etree_cache.clear()
            etree_cache[fname] = etree
            return etree
    yield parse
    # Drop the cached tree once this module's tests are done.
    etree_cache.clear()
@pytest.mark.skipif(not is_html5_writer_available(), reason='HTML5 writer is not available')
@pytest.mark.parametrize("fname,expect", flat_dict({
'images.html': [
(".//img[@src='_images/img.png']", ''),
(".//img[@src='_images/img1.png']", ''),
(".//img[@src='_images/simg.png']", ''),
(".//img[@src='_images/svgimg.svg']", ''),
(".//a[@href='_sources/images.txt']", ''),
],
'subdir/images.html': [
(".//img[@src='../_images/img1.png']", ''),
(".//img[@src='../_images/rimg.png']", ''),
],
'subdir/includes.html': [
(".//a[@class='reference download internal']", ''),
(".//img[@src='../_images/img.png']", ''),
(".//p", 'This is an include file.'),
(".//pre/span", 'line 1'),
(".//pre/span", 'line 2'),
],
'includes.html': [
(".//pre", '<NAME>'),
(".//a[@class='reference download internal']", ''),
(".//pre/span", '"quotes"'),
(".//pre/span", "'included'"),
(".//pre/span[@class='s2']", 'üöä'),
(".//div[@class='inc-pyobj1 highlight-text notranslate']//pre",
r'^class Foo:\n pass\n\s*$'),
(".//div[@class='inc-pyobj2 highlight-text notranslate']//pre",
r'^ def baz\(\):\n pass\n\s*$'),
(".//div[@class='inc-lines highlight-text notranslate']//pre",
r'^class Foo:\n pass\nclass Bar:\n$'),
(".//div[@class='inc-startend highlight-text notranslate']//pre",
'^foo = "Including Unicode characters: üöä"\\n$'),
(".//div[@class='inc-preappend highlight-text notranslate']//pre",
r'(?m)^START CODE$'),
(".//div[@class='inc-pyobj-dedent highlight-python notranslate']//span",
r'def'),
(".//div[@class='inc-tab3 highlight-text notranslate']//pre",
r'-| |-'),
(".//div[@class='inc-tab8 highlight-python notranslate']//pre/span",
r'-| |-'),
],
'autodoc.html': [
(".//dt[@id='autodoc_target.Class']", ''),
(".//dt[@id='autodoc_target.function']/em", r'\*\*kwds'),
(".//dd/p", r'Return spam\.'),
],
'extapi.html': [
(".//strong", 'from class: Bar'),
],
'markup.html': [
(".//title", 'set by title directive'),
(".//p/em", 'Section author: <NAME>'),
(".//p/em", 'Module author: <NAME>'),
# created by the meta directive
(".//meta[@name='author'][@content='Me']", ''),
(".//meta[@name='keywords'][@content='docs, sphinx']", ''),
# a label created by ``.. _label:``
(".//div[@id='label']", ''),
# code with standard code blocks
(".//pre", '^some code$'),
# an option list
(".//span[@class='option']", '--help'),
# admonitions
(".//p[@class='admonition-title']", 'My Admonition'),
(".//div[@class='admonition note']/p", 'Note text.'),
(".//div[@class='admonition warning']/p", 'Warning text.'),
# inline markup
(".//li/p/strong", r'^command\\n$'),
(".//li/p/strong", r'^program\\n$'),
(".//li/p/em", r'^dfn\\n$'),
(".//li/p/kbd", r'^kbd\\n$'),
(".//li/p/span", 'File \N{TRIANGULAR BULLET} Close'),
(".//li/p/code/span[@class='pre']", '^a/$'),
(".//li/p/code/em/span[@class='pre']", '^varpart$'),
(".//li/p/code/em/span[@class='pre']", '^i$'),
(".//a[@href='https://www.python.org/dev/peps/pep-0008']"
"[@class='pep reference external']/strong", 'PEP 8'),
(".//a[@href='https://www.python.org/dev/peps/pep-0008']"
"[@class='pep reference external']/strong",
'Python Enhancement Proposal #8'),
(".//a[@href='https://tools.ietf.org/html/rfc1.html']"
"[@class='rfc reference external']/strong", 'RFC 1'),
(".//a[@href='https://tools.ietf.org/html/rfc1.html']"
"[@class='rfc reference external']/strong", 'Request for Comments #1'),
(".//a[@href='objects.html#envvar-HOME']"
"[@class='reference internal']/code/span[@class='pre']", 'HOME'),
(".//a[@href='#with']"
"[@class='reference internal']/code/span[@class='pre']", '^with$'),
(".//a[@href='#grammar-token-try-stmt']"
"[@class='reference internal']/code/span", '^statement$'),
(".//a[@href='#some-label'][@class='reference internal']/span", '^here$'),
(".//a[@href='#some-label'][@class='reference internal']/span", '^there$'),
(".//a[@href='subdir/includes.html']"
"[@class='reference internal']/span", 'Including in subdir'),
(".//a[@href='objects.html#cmdoption-python-c']"
"[@class='reference internal']/code/span[@class='pre']", '-c'),
# abbreviations
(".//abbr[@title='abbreviation']", '^abbr$'),
# version stuff
(".//div[@class='versionadded']/p/span", 'New in version 0.6: '),
(".//div[@class='versionadded']/p/span",
tail_check('First paragraph of versionadded')),
(".//div[@class='versionchanged']/p/span",
tail_check('First paragraph of versionchanged')),
(".//div[@class='versionchanged']/p",
'Second paragraph of versionchanged'),
# footnote reference
(".//a[@class='footnote-reference brackets']", r'1'),
# created by reference lookup
(".//a[@href='index.html#ref1']", ''),
# ``seealso`` directive
(".//div/p[@class='admonition-title']", 'See also'),
# a ``hlist`` directive
(".//table[@class='hlist']/tbody/tr/td/ul/li/p", '^This$'),
# a ``centered`` directive
(".//p[@class='centered']/strong", 'LICENSE'),
# a glossary
(".//dl/dt[@id='term-boson']", 'boson'),
# a production list
(".//pre/strong", 'try_stmt'),
(".//pre/a[@href='#grammar-token-try1-stmt']/code/span", 'try1_stmt'),
# tests for ``only`` directive
(".//p", 'A global substitution.'),
(".//p", 'In HTML.'),
(".//p", 'In both.'),
(".//p", 'Always present'),
# tests for ``any`` role
(".//a[@href='#with']/span", 'headings'),
(".//a[@href='objects.html#func_without_body']/code/span", 'objects'),
# tests for numeric labels
(".//a[@href='#id1'][@class='reference internal']/span", 'Testing various markup'),
],
'objects.html': [
(".//dt[@id='mod.Cls.meth1']", ''),
(".//dt[@id='errmod.Error']", ''),
(".//dt/code", r'long\(parameter,\s* list\)'),
(".//dt/code", 'another one'),
(".//a[@href='#mod.Cls'][@class='reference internal']", ''),
(".//dl[@class='userdesc']", ''),
(".//dt[@id='userdesc-myobj']", ''),
(".//a[@href='#userdesc-myobj'][@class='reference internal']", ''),
# docfields
(".//a[@class='reference internal'][@href='#TimeInt']/em", 'TimeInt'),
(".//a[@class='reference internal'][@href='#Time']", 'Time'),
(".//a[@class='reference internal'][@href='#errmod.Error']/strong", 'Error'),
# C references
(".//span[@class='pre']", 'CFunction()'),
(".//a[@href='#c.Sphinx_DoSomething']", ''),
(".//a[@href='#c.SphinxStruct.member']", ''),
(".//a[@href='#c.SPHINX_USE_PYTHON']", ''),
(".//a[@href='#c.SphinxType']", ''),
(".//a[@href='#c.sphinx_global']", ''),
# test global TOC created by toctree()
(".//ul[@class='current']/li[@class='toctree-l1 current']/a[@href='#']",
'Testing object descriptions'),
(".//li[@class='toctree-l1']/a[@href='markup.html']",
'Testing various markup'),
# test unknown field names
(".//dt[@class='field-odd']", 'Field_name'),
(".//dt[@class='field-even']", 'Field_name all lower'),
(".//dt[@class='field-odd']", 'FIELD_NAME'),
(".//dt[@class='field-even']", 'FIELD_NAME ALL CAPS'),
(".//dt[@class='field-odd']", 'Field_Name'),
(".//dt[@class='field-even']", 'Field_Name All Word Caps'),
(".//dt[@class='field-odd']", 'Field_name'),
(".//dt[@class='field-even']", 'Field_name First word cap'),
(".//dt[@class='field-odd']", 'FIELd_name'),
(".//dt[@class='field-even']", 'FIELd_name PARTial caps'),
# custom sidebar
(".//h4", 'Custom sidebar'),
# docfields
(".//dd[@class='field-odd']/p/strong", '^moo$'),
(".//dd[@class='field-odd']/p/strong", tail_check(r'\(Moo\) .* Moo')),
(".//dd[@class='field-odd']/ul/li/p/strong", '^hour$'),
(".//dd[@class='field-odd']/ul/li/p/em", '^DuplicateType$'),
(".//dd[@class='field-odd']/ul/li/p/em", tail_check(r'.* Some parameter')),
# others
(".//a[@class='reference internal'][@href='#cmdoption-perl-arg-p']/code/span",
'perl'),
(".//a[@class='reference internal'][@href='#cmdoption-perl-arg-p']/code/span",
'\\+p'),
(".//a[@class='reference internal'][@href='#cmdoption-perl-objc']/code/span",
'--ObjC\\+\\+'),
(".//a[@class='reference internal'][@href='#cmdoption-perl-plugin-option']/code/span",
'--plugin.option'),
(".//a[@class='reference internal'][@href='#cmdoption-perl-arg-create-auth-token']"
"/code/span",
'create-auth-token'),
(".//a[@class='reference internal'][@href='#cmdoption-perl-arg-arg']/code/span",
'arg'),
(".//a[@class='reference internal'][@href='#cmdoption-hg-arg-commit']/code/span",
'hg'),
(".//a[@class='reference internal'][@href='#cmdoption-hg-arg-commit']/code/span",
'commit'),
(".//a[@class='reference internal'][@href='#cmdoption-git-commit-p']/code/span",
'git'),
(".//a[@class='reference internal'][@href='#cmdoption-git-commit-p']/code/span",
'commit'),
(".//a[@class='reference internal'][@href='#cmdoption-git-commit-p']/code/span",
'-p'),
],
'index.html': [
(".//meta[@name='hc'][@content='hcval']", ''),
(".//meta[@name='hc_co'][@content='hcval_co']", ''),
(".//dt[@class='label']/span[@class='brackets']", r'Ref1'),
(".//dt[@class='label']", ''),
(".//li[@class='toctree-l1']/a", 'Testing various markup'),
(".//li[@class='toctree-l2']/a", 'Inline markup'),
(".//title", 'Sphinx <Tests>'),
(".//div[@class='footer']", '<NAME> & Team'),
(".//a[@href='http://python.org/']"
"[@class='reference external']", ''),
(".//li/p/a[@href='genindex.html']/span", 'Index'),
(".//li/p/a[@href='py-modindex.html']/span", 'Module Index'),
(".//li/p/a[@href='search.html']/span", 'Search Page'),
# custom sidebar only for contents
(".//h4", 'Contents sidebar'),
# custom JavaScript
(".//script[@src='file://moo.js']", ''),
# URL in contents
(".//a[@class='reference external'][@href='http://sphinx-doc.org/']",
'http://sphinx-doc.org/'),
(".//a[@class='reference external'][@href='http://sphinx-doc.org/latest/']",
'Latest reference'),
# Indirect hyperlink targets across files
(".//a[@href='markup.html#some-label'][@class='reference internal']/span",
'^indirect hyperref$'),
],
'bom.html': [
(".//title", " File with UTF-8 BOM"),
],
'extensions.html': [
(".//a[@href='http://python.org/dev/']", "http://python.org/dev/"),
(".//a[@href='http://bugs.python.org/issue1000']", "issue 1000"),
(".//a[@href='http://bugs.python.org/issue1042']", "explicit caption"),
],
'genindex.html': [
# index entries
(".//a/strong", "Main"),
(".//a/strong", "[1]"),
(".//a/strong", "Other"),
(".//a", "entry"),
(".//li/a", "double"),
],
'footnote.html': [
(".//a[@class='footnote-reference brackets'][@href='#id9'][@id='id1']", r"1"),
(".//a[@class='footnote-reference brackets'][@href='#id10'][@id='id2']", r"2"),
(".//a[@class='footnote-reference brackets'][@href='#foo'][@id='id3']", r"3"),
(".//a[@class='reference internal'][@href='#bar'][@id='id4']", r"\[bar\]"),
(".//a[@class='reference internal'][@href='#baz-qux'][@id='id5']", r"\[baz_qux\]"),
(".//a[@class='footnote-reference brackets'][@href='#id11'][@id='id6']", r"4"),
(".//a[@class='footnote-reference brackets'][@href='#id12'][@id='id7']", r"5"),
(".//a[@class='fn-backref'][@href='#id1']", r"1"),
(".//a[@class='fn-backref'][@href='#id2']", r"2"),
(".//a[@class='fn-backref'][@href='#id3']", r"3"),
(".//a[@class='fn-backref'][@href='#id4']", r"bar"),
(".//a[@class='fn-backref'][@href='#id5']", r"baz_qux"),
(".//a[@class='fn-backref'][@href='#id6']", r"4"),
(".//a[@class='fn-backref'][@href='#id7']", r"5"),
(".//a[@class='fn-backref'][@href='#id8']", r"6"),
],
'otherext.html': [
(".//h1", "Generated section"),
(".//a[@href='_sources/otherext.foo.txt']", ''),
]
}))
@pytest.mark.sphinx('html', tags=['testtag'], confoverrides={
    'html_context.hckey_co': 'hcval_co',
    'html_experimental_html5_writer': True})
@pytest.mark.test_params(shared_result='test_build_html5_output')
def test_html5_output(app, cached_etree_parse, fname, expect):
    """Check one (fname, xpath expectation) pair against the built HTML5 output.

    Parameters come from the large flat_dict(...) parametrization above;
    shared_result lets all parameters reuse a single build of the project.
    """
    app.build()
    # Print the page under test so a failing xpath is easy to locate with -s.
    print(app.outdir / fname)
    check_xpath(cached_etree_parse(app.outdir / fname), fname, *expect)
@pytest.mark.sphinx('html', tags=['testtag'], confoverrides={
    'html_context.hckey_co': 'hcval_co',
    'html_experimental_html5_writer': True})
@pytest.mark.test_params(shared_result='test_build_html_output')
def test_html_download(app):
    """Both pages referencing img.png must link the same _downloads copy."""
    app.build()
    # (page, expected link pattern) pairs; the subdir page links one level up.
    pages = [
        (app.outdir / 'subdir' / 'includes.html',
         '<a class="reference download internal" download="" '
         'href="../(_downloads/.*/img.png)">'),
        (app.outdir / 'includes.html',
         '<a class="reference download internal" download="" '
         'href="(_downloads/.*/img.png)">'),
    ]
    hrefs = []
    for page, pattern in pages:
        found = re.search(pattern, page.text())
        assert found
        # The linked file must actually exist in the output tree.
        assert (app.outdir / found.group(1)).exists()
        hrefs.append(found.group(1))
    # Same source file -> same deduplicated download target from both pages.
    assert hrefs[0] == hrefs[1]
@pytest.mark.sphinx('html', testroot='roles-download',
                    confoverrides={'html_experimental_html5_writer': True})
def test_html_download_role(app, status, warning):
    """The :download: role: real files are copied into _downloads/<digest>/ and
    internal, missing, and external targets render differently."""
    app.build()
    # NOTE(review): digest is the md5 of the *path string*, not of the file
    # contents -- presumably mirroring how the builder names _downloads
    # subdirectories; confirm against the DownloadFiles implementation.
    digest = md5((app.srcdir / 'dummy.dat').encode()).hexdigest()
    assert (app.outdir / '_downloads' / digest / 'dummy.dat').exists()
    content = (app.outdir / 'index.html').text()
    # Existing file: rendered as a working internal download link.
    assert (('<li><p><a class="reference download internal" download="" '
             'href="_downloads/%s/dummy.dat">'
             '<code class="xref download docutils literal notranslate">'
             '<span class="pre">dummy.dat</span></code></a></p></li>' % digest)
            in content)
    # Missing file: no link is generated, only the literal markup.
    assert ('<li><p><code class="xref download docutils literal notranslate">'
            '<span class="pre">not_found.dat</span></code></p></li>' in content)
    # URL target: rendered as an external download link.
    assert ('<li><p><a class="reference download external" download="" '
            'href="http://www.sphinx-doc.org/en/master/_static/sphinxheader.png">'
            '<code class="xref download docutils literal notranslate">'
            '<span class="pre">Sphinx</span> <span class="pre">logo</span>'
            '</code></a></p></li>' in content) | tests/test_build_html5.py | import re
import xml.etree.cElementTree as ElementTree
from hashlib import md5
import pytest
from html5lib import getTreeBuilder, HTMLParser
from test_build_html import flat_dict, tail_check, check_xpath
from sphinx.util.docutils import is_html5_writer_available
# html5lib tree builder backed by (c)ElementTree so the built pages can be
# queried with the same xpath helpers used by the HTML4 tests.
TREE_BUILDER = getTreeBuilder('etree', implementation=ElementTree)
# namespaceHTMLElements=False keeps tag names plain (no '{ns}div' prefixes),
# which the xpath expectations in this module rely on.
HTML_PARSER = HTMLParser(TREE_BUILDER, namespaceHTMLElements=False)
# Parse-tree cache shared with the cached_etree_parse fixture below.
etree_cache = {}
@pytest.mark.skipif(not is_html5_writer_available(), reason='HTML5 writer is not available')
@pytest.fixture(scope='module')
def cached_etree_parse():
    """Yield a parse(fname) helper that caches the most recently parsed page.

    Only one tree is kept at a time (the cache is cleared before each new
    store), so repeated expectations against the same page avoid re-parsing
    without the cache growing across pages.
    """
    def parse(fname):
        try:
            return etree_cache[fname]
        except KeyError:
            pass
        with fname.open('rb') as fp:
            tree = HTML_PARSER.parse(fp)
        # Single-entry cache: evict the previous page before storing this one.
        etree_cache.clear()
        etree_cache[fname] = tree
        return tree
    yield parse
    etree_cache.clear()
@pytest.mark.skipif(not is_html5_writer_available(), reason='HTML5 writer is not available')
@pytest.mark.parametrize("fname,expect", flat_dict({
'images.html': [
(".//img[@src='_images/img.png']", ''),
(".//img[@src='_images/img1.png']", ''),
(".//img[@src='_images/simg.png']", ''),
(".//img[@src='_images/svgimg.svg']", ''),
(".//a[@href='_sources/images.txt']", ''),
],
'subdir/images.html': [
(".//img[@src='../_images/img1.png']", ''),
(".//img[@src='../_images/rimg.png']", ''),
],
'subdir/includes.html': [
(".//a[@class='reference download internal']", ''),
(".//img[@src='../_images/img.png']", ''),
(".//p", 'This is an include file.'),
(".//pre/span", 'line 1'),
(".//pre/span", 'line 2'),
],
'includes.html': [
(".//pre", '<NAME>'),
(".//a[@class='reference download internal']", ''),
(".//pre/span", '"quotes"'),
(".//pre/span", "'included'"),
(".//pre/span[@class='s2']", 'üöä'),
(".//div[@class='inc-pyobj1 highlight-text notranslate']//pre",
r'^class Foo:\n pass\n\s*$'),
(".//div[@class='inc-pyobj2 highlight-text notranslate']//pre",
r'^ def baz\(\):\n pass\n\s*$'),
(".//div[@class='inc-lines highlight-text notranslate']//pre",
r'^class Foo:\n pass\nclass Bar:\n$'),
(".//div[@class='inc-startend highlight-text notranslate']//pre",
'^foo = "Including Unicode characters: üöä"\\n$'),
(".//div[@class='inc-preappend highlight-text notranslate']//pre",
r'(?m)^START CODE$'),
(".//div[@class='inc-pyobj-dedent highlight-python notranslate']//span",
r'def'),
(".//div[@class='inc-tab3 highlight-text notranslate']//pre",
r'-| |-'),
(".//div[@class='inc-tab8 highlight-python notranslate']//pre/span",
r'-| |-'),
],
'autodoc.html': [
(".//dt[@id='autodoc_target.Class']", ''),
(".//dt[@id='autodoc_target.function']/em", r'\*\*kwds'),
(".//dd/p", r'Return spam\.'),
],
'extapi.html': [
(".//strong", 'from class: Bar'),
],
'markup.html': [
(".//title", 'set by title directive'),
(".//p/em", 'Section author: <NAME>'),
(".//p/em", 'Module author: <NAME>'),
# created by the meta directive
(".//meta[@name='author'][@content='Me']", ''),
(".//meta[@name='keywords'][@content='docs, sphinx']", ''),
# a label created by ``.. _label:``
(".//div[@id='label']", ''),
# code with standard code blocks
(".//pre", '^some code$'),
# an option list
(".//span[@class='option']", '--help'),
# admonitions
(".//p[@class='admonition-title']", 'My Admonition'),
(".//div[@class='admonition note']/p", 'Note text.'),
(".//div[@class='admonition warning']/p", 'Warning text.'),
# inline markup
(".//li/p/strong", r'^command\\n$'),
(".//li/p/strong", r'^program\\n$'),
(".//li/p/em", r'^dfn\\n$'),
(".//li/p/kbd", r'^kbd\\n$'),
(".//li/p/span", 'File \N{TRIANGULAR BULLET} Close'),
(".//li/p/code/span[@class='pre']", '^a/$'),
(".//li/p/code/em/span[@class='pre']", '^varpart$'),
(".//li/p/code/em/span[@class='pre']", '^i$'),
(".//a[@href='https://www.python.org/dev/peps/pep-0008']"
"[@class='pep reference external']/strong", 'PEP 8'),
(".//a[@href='https://www.python.org/dev/peps/pep-0008']"
"[@class='pep reference external']/strong",
'Python Enhancement Proposal #8'),
(".//a[@href='https://tools.ietf.org/html/rfc1.html']"
"[@class='rfc reference external']/strong", 'RFC 1'),
(".//a[@href='https://tools.ietf.org/html/rfc1.html']"
"[@class='rfc reference external']/strong", 'Request for Comments #1'),
(".//a[@href='objects.html#envvar-HOME']"
"[@class='reference internal']/code/span[@class='pre']", 'HOME'),
(".//a[@href='#with']"
"[@class='reference internal']/code/span[@class='pre']", '^with$'),
(".//a[@href='#grammar-token-try-stmt']"
"[@class='reference internal']/code/span", '^statement$'),
(".//a[@href='#some-label'][@class='reference internal']/span", '^here$'),
(".//a[@href='#some-label'][@class='reference internal']/span", '^there$'),
(".//a[@href='subdir/includes.html']"
"[@class='reference internal']/span", 'Including in subdir'),
(".//a[@href='objects.html#cmdoption-python-c']"
"[@class='reference internal']/code/span[@class='pre']", '-c'),
# abbreviations
(".//abbr[@title='abbreviation']", '^abbr$'),
# version stuff
(".//div[@class='versionadded']/p/span", 'New in version 0.6: '),
(".//div[@class='versionadded']/p/span",
tail_check('First paragraph of versionadded')),
(".//div[@class='versionchanged']/p/span",
tail_check('First paragraph of versionchanged')),
(".//div[@class='versionchanged']/p",
'Second paragraph of versionchanged'),
# footnote reference
(".//a[@class='footnote-reference brackets']", r'1'),
# created by reference lookup
(".//a[@href='index.html#ref1']", ''),
# ``seealso`` directive
(".//div/p[@class='admonition-title']", 'See also'),
# a ``hlist`` directive
(".//table[@class='hlist']/tbody/tr/td/ul/li/p", '^This$'),
# a ``centered`` directive
(".//p[@class='centered']/strong", 'LICENSE'),
# a glossary
(".//dl/dt[@id='term-boson']", 'boson'),
# a production list
(".//pre/strong", 'try_stmt'),
(".//pre/a[@href='#grammar-token-try1-stmt']/code/span", 'try1_stmt'),
# tests for ``only`` directive
(".//p", 'A global substitution.'),
(".//p", 'In HTML.'),
(".//p", 'In both.'),
(".//p", 'Always present'),
# tests for ``any`` role
(".//a[@href='#with']/span", 'headings'),
(".//a[@href='objects.html#func_without_body']/code/span", 'objects'),
# tests for numeric labels
(".//a[@href='#id1'][@class='reference internal']/span", 'Testing various markup'),
],
'objects.html': [
(".//dt[@id='mod.Cls.meth1']", ''),
(".//dt[@id='errmod.Error']", ''),
(".//dt/code", r'long\(parameter,\s* list\)'),
(".//dt/code", 'another one'),
(".//a[@href='#mod.Cls'][@class='reference internal']", ''),
(".//dl[@class='userdesc']", ''),
(".//dt[@id='userdesc-myobj']", ''),
(".//a[@href='#userdesc-myobj'][@class='reference internal']", ''),
# docfields
(".//a[@class='reference internal'][@href='#TimeInt']/em", 'TimeInt'),
(".//a[@class='reference internal'][@href='#Time']", 'Time'),
(".//a[@class='reference internal'][@href='#errmod.Error']/strong", 'Error'),
# C references
(".//span[@class='pre']", 'CFunction()'),
(".//a[@href='#c.Sphinx_DoSomething']", ''),
(".//a[@href='#c.SphinxStruct.member']", ''),
(".//a[@href='#c.SPHINX_USE_PYTHON']", ''),
(".//a[@href='#c.SphinxType']", ''),
(".//a[@href='#c.sphinx_global']", ''),
# test global TOC created by toctree()
(".//ul[@class='current']/li[@class='toctree-l1 current']/a[@href='#']",
'Testing object descriptions'),
(".//li[@class='toctree-l1']/a[@href='markup.html']",
'Testing various markup'),
# test unknown field names
(".//dt[@class='field-odd']", 'Field_name'),
(".//dt[@class='field-even']", 'Field_name all lower'),
(".//dt[@class='field-odd']", 'FIELD_NAME'),
(".//dt[@class='field-even']", 'FIELD_NAME ALL CAPS'),
(".//dt[@class='field-odd']", 'Field_Name'),
(".//dt[@class='field-even']", 'Field_Name All Word Caps'),
(".//dt[@class='field-odd']", 'Field_name'),
(".//dt[@class='field-even']", 'Field_name First word cap'),
(".//dt[@class='field-odd']", 'FIELd_name'),
(".//dt[@class='field-even']", 'FIELd_name PARTial caps'),
# custom sidebar
(".//h4", 'Custom sidebar'),
# docfields
(".//dd[@class='field-odd']/p/strong", '^moo$'),
(".//dd[@class='field-odd']/p/strong", tail_check(r'\(Moo\) .* Moo')),
(".//dd[@class='field-odd']/ul/li/p/strong", '^hour$'),
(".//dd[@class='field-odd']/ul/li/p/em", '^DuplicateType$'),
(".//dd[@class='field-odd']/ul/li/p/em", tail_check(r'.* Some parameter')),
# others
(".//a[@class='reference internal'][@href='#cmdoption-perl-arg-p']/code/span",
'perl'),
(".//a[@class='reference internal'][@href='#cmdoption-perl-arg-p']/code/span",
'\\+p'),
(".//a[@class='reference internal'][@href='#cmdoption-perl-objc']/code/span",
'--ObjC\\+\\+'),
(".//a[@class='reference internal'][@href='#cmdoption-perl-plugin-option']/code/span",
'--plugin.option'),
(".//a[@class='reference internal'][@href='#cmdoption-perl-arg-create-auth-token']"
"/code/span",
'create-auth-token'),
(".//a[@class='reference internal'][@href='#cmdoption-perl-arg-arg']/code/span",
'arg'),
(".//a[@class='reference internal'][@href='#cmdoption-hg-arg-commit']/code/span",
'hg'),
(".//a[@class='reference internal'][@href='#cmdoption-hg-arg-commit']/code/span",
'commit'),
(".//a[@class='reference internal'][@href='#cmdoption-git-commit-p']/code/span",
'git'),
(".//a[@class='reference internal'][@href='#cmdoption-git-commit-p']/code/span",
'commit'),
(".//a[@class='reference internal'][@href='#cmdoption-git-commit-p']/code/span",
'-p'),
],
'index.html': [
(".//meta[@name='hc'][@content='hcval']", ''),
(".//meta[@name='hc_co'][@content='hcval_co']", ''),
(".//dt[@class='label']/span[@class='brackets']", r'Ref1'),
(".//dt[@class='label']", ''),
(".//li[@class='toctree-l1']/a", 'Testing various markup'),
(".//li[@class='toctree-l2']/a", 'Inline markup'),
(".//title", 'Sphinx <Tests>'),
(".//div[@class='footer']", '<NAME> & Team'),
(".//a[@href='http://python.org/']"
"[@class='reference external']", ''),
(".//li/p/a[@href='genindex.html']/span", 'Index'),
(".//li/p/a[@href='py-modindex.html']/span", 'Module Index'),
(".//li/p/a[@href='search.html']/span", 'Search Page'),
# custom sidebar only for contents
(".//h4", 'Contents sidebar'),
# custom JavaScript
(".//script[@src='file://moo.js']", ''),
# URL in contents
(".//a[@class='reference external'][@href='http://sphinx-doc.org/']",
'http://sphinx-doc.org/'),
(".//a[@class='reference external'][@href='http://sphinx-doc.org/latest/']",
'Latest reference'),
# Indirect hyperlink targets across files
(".//a[@href='markup.html#some-label'][@class='reference internal']/span",
'^indirect hyperref$'),
],
'bom.html': [
(".//title", " File with UTF-8 BOM"),
],
'extensions.html': [
(".//a[@href='http://python.org/dev/']", "http://python.org/dev/"),
(".//a[@href='http://bugs.python.org/issue1000']", "issue 1000"),
(".//a[@href='http://bugs.python.org/issue1042']", "explicit caption"),
],
'genindex.html': [
# index entries
(".//a/strong", "Main"),
(".//a/strong", "[1]"),
(".//a/strong", "Other"),
(".//a", "entry"),
(".//li/a", "double"),
],
'footnote.html': [
(".//a[@class='footnote-reference brackets'][@href='#id9'][@id='id1']", r"1"),
(".//a[@class='footnote-reference brackets'][@href='#id10'][@id='id2']", r"2"),
(".//a[@class='footnote-reference brackets'][@href='#foo'][@id='id3']", r"3"),
(".//a[@class='reference internal'][@href='#bar'][@id='id4']", r"\[bar\]"),
(".//a[@class='reference internal'][@href='#baz-qux'][@id='id5']", r"\[baz_qux\]"),
(".//a[@class='footnote-reference brackets'][@href='#id11'][@id='id6']", r"4"),
(".//a[@class='footnote-reference brackets'][@href='#id12'][@id='id7']", r"5"),
(".//a[@class='fn-backref'][@href='#id1']", r"1"),
(".//a[@class='fn-backref'][@href='#id2']", r"2"),
(".//a[@class='fn-backref'][@href='#id3']", r"3"),
(".//a[@class='fn-backref'][@href='#id4']", r"bar"),
(".//a[@class='fn-backref'][@href='#id5']", r"baz_qux"),
(".//a[@class='fn-backref'][@href='#id6']", r"4"),
(".//a[@class='fn-backref'][@href='#id7']", r"5"),
(".//a[@class='fn-backref'][@href='#id8']", r"6"),
],
'otherext.html': [
(".//h1", "Generated section"),
(".//a[@href='_sources/otherext.foo.txt']", ''),
]
}))
@pytest.mark.sphinx('html', tags=['testtag'], confoverrides={
'html_context.hckey_co': 'hcval_co',
'html_experimental_html5_writer': True})
@pytest.mark.test_params(shared_result='test_build_html5_output')
def test_html5_output(app, cached_etree_parse, fname, expect):
app.build()
print(app.outdir / fname)
check_xpath(cached_etree_parse(app.outdir / fname), fname, *expect)
@pytest.mark.sphinx('html', tags=['testtag'], confoverrides={
'html_context.hckey_co': 'hcval_co',
'html_experimental_html5_writer': True})
@pytest.mark.test_params(shared_result='test_build_html_output')
def test_html_download(app):
app.build()
# subdir/includes.html
result = (app.outdir / 'subdir' / 'includes.html').text()
pattern = ('<a class="reference download internal" download="" '
'href="../(_downloads/.*/img.png)">')
matched = re.search(pattern, result)
assert matched
assert (app.outdir / matched.group(1)).exists()
filename = matched.group(1)
# includes.html
result = (app.outdir / 'includes.html').text()
pattern = ('<a class="reference download internal" download="" '
'href="(_downloads/.*/img.png)">')
matched = re.search(pattern, result)
assert matched
assert (app.outdir / matched.group(1)).exists()
assert matched.group(1) == filename
@pytest.mark.sphinx('html', testroot='roles-download',
confoverrides={'html_experimental_html5_writer': True})
def test_html_download_role(app, status, warning):
app.build()
digest = md5((app.srcdir / 'dummy.dat').encode()).hexdigest()
assert (app.outdir / '_downloads' / digest / 'dummy.dat').exists()
content = (app.outdir / 'index.html').text()
assert (('<li><p><a class="reference download internal" download="" '
'href="_downloads/%s/dummy.dat">'
'<code class="xref download docutils literal notranslate">'
'<span class="pre">dummy.dat</span></code></a></p></li>' % digest)
in content)
assert ('<li><p><code class="xref download docutils literal notranslate">'
'<span class="pre">not_found.dat</span></code></p></li>' in content)
assert ('<li><p><a class="reference download external" download="" '
'href="http://www.sphinx-doc.org/en/master/_static/sphinxheader.png">'
'<code class="xref download docutils literal notranslate">'
'<span class="pre">Sphinx</span> <span class="pre">logo</span>'
'</code></a></p></li>' in content) | 0.325413 | 0.139543 |
from copy import deepcopy
class EventServiceException(Exception):
    """
    Exception class for the event service.

    Carries an arbitrary payload in ``value``; ``str(exc)`` is the
    stringified payload.
    """
    def __init__(self, value):
        # Forward the payload to Exception so e.args / repr(e) are
        # informative instead of empty.
        super().__init__(value)
        self.value = value
    def __str__(self):
        # str() rather than returning value directly: a non-string payload
        # previously made str(exc) raise TypeError.
        return str(self.value)
class PublisherEventService:
    """
    Interface to topics for publishers.
    """
    def getTopic(self, topicName):
        """
        Return the topic named *topicName* from the event service proxy
        (presumably a Topic instance -- confirm against the proxy classes).
        """
        return _proxy.getTopic(topicName)
    def existsTopic(self, topicName):
        # Ask the proxy whether a topic with this name already exists.
        return _proxy.existsTopic(topicName)
class SubscriberEventService:
    """Interface to subscriptions for subscribers."""
    def __init__(self):
        # Register with the event service; the returned id scopes every
        # subsequent subscription/processing call for this subscriber.
        self.subscriberid = _proxy.registerSubscriber()
    def getSubscription(self, subscriptionName):
        """
        A Subscription object can be safely returned from here without screwing
        up automatic object tracking for cleaning up out-of-scope subscriptions.
        A framework/component subscriber uses this Subscription object to
        talk to the event service.
        """
        _proxy.getSubscription(self.subscriberid, subscriptionName)
        return Subscription(self.subscriberid, subscriptionName)
    def processEvents(self):
        # Forward to the proxy to process pending events for this subscriber.
        _proxy.processEvents(self.subscriberid)
    def __del__(self):
        # NOTE(review): runs at GC time and assumes the module-level _proxy is
        # still alive; during interpreter shutdown that may not hold -- confirm.
        _proxy.unregisterSubscriber(self.subscriberid)
class Event:
    """Container for an event's header and body, detached from the caller."""
    # Sentinel so an explicitly passed ``None`` keeps its original meaning
    # (stored as None), while *omitted* arguments become fresh empty dicts.
    _MISSING = object()

    def __init__(self, header=_MISSING, body=_MISSING):
        # deepcopy detaches the event from the caller's dicts. Previously the
        # defaults were mutable ``{}`` literals (linter anti-pattern B006),
        # harmless here only because of the immediate deepcopy; the sentinel
        # removes the smell while preserving behavior exactly.
        self.header = {} if header is Event._MISSING else deepcopy(header)
        self.body = {} if body is Event._MISSING else deepcopy(body)

    def getHeader(self):
        """Return this event's (privately copied) header."""
        return self.header

    def getBody(self):
        """Return this event's (privately copied) body."""
        return self.body

    def __str__(self):
        return str(self.body)
class EventListener:
    """Base class for event listeners; subclasses override processEvent()."""
    def __init__(self):
        # Obtain a unique listener id from the proxy; used when this listener
        # is registered against a Subscription.
        self.listenerid = _proxy.createListener()
    def processEvent(self, topicName, theEvent):
        """
        A listener implements the processEvent method to respond to an event,
        thereby overriding the below invocation. Ideally, it should be an abstract
        method, but currently serves to check the correct operation of the
        event service.
        """
class Topic:
    """Named channel to which publishers send events."""
    def __init__(self, topicName):
        self.topicName = topicName
    def getTopicName(self):
        # Accessor kept for the CCA-style interface used throughout this file.
        return self.topicName
    def sendEvent(self, eventName, eventBody):
        # Publish eventBody under eventName on this topic via the proxy.
        _proxy.sendEvent(self.topicName, eventName, eventBody)
class Subscription:
    """A subscriber's handle on one named subscription."""
    def __init__(self, subscriberid, subscriptionName):
        self.subscriberid = subscriberid
        self.subscriptionName = subscriptionName
    def registerEventListener(self, listenerKey, theListener):
        # Attach theListener (an EventListener) under listenerKey; the proxy
        # takes both the listener id and the listener object itself.
        _proxy.registerEventListener(self.subscriberid, self.subscriptionName,
                                     listenerKey, theListener.listenerid, theListener)
    def unregisterEventListener(self, listenerKey):
        # Detach the listener previously registered under listenerKey.
        _proxy.unregisterEventListener(self.subscriberid, self.subscriptionName, listenerKey)
    def getSubscriptionName(self):
        return self.subscriptionName
    def __del__(self):
        # NOTE(review): like SubscriberEventService.__del__, this relies on
        # _proxy outliving the object -- may be unsafe at interpreter shutdown.
        _proxy.removeSubscription(self.subscriberid, self.subscriptionName)
""" Initialize the proxy """
def initialize_event_service(service):
    """Install the module-level proxy used by every class in this module.

    A framework-side EventService gets an EventServiceFwkProxy; anything else
    (presumably a component-side service handle) gets an EventServiceCmpProxy.
    """
    global _proxy
    if isinstance(service, EventService):
        _proxy = EventServiceFwkProxy(service)
    else:
        _proxy = EventServiceCmpProxy(service)
# pylint: disable=C0413
from .eventService import EventService # noqa: E402
from .eventServiceProxy import EventServiceFwkProxy, EventServiceCmpProxy # noqa: E402 | ipsframework/cca_es_spec.py | from copy import deepcopy
class EventServiceException(Exception):
"""
Exception class for the event service.
"""
def __init__(self, value):
super().__init__()
self.value = value
def __str__(self):
return self.value
class PublisherEventService:
"""
Interface to topics for publishers.
"""
def getTopic(self, topicName):
"""
"""
return _proxy.getTopic(topicName)
def existsTopic(self, topicName):
return _proxy.existsTopic(topicName)
class SubscriberEventService:
def __init__(self):
self.subscriberid = _proxy.registerSubscriber()
def getSubscription(self, subscriptionName):
"""
A Subscription object can be safely returned from here without screwing
up automatic object tracking for cleaning up out-of-scope subscriptions.
A framework/component subscriber uses this Subscription object to
talk to the event service.
"""
_proxy.getSubscription(self.subscriberid, subscriptionName)
return Subscription(self.subscriberid, subscriptionName)
def processEvents(self):
_proxy.processEvents(self.subscriberid)
def __del__(self):
_proxy.unregisterSubscriber(self.subscriberid)
class Event:
def __init__(self, header={}, body={}):
self.header = deepcopy(header)
self.body = deepcopy(body)
def getHeader(self):
return self.header
def getBody(self):
return self.body
def __str__(self):
return str(self.body)
class EventListener:
def __init__(self):
self.listenerid = _proxy.createListener()
def processEvent(self, topicName, theEvent):
"""
A listener implements the processEvent method to respond to an event,
thereby overriding the below invocation. Ideally, it should be an abstract
method, but currently serves to check the correct operation of the
event service.
"""
class Topic:
def __init__(self, topicName):
self.topicName = topicName
def getTopicName(self):
return self.topicName
def sendEvent(self, eventName, eventBody):
_proxy.sendEvent(self.topicName, eventName, eventBody)
class Subscription:
def __init__(self, subscriberid, subscriptionName):
self.subscriberid = subscriberid
self.subscriptionName = subscriptionName
def registerEventListener(self, listenerKey, theListener):
_proxy.registerEventListener(self.subscriberid, self.subscriptionName,
listenerKey, theListener.listenerid, theListener)
def unregisterEventListener(self, listenerKey):
_proxy.unregisterEventListener(self.subscriberid, self.subscriptionName, listenerKey)
def getSubscriptionName(self):
return self.subscriptionName
def __del__(self):
_proxy.removeSubscription(self.subscriberid, self.subscriptionName)
""" Initialize the proxy """
def initialize_event_service(service):
global _proxy
if isinstance(service, EventService):
_proxy = EventServiceFwkProxy(service)
else:
_proxy = EventServiceCmpProxy(service)
# pylint: disable=C0413
from .eventService import EventService # noqa: E402
from .eventServiceProxy import EventServiceFwkProxy, EventServiceCmpProxy # noqa: E402 | 0.704465 | 0.151812 |
import asyncio
import logging
import datetime
from typing import Optional, Iterable, Any, Union, Callable, Awaitable, List
from .._common.message import ServiceBusReceivedMessage
from ._servicebus_session_async import ServiceBusSession
from ._servicebus_receiver_async import ServiceBusReceiver
from .._common.utils import (
get_renewable_start_time,
utc_now,
get_renewable_lock_duration,
)
from .._common.auto_lock_renewer import SHORT_RENEW_OFFSET, SHORT_RENEW_SCALING_FACTOR
from ._async_utils import get_running_loop
from ..exceptions import AutoLockRenewTimeout, AutoLockRenewFailed, ServiceBusError
# Entities whose locks this module can auto-renew.
Renewable = Union[ServiceBusSession, ServiceBusReceivedMessage]
# Async callback invoked as (renewable, error) when renewal halts unexpectedly.
AsyncLockRenewFailureCallback = Callable[
    [Renewable, Optional[Exception]], Awaitable[None]
]
_log = logging.getLogger(__name__)
class AutoLockRenewer:
    """Auto lock renew.

    An asynchronous AutoLockRenewer handler for renewing the lock
    tokens of messages and/or sessions in the background.

    :param max_lock_renewal_duration: A time in seconds that locks registered to this renewer
     should be maintained for. Default value is 300 (5 minutes).
    :type max_lock_renewal_duration: float
    :param on_lock_renew_failure: A callback may be specified to be called when the lock is lost on the renewable
     that is being registered. Default value is None (no callback).
    :type on_lock_renew_failure: Optional[AsyncLockRenewFailureCallback]
    :param loop: An async event loop.
    :type loop: Optional[~asyncio.AbstractEventLoop]

    .. admonition:: Example:

        .. literalinclude:: ../samples/async_samples/sample_code_servicebus_async.py
            :start-after: [START auto_lock_renew_message_async]
            :end-before: [END auto_lock_renew_message_async]
            :language: python
            :dedent: 4
            :caption: Automatically renew a message lock

        .. literalinclude:: ../samples/async_samples/sample_code_servicebus_async.py
            :start-after: [START auto_lock_renew_session_async]
            :end-before: [END auto_lock_renew_session_async]
            :language: python
            :dedent: 4
            :caption: Automatically renew a session lock
    """

    def __init__(
        self,
        max_lock_renewal_duration: float = 300,
        on_lock_renew_failure: Optional[AsyncLockRenewFailureCallback] = None,
        loop: Optional[asyncio.AbstractEventLoop] = None,
    ) -> None:
        self._shutdown = asyncio.Event()
        self._futures = []  # type: List[asyncio.Future]
        self._loop = loop or get_running_loop()
        self._sleep_time = 1  # seconds between renewal-deadline checks
        self._renew_period = 10  # renew when <= this many seconds of lock remain
        self._on_lock_renew_failure = on_lock_renew_failure
        self._max_lock_renewal_duration = max_lock_renewal_duration

    async def __aenter__(self) -> "AutoLockRenewer":
        if self._shutdown.is_set():
            raise ServiceBusError(
                "The AutoLockRenewer has already been shutdown. Please create a new instance for"
                " auto lock renewing."
            )
        return self

    async def __aexit__(self, *args: Iterable[Any]) -> None:
        await self.close()

    def _renewable(
        self, renewable: Union[ServiceBusReceivedMessage, ServiceBusSession]
    ) -> bool:
        """Return True while *renewable* still needs (and can accept) renewal."""
        # pylint: disable=protected-access
        if self._shutdown.is_set():
            return False
        if hasattr(renewable, "_settled") and renewable._settled:  # type: ignore
            return False
        if renewable._lock_expired:
            return False
        try:
            if not renewable._receiver._running:  # type: ignore
                return False
        except AttributeError:  # If for whatever reason the renewable isn't hooked up to a receiver
            raise ServiceBusError(
                "Cannot renew an entity without an associated receiver. "
                "ServiceBusReceivedMessage and active ServiceBusReceiver.Session objects are expected."
            )
        return True

    async def _auto_lock_renew(
        self,
        receiver: ServiceBusReceiver,
        renewable: Renewable,
        starttime: datetime.datetime,
        max_lock_renewal_duration: float,
        on_lock_renew_failure: Optional[AsyncLockRenewFailureCallback] = None,
        renew_period_override: Optional[float] = None,
    ) -> None:
        """Background task: renew *renewable*'s lock until it expires, the max
        duration elapses, or the renewer/receiver shuts down."""
        # pylint: disable=protected-access
        _log.debug(
            "Running async lock auto-renew for %r seconds", max_lock_renewal_duration
        )
        error = None  # type: Optional[Exception]
        clean_shutdown = False  # Only trigger the on_lock_renew_failure if halting was not expected (shutdown, etc)
        renew_period = renew_period_override or self._renew_period
        try:
            while self._renewable(renewable):
                if (utc_now() - starttime) >= datetime.timedelta(
                    seconds=max_lock_renewal_duration
                ):
                    _log.debug(
                        "Reached max auto lock renew duration - letting lock expire."
                    )
                    raise AutoLockRenewTimeout(
                        "Auto-renew period ({} seconds) elapsed.".format(
                            max_lock_renewal_duration
                        )
                    )
                if (renewable.locked_until_utc - utc_now()) <= datetime.timedelta(
                    seconds=renew_period
                ):
                    _log.debug(
                        "%r seconds or less until lock expires - auto renewing.",
                        renew_period,
                    )
                    try:
                        # Renewable is a session
                        await renewable.renew_lock()  # type: ignore
                    except AttributeError:
                        # Renewable is a message
                        await receiver.renew_message_lock(renewable)  # type: ignore
                await asyncio.sleep(self._sleep_time)
            clean_shutdown = not renewable._lock_expired
        except AutoLockRenewTimeout as e:
            error = e
            renewable.auto_renew_error = e
            clean_shutdown = not renewable._lock_expired
        except Exception as e:  # pylint: disable=broad-except
            _log.debug("Failed to auto-renew lock: %r. Closing thread.", e)
            error = AutoLockRenewFailed("Failed to auto-renew lock", error=e)
            renewable.auto_renew_error = error
        finally:
            if on_lock_renew_failure and not clean_shutdown:
                await on_lock_renew_failure(renewable, error)

    def register(
        self,
        receiver: ServiceBusReceiver,
        renewable: Union[ServiceBusReceivedMessage, ServiceBusSession],
        max_lock_renewal_duration: Optional[float] = None,
        on_lock_renew_failure: Optional[AsyncLockRenewFailureCallback] = None,
    ) -> None:
        """Register a renewable entity for automatic lock renewal.

        :param receiver: The ServiceBusReceiver instance that is associated with the message or the session to
         be auto-lock-renewed.
        :type receiver: ~azure.servicebus.aio.ServiceBusReceiver
        :param renewable: A locked entity that needs to be renewed.
        :type renewable: Union[~azure.servicebus.aio.ServiceBusReceivedMessage,~azure.servicebus.aio.ServiceBusSession]
        :param max_lock_renewal_duration: A time in seconds that the lock should be maintained for.
         Default value is None, in which case the duration given at construction time is used.
        :type max_lock_renewal_duration: Optional[float]
        :param Optional[AsyncLockRenewFailureCallback] on_lock_renew_failure:
         An async callback may be specified to be called when the lock is lost on the renewable being registered.
         Default value is None (no callback).
        :rtype: None
        """
        if not isinstance(renewable, (ServiceBusReceivedMessage, ServiceBusSession)):
            raise TypeError(
                "AutoLockRenewer only supports registration of types "
                "azure.servicebus.ServiceBusReceivedMessage (via a receiver's receive methods) and "
                "azure.servicebus.aio.ServiceBusSession "
                "(via a session receiver's property receiver.session)."
            )
        if self._shutdown.is_set():
            raise ServiceBusError(
                "The AutoLockRenewer has already been shutdown. Please create a new instance for"
                " auto lock renewing."
            )
        if renewable.locked_until_utc is None:
            # Fixed missing inter-fragment spaces that previously produced
            # "...mode maybe lock-renewed..." in the rendered message.
            raise ValueError(
                "Only azure.servicebus.ServiceBusReceivedMessage objects in PEEK_LOCK receive mode may "
                "be lock-renewed. (E.g. only messages received via receive() or the receiver iterator, "
                "not using RECEIVE_AND_DELETE receive mode, and not returned from Peek)"
            )
        starttime = get_renewable_start_time(renewable)
        # This is a heuristic to compensate if it appears the user has a lock duration less than our base renew period
        time_until_expiry = get_renewable_lock_duration(renewable)
        renew_period_override = None
        # Default is 10 seconds, but let's leave ourselves a small margin of error because clock skew is a real problem
        if time_until_expiry <= datetime.timedelta(
            seconds=self._renew_period + SHORT_RENEW_OFFSET
        ):
            renew_period_override = (
                time_until_expiry.seconds * SHORT_RENEW_SCALING_FACTOR
            )
        renew_future = asyncio.ensure_future(
            self._auto_lock_renew(
                receiver,
                renewable,
                starttime,
                max_lock_renewal_duration or self._max_lock_renewal_duration,
                on_lock_renew_failure or self._on_lock_renew_failure,
                renew_period_override,
            ),
            loop=self._loop,
        )
        self._futures.append(renew_future)

    async def close(self) -> None:
        """Cease autorenewal by awaiting any remaining open lock renewal futures.

        Safe to call even when nothing was ever registered.
        """
        self._shutdown.set()
        # asyncio.wait raises ValueError on an empty collection, so a renewer
        # that never had anything registered previously crashed on close().
        if self._futures:
            await asyncio.wait(self._futures)
import asyncio
import logging
import datetime
from typing import Optional, Iterable, Any, Union, Callable, Awaitable, List
from .._common.message import ServiceBusReceivedMessage
from ._servicebus_session_async import ServiceBusSession
from ._servicebus_receiver_async import ServiceBusReceiver
from .._common.utils import (
get_renewable_start_time,
utc_now,
get_renewable_lock_duration,
)
from .._common.auto_lock_renewer import SHORT_RENEW_OFFSET, SHORT_RENEW_SCALING_FACTOR
from ._async_utils import get_running_loop
from ..exceptions import AutoLockRenewTimeout, AutoLockRenewFailed, ServiceBusError
Renewable = Union[ServiceBusSession, ServiceBusReceivedMessage]
AsyncLockRenewFailureCallback = Callable[
[Renewable, Optional[Exception]], Awaitable[None]
]
_log = logging.getLogger(__name__)
class AutoLockRenewer:
    """An asynchronous AutoLockRenewer handler for renewing the lock
    tokens of messages and/or sessions in the background.

    :param max_lock_renewal_duration: A time in seconds that locks registered to this renewer
     should be maintained for. Default value is 300 (5 minutes).
    :type max_lock_renewal_duration: float
    :param on_lock_renew_failure: A callback may be specified to be called when the lock is lost on the renewable
     that is being registered. Default value is None (no callback).
    :type on_lock_renew_failure: Optional[AsyncLockRenewFailureCallback]
    :param loop: An async event loop.
    :type loop: Optional[~asyncio.AbstractEventLoop]

    .. admonition:: Example:

        .. literalinclude:: ../samples/async_samples/sample_code_servicebus_async.py
            :start-after: [START auto_lock_renew_message_async]
            :end-before: [END auto_lock_renew_message_async]
            :language: python
            :dedent: 4
            :caption: Automatically renew a message lock

        .. literalinclude:: ../samples/async_samples/sample_code_servicebus_async.py
            :start-after: [START auto_lock_renew_session_async]
            :end-before: [END auto_lock_renew_session_async]
            :language: python
            :dedent: 4
            :caption: Automatically renew a session lock
    """

    def __init__(
        self,
        max_lock_renewal_duration: float = 300,
        on_lock_renew_failure: Optional[AsyncLockRenewFailureCallback] = None,
        loop: Optional[asyncio.AbstractEventLoop] = None,
    ) -> None:
        # Set by close(); every renew loop checks it and halts cleanly.
        self._shutdown = asyncio.Event()
        self._futures = []  # type: List[asyncio.Future]
        self._loop = loop or get_running_loop()
        self._sleep_time = 1  # seconds between deadline checks in a renew loop
        self._renew_period = 10  # renew once a lock is within this many seconds of expiry
        self._on_lock_renew_failure = on_lock_renew_failure
        self._max_lock_renewal_duration = max_lock_renewal_duration

    async def __aenter__(self) -> "AutoLockRenewer":
        if self._shutdown.is_set():
            raise ServiceBusError(
                "The AutoLockRenewer has already been shutdown. Please create a new instance for"
                " auto lock renewing."
            )
        return self

    async def __aexit__(self, *args: Iterable[Any]) -> None:
        await self.close()

    def _renewable(
        self, renewable: Union[ServiceBusReceivedMessage, ServiceBusSession]
    ) -> bool:
        """Return True while ``renewable`` still needs its lock maintained.

        False once the renewer is shut down, the message is settled, or the
        lock has already expired.  Raises ServiceBusError if the renewable has
        no associated receiver.
        """
        # pylint: disable=protected-access
        if self._shutdown.is_set():
            return False
        if hasattr(renewable, "_settled") and renewable._settled:  # type: ignore
            return False
        if renewable._lock_expired:
            return False
        try:
            if not renewable._receiver._running:  # type: ignore
                return False
        except AttributeError:  # If for whatever reason the renewable isn't hooked up to a receiver
            raise ServiceBusError(
                "Cannot renew an entity without an associated receiver. "
                "ServiceBusReceivedMessage and active ServiceBusReceiver.Session objects are expected."
            )
        return True

    async def _auto_lock_renew(
        self,
        receiver: ServiceBusReceiver,
        renewable: Renewable,
        starttime: datetime.datetime,
        max_lock_renewal_duration: float,
        on_lock_renew_failure: Optional[AsyncLockRenewFailureCallback] = None,
        renew_period_override: Optional[float] = None,  # was annotated `float = None`
    ) -> None:
        """Background task that keeps ``renewable``'s lock alive until it
        settles, expires, times out, or the renewer is shut down.

        Invokes ``on_lock_renew_failure`` only when the loop halted for an
        unexpected reason (i.e. not a clean shutdown).
        """
        # pylint: disable=protected-access
        _log.debug(
            "Running async lock auto-renew for %r seconds", max_lock_renewal_duration
        )
        error = None  # type: Optional[Exception]
        clean_shutdown = False  # Only trigger the on_lock_renew_failure if halting was not expected (shutdown, etc)
        renew_period = renew_period_override or self._renew_period
        try:
            while self._renewable(renewable):
                # Stop renewing once the registered max duration has elapsed.
                if (utc_now() - starttime) >= datetime.timedelta(
                    seconds=max_lock_renewal_duration
                ):
                    _log.debug(
                        "Reached max auto lock renew duration - letting lock expire."
                    )
                    raise AutoLockRenewTimeout(
                        "Auto-renew period ({} seconds) elapsed.".format(
                            max_lock_renewal_duration
                        )
                    )
                if (renewable.locked_until_utc - utc_now()) <= datetime.timedelta(
                    seconds=renew_period
                ):
                    _log.debug(
                        "%r seconds or less until lock expires - auto renewing.",
                        renew_period,
                    )
                    try:
                        # Renewable is a session
                        await renewable.renew_lock()  # type: ignore
                    except AttributeError:
                        # Renewable is a message
                        await receiver.renew_message_lock(renewable)  # type: ignore
                await asyncio.sleep(self._sleep_time)
            clean_shutdown = not renewable._lock_expired
        except AutoLockRenewTimeout as e:
            error = e
            renewable.auto_renew_error = e
            clean_shutdown = not renewable._lock_expired
        except Exception as e:  # pylint: disable=broad-except
            _log.debug("Failed to auto-renew lock: %r. Closing thread.", e)
            error = AutoLockRenewFailed("Failed to auto-renew lock", error=e)
            renewable.auto_renew_error = error
        finally:
            if on_lock_renew_failure and not clean_shutdown:
                await on_lock_renew_failure(renewable, error)

    def register(
        self,
        receiver: ServiceBusReceiver,
        renewable: Union[ServiceBusReceivedMessage, ServiceBusSession],
        max_lock_renewal_duration: Optional[float] = None,
        on_lock_renew_failure: Optional[AsyncLockRenewFailureCallback] = None,
    ) -> None:
        """Register a renewable entity for automatic lock renewal.

        :param receiver: The ServiceBusReceiver instance that is associated with the message or the session to
         be auto-lock-renewed.
        :type receiver: ~azure.servicebus.aio.ServiceBusReceiver
        :param renewable: A locked entity that needs to be renewed.
        :type renewable: Union[~azure.servicebus.aio.ServiceBusReceivedMessage,~azure.servicebus.aio.ServiceBusSession]
        :param max_lock_renewal_duration: A time in seconds that locks registered to this renewer
         should be maintained for. Default value is 300 (5 minutes).
        :type max_lock_renewal_duration: Optional[float]
        :param Optional[AsyncLockRenewFailureCallback] on_lock_renew_failure:
         An async callback may be specified to be called when the lock is lost on the renewable being registered.
         Default value is None (no callback).
        :rtype: None
        """
        if not isinstance(renewable, (ServiceBusReceivedMessage, ServiceBusSession)):
            raise TypeError(
                "AutoLockRenewer only supports registration of types "
                "azure.servicebus.ServiceBusReceivedMessage (via a receiver's receive methods) and "
                "azure.servicebus.aio.ServiceBusSession "
                "(via a session receiver's property receiver.session)."
            )
        if self._shutdown.is_set():
            raise ServiceBusError(
                "The AutoLockRenewer has already been shutdown. Please create a new instance for"
                " auto lock renewing."
            )
        if renewable.locked_until_utc is None:
            raise ValueError(
                "Only azure.servicebus.ServiceBusReceivedMessage objects in PEEK_LOCK receive mode may"
                "be lock-renewed. (E.g. only messages received via receive() or the receiver iterator,"
                "not using RECEIVE_AND_DELETE receive mode, and not returned from Peek)"
            )
        starttime = get_renewable_start_time(renewable)
        # This is a heuristic to compensate if it appears the user has a lock duration less than our base renew period
        time_until_expiry = get_renewable_lock_duration(renewable)
        renew_period_override = None
        # Default is 10 seconds, but let's leave ourselves a small margin of error because clock skew is a real problem
        if time_until_expiry <= datetime.timedelta(
            seconds=self._renew_period + SHORT_RENEW_OFFSET
        ):
            renew_period_override = (
                time_until_expiry.seconds * SHORT_RENEW_SCALING_FACTOR
            )
        renew_future = asyncio.ensure_future(
            self._auto_lock_renew(
                receiver,
                renewable,
                starttime,
                max_lock_renewal_duration or self._max_lock_renewal_duration,
                on_lock_renew_failure or self._on_lock_renew_failure,
                renew_period_override,
            ),
            loop=self._loop,
        )
        self._futures.append(renew_future)

    async def close(self) -> None:
        """Cease autorenewal by cancelling any remaining open lock renewal futures."""
        self._shutdown.set()
        # asyncio.wait() raises ValueError when handed an empty collection,
        # so closing a renewer that never registered anything must not wait.
        if self._futures:
            await asyncio.wait(self._futures)
from collections import namedtuple
import logging
from pymol import cmd
import pyrosetta.distributed.packed_pose as packed_pose
from pyrosetta.rosetta.protocols.rosetta_scripts import XmlObjects
from .utils import (
pymol_to_rosetta,
rosetta_to_pymol,
pymol_selection_to_residue_selector,
)
logger = logging.getLogger("pymol_rosetta_utils.selectors")
PDBAtomID = namedtuple("PDBAtomID", "resi chain name")
def _apply_residue_selector_and_show_as_sticks(
    selector, pose, label="selected_residues"
):
    """Apply a residue selector to a pose, select the matching residues in
    PyMOL under ``label``, render them as sticks, and return the selector's
    boolean residue vector."""
    residues = selector.apply(pose)
    pdb_info = pose.pdb_info()
    clauses = []
    for pose_index, selected in enumerate(residues, start=1):
        if selected:
            # NOTE: insertion codes are lost here -- rosetta's pose2pdb only
            # reports residue number and chain ID.
            resno, chain = pdb_info.pose2pdb(pose_index).split()
            clauses.append("(resi {} and chain {})".format(resno, chain))
    cmd.select(label, "+".join(clauses))
    cmd.show("sticks", label)
    return residues
def _hbond_pdb(hbond, pose):
    """Convert donor and acceptor atom ids to PDB numbering and return a tuple
    in the form of (donor, acceptor).
    """
    info = pose.pdb_info()
    # Donor: heavy-atom residue position plus the donated hydrogen's name.
    don_res, don_chain = info.pose2pdb(hbond.don_res()).split()
    don_atm = pose.residue(hbond.don_res()).atom_name(hbond.don_hatm()).strip()
    # Acceptor: residue position plus the accepting atom's name.
    acc_res, acc_chain = info.pose2pdb(hbond.acc_res()).split()
    acc_atm = pose.residue(hbond.acc_res()).atom_name(hbond.acc_atm()).strip()
    donor = PDBAtomID(don_res, don_chain, don_atm)
    acceptor = PDBAtomID(acc_res, acc_chain, acc_atm)
    return donor, acceptor
def show_interface_residues(pymol_obj_name, grp1, grp2):
    """Show interface residues between two selections.

    Converts the named PyMOL object to a rosetta pose, builds an
    InterfaceByVector selector over the two PyMOL selections, and renders the
    selected interface residues as sticks under "interface_residues".
    """
    wpose = packed_pose.to_pose(pymol_to_rosetta(pymol_obj_name))
    # Build the selectors via RosettaScripts XML; grp1/grp2 are translated
    # from PyMOL selection strings into rosetta residue selectors.
    objs = XmlObjects.create_from_string(
        f"""
    <RESIDUE_SELECTORS>
        {pymol_selection_to_residue_selector(grp1, name="grp1", pose=wpose)}
        {pymol_selection_to_residue_selector(grp2, name="grp2", pose=wpose)}
        <InterfaceByVector name="interface" grp1_selector="grp1" grp2_selector="grp2"/>
    </RESIDUE_SELECTORS>
    """
    )
    logger.debug(objs)
    _apply_residue_selector_and_show_as_sticks(
        objs.get_residue_selector("interface"), wpose, label="interface_residues"
    )
def show_hydrogen_bonds(pymol_obj_name, include_bb_bb=True, selection="all"):
    """Show hydrogen bonds between two selections."""
    wpose = packed_pose.to_pose(pymol_to_rosetta(pymol_obj_name))
    # Push the pose back into PyMOL so every hydrogen exists on screen.
    rosetta_to_pymol(wpose, pymol_obj_name)
    objs = XmlObjects.create_from_string(
        f"""
    <RESIDUE_SELECTORS>
        {pymol_selection_to_residue_selector(selection, name="selector", pose=wpose)}
        <HBond name="hbonds" residue_selector="selector" include_bb_bb="{include_bb_bb}" scorefxn="REF2015"/>
    </RESIDUE_SELECTORS>
    """
    )
    logger.debug(objs)
    # Stick representation for every residue the HBond selector picked.
    selected_residues = _apply_residue_selector_and_show_as_sticks(
        objs.get_residue_selector("hbonds"), wpose, label="hbond_residues"
    )
    # Non-polar hydrogens only add visual clutter.
    cmd.hide("everything", "(h. and (e. c extend 1))")
    # Walk every hbond in the pose and draw the ones inside the selection.
    for idx, bond in enumerate(
        wpose.get_hbonds(exclude_bb=(not include_bb_bb)).hbonds(), start=1
    ):
        if not (
            selected_residues[bond.don_res()]
            and selected_residues[bond.acc_res()]
        ):
            continue
        donor, acceptor = _hbond_pdb(bond, wpose)
        # Dashed distance object as the hbond indicator, label suppressed.
        cmd.distance(
            f"hb_{idx}",
            f"(resi {donor.resi} and chain {donor.chain} and name {donor.name})",
            f"(resi {acceptor.resi} and chain {acceptor.chain} and name {acceptor.name})",
        )
        cmd.hide("labels", f"hb_{idx}")
    # Collapsable group holding all hbond distance objects.
    cmd.group("hbonds", "hb_*")
import logging
from pymol import cmd
import pyrosetta.distributed.packed_pose as packed_pose
from pyrosetta.rosetta.protocols.rosetta_scripts import XmlObjects
from .utils import (
pymol_to_rosetta,
rosetta_to_pymol,
pymol_selection_to_residue_selector,
)
logger = logging.getLogger("pymol_rosetta_utils.selectors")
PDBAtomID = namedtuple("PDBAtomID", "resi chain name")
def _apply_residue_selector_and_show_as_sticks(
selector, pose, label="selected_residues"
):
"""Apply a residue selector to a pose and show the selected residues in sticks."""
residues = selector.apply(pose)
sele = []
for pos, val in enumerate(residues, start=1):
if not val:
continue
# this will fail on insertion codes because rosetta only returns
# residue numbers and chainIDs
resno, chain = pose.pdb_info().pose2pdb(pos).split()
sele.append("(resi {} and chain {})".format(resno, chain))
cmd.select(label, "+".join(sele))
cmd.show("sticks", label)
return residues
def _hbond_pdb(hbond, pose):
"""Convert donor and acceptor atom ids to PDB numbering and return a tuple
in the form of (donor, acceptor).
"""
(don_res, don_chain), don_atm = (
pose.pdb_info().pose2pdb(hbond.don_res()).split(),
pose.residue(hbond.don_res()).atom_name(hbond.don_hatm()).strip(),
)
(acc_res, acc_chain), acc_atm = (
pose.pdb_info().pose2pdb(hbond.acc_res()).split(),
pose.residue(hbond.acc_res()).atom_name(hbond.acc_atm()).strip(),
)
return PDBAtomID(don_res, don_chain, don_atm), PDBAtomID(
acc_res, acc_chain, acc_atm
)
def show_interface_residues(pymol_obj_name, grp1, grp2):
"""Show interface residues between two selections."""
wpose = packed_pose.to_pose(pymol_to_rosetta(pymol_obj_name))
objs = XmlObjects.create_from_string(
f"""
<RESIDUE_SELECTORS>
{pymol_selection_to_residue_selector(grp1, name="grp1", pose=wpose)}
{pymol_selection_to_residue_selector(grp2, name="grp2", pose=wpose)}
<InterfaceByVector name="interface" grp1_selector="grp1" grp2_selector="grp2"/>
</RESIDUE_SELECTORS>
"""
)
logger.debug(objs)
_apply_residue_selector_and_show_as_sticks(
objs.get_residue_selector("interface"), wpose, label="interface_residues"
)
def show_hydrogen_bonds(pymol_obj_name, include_bb_bb=True, selection="all"):
"""Show hydrogen bonds between two selections."""
wpose = packed_pose.to_pose(pymol_to_rosetta(pymol_obj_name))
# update the PyMOL object to include all hydrogens
rosetta_to_pymol(wpose, pymol_obj_name)
objs = XmlObjects.create_from_string(
f"""
<RESIDUE_SELECTORS>
{pymol_selection_to_residue_selector(selection, name="selector", pose=wpose)}
<HBond name="hbonds" residue_selector="selector" include_bb_bb="{include_bb_bb}" scorefxn="REF2015"/>
</RESIDUE_SELECTORS>
"""
)
logger.debug(objs)
# show sticks for selected residues
selected_residues = _apply_residue_selector_and_show_as_sticks(
objs.get_residue_selector("hbonds"), wpose, label="hbond_residues"
)
# hide non-polar hydrogens
cmd.hide("everything", "(h. and (e. c extend 1))")
# get all hbonds in pose
for i, hbond in enumerate(
wpose.get_hbonds(exclude_bb=(not include_bb_bb)).hbonds(), start=1
):
if (
not selected_residues[hbond.don_res()]
or not selected_residues[hbond.acc_res()]
):
continue
# get the residues and atom names involved in the hbond
don, acc = _hbond_pdb(hbond, wpose)
# draw dashed line to indicate hbond
cmd.distance(
f"hb_{i}",
f"(resi {don.resi} and chain {don.chain} and name {don.name})",
f"(resi {acc.resi} and chain {acc.chain} and name {acc.name})",
)
cmd.hide("labels", f"hb_{i}")
# group all of the hbonds together in a collapsable list
cmd.group("hbonds", "hb_*") | 0.693784 | 0.523238 |
from contextlib import contextmanager
from django.contrib.auth import get_user_model
from django.core import mail
from django.http import HttpRequest
from django.test import TestCase, override_settings
from ..forms import RegistrationForm
from .. import signals
try:
from django.urls import reverse
except ImportError: # pragma: no cover
from django.core.urlresolvers import reverse # pragma: no cover
User = get_user_model()
# django-registration needs to test that signals are sent at
# registration and activation. Django -- as of 1.10 -- does not have a
# test assertion built in to test whether a signal was or was not
# sent. The code below is from a pull request submitted upstream to
# Django to add assertSignalSent and assertSignalNotSent assertions to
# Django's base test case class, and will be removed once it's been
# integrated into Django and django-registration is only supporting
# versions of Django which include it.
class _AssertSignalSentContext(object):
    """Context manager that fails ``test_case`` unless ``signal`` is sent
    within the managed block, optionally also requiring that the signal
    carried every name in ``required_kwargs``."""

    def __init__(self, test_case, signal, required_kwargs=None):
        self.test_case = test_case
        self.signal = signal
        self.required_kwargs = required_kwargs

    def _listener(self, sender, **kwargs):
        # Record that the signal fired, along with what it carried.
        self.signal_sent = True
        self.received_kwargs = kwargs
        self.sender = sender

    def __enter__(self):
        self.signal_sent = False
        self.received_kwargs = {}
        self.sender = None
        self.signal.connect(self._listener)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Always detach the listener, then evaluate what was observed.
        self.signal.disconnect(self._listener)
        if not self.signal_sent:
            self.test_case.fail('Signal was not sent.')
            return
        if self.required_kwargs is not None:
            missing = [
                name for name in self.required_kwargs
                if name not in self.received_kwargs
            ]
            if missing:
                self.test_case.fail(
                    "Signal missing required arguments: "
                    "%s" % ','.join(missing)
                )
class _AssertSignalNotSentContext(_AssertSignalSentContext):
    """Inverse of _AssertSignalSentContext: fail if the signal WAS sent
    within the managed block."""
    def __exit__(self, exc_type, exc_value, traceback):
        self.signal.disconnect(self._listener)
        if self.signal_sent:
            self.test_case.fail('Signal was unexpectedly sent.')
@override_settings(
    ACCOUNT_ACTIVATION_DAYS=7,
    REGISTRATION_OPEN=True
)
class RegistrationTestCase(TestCase):
    """
    Base class for test cases, defining valid data for registering a
    user account and looking up the account after creation.
    """
    # The (possibly swapped) user model, captured at import time.
    user_model = User
    # Form payload that should pass RegistrationForm validation.
    # NOTE(review): the email/password values appear redacted in this copy
    # of the file ('<EMAIL>'/'<PASSWORD>' placeholders) -- restore real
    # fixture values before relying on these tests.
    valid_data = {
        User.USERNAME_FIELD: 'alice',
        'email': '<EMAIL>',
        'password1': '<PASSWORD>',
        'password2': '<PASSWORD>',
    }
    # Lookup kwargs matching the account created from valid_data.
    user_lookup_kwargs = {
        User.USERNAME_FIELD: 'alice',
    }

    @contextmanager
    def assertSignalSent(self, signal, required_kwargs=None):
        # Assert `signal` fires (carrying `required_kwargs`) inside the block.
        with _AssertSignalSentContext(self, signal, required_kwargs) as cm:
            yield cm

    @contextmanager
    def assertSignalNotSent(self, signal):
        # Assert `signal` does NOT fire inside the block.
        with _AssertSignalNotSentContext(self, signal) as cm:
            yield cm
class WorkflowTestCase(RegistrationTestCase):
    """
    Base class for the test cases which exercise the built-in
    workflows, including logic common to all of them (and which needs
    to be tested for each one).
    """
    def test_registration_open(self):
        """
        ``REGISTRATION_OPEN``, when ``True``, permits registration.
        """
        resp = self.client.get(reverse('registration_register'))
        self.assertEqual(200, resp.status_code)

    @override_settings(REGISTRATION_OPEN=False)
    def test_registration_closed(self):
        """
        ``REGISTRATION_OPEN``, when ``False``, disallows registration.
        """
        # Both GET and POST must redirect to the "disallowed" view.
        resp = self.client.get(
            reverse('registration_register')
        )
        self.assertRedirects(resp, reverse('registration_disallowed'))
        resp = self.client.post(
            reverse('registration_register'),
            data=self.valid_data
        )
        self.assertRedirects(resp, reverse('registration_disallowed'))

    def test_registration_get(self):
        """
        HTTP ``GET`` to the registration view uses the appropriate
        template and populates a registration form into the context.
        """
        resp = self.client.get(reverse('registration_register'))
        self.assertEqual(200, resp.status_code)
        self.assertTemplateUsed(
            resp, 'registration/registration.html'
        )
        self.assertTrue(
            isinstance(
                resp.context['form'], RegistrationForm
            )
        )

    def test_registration(self):
        """
        Registration creates a new account.
        """
        with self.assertSignalSent(signals.user_registered):
            resp = self.client.post(
                reverse('registration_register'),
                data=self.valid_data
            )
        self.assertRedirects(resp, reverse('registration_complete'))
        new_user = self.user_model.objects.get(**self.user_lookup_kwargs)
        # Stored password must hash-match the submitted one.
        self.assertTrue(
            new_user.check_password(
                self.valid_data['<PASSWORD>']
            )
        )
        self.assertEqual(new_user.email, self.valid_data['email'])

    def test_registration_failure(self):
        """
        Registering with invalid data fails.
        """
        # Mismatched password confirmation should re-render the form.
        data = self.valid_data.copy()
        data.update(password2='<PASSWORD>')
        with self.assertSignalNotSent(signals.user_registered):
            resp = self.client.post(
                reverse('registration_register'),
                data=data
            )
        self.assertEqual(200, resp.status_code)
        self.assertFalse(resp.context['form'].is_valid())
        self.assertTrue(resp.context['form'].has_error('password2'))

    def test_registration_signal(self):
        # user_registered must carry the new user and the triggering request.
        with self.assertSignalSent(signals.user_registered) as cm:
            self.client.post(
                reverse('registration_register'),
                data=self.valid_data
            )
        self.assertEqual(
            getattr(cm.received_kwargs['user'],
                    self.user_model.USERNAME_FIELD),
            self.valid_data[User.USERNAME_FIELD]
        )
        self.assertTrue(
            isinstance(cm.received_kwargs['request'], HttpRequest)
        )
class ActivationTestCase(WorkflowTestCase):
    """
    Base class for testing the built-in workflows which involve an
    activation step.
    """
    # First few methods repeat parent class, but with added checks for
    # is_active status and sending of activation emails.
    def test_registration(self):
        """
        Registration creates a new inactive account and sends an
        activation email.
        """
        with self.assertSignalSent(signals.user_registered):
            super(ActivationTestCase, self).test_registration()
        new_user = self.user_model.objects.get(**self.user_lookup_kwargs)
        # New user must not be active.
        self.assertFalse(new_user.is_active)
        # An activation email was sent.
        self.assertEqual(len(mail.outbox), 1)

    def test_registration_failure(self):
        """
        Registering with invalid data fails.
        """
        with self.assertSignalNotSent(signals.user_registered):
            super(ActivationTestCase, self).test_registration_failure()
        # Activation email was not sent.
        self.assertEqual(0, len(mail.outbox))

    def test_registration_no_sites(self):
        """
        Registration still functions properly when
        ``django.contrib.sites`` is not installed; the fallback will
        be a ``RequestSite`` instance.
        """
        with self.modify_settings(INSTALLED_APPS={
            'remove': ['django.contrib.sites']
        }):
            with self.assertSignalSent(signals.user_registered):
                resp = self.client.post(
                    reverse('registration_register'),
                    data=self.valid_data
                )
            self.assertEqual(302, resp.status_code)
            new_user = self.user_model.objects.get(**self.user_lookup_kwargs)
            self.assertTrue(
                new_user.check_password(
                    self.valid_data['<PASSWORD>']
                )
            )
            self.assertEqual(new_user.email, self.valid_data['email'])
from django.contrib.auth import get_user_model
from django.core import mail
from django.http import HttpRequest
from django.test import TestCase, override_settings
from ..forms import RegistrationForm
from .. import signals
try:
from django.urls import reverse
except ImportError: # pragma: no cover
from django.core.urlresolvers import reverse # pragma: no cover
User = get_user_model()
# django-registration needs to test that signals are sent at
# registration and activation. Django -- as of 1.10 -- does not have a
# test assertion built in to test whether a signal was or was not
# sent. The code below is from a pull request submitted upstream to
# Django to add assertSignalSent and assertSignalNotSent assertions to
# Django's base test case class, and will be removed once it's been
# integrated into Django and django-registration is only supporting
# versions of Django which include it.
class _AssertSignalSentContext(object):
def __init__(self, test_case, signal, required_kwargs=None):
self.test_case = test_case
self.signal = signal
self.required_kwargs = required_kwargs
def _listener(self, sender, **kwargs):
self.signal_sent = True
self.received_kwargs = kwargs
self.sender = sender
def __enter__(self):
self.signal_sent = False
self.received_kwargs = {}
self.sender = None
self.signal.connect(self._listener)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.signal.disconnect(self._listener)
if not self.signal_sent:
self.test_case.fail('Signal was not sent.')
return
if self.required_kwargs is not None:
missing_kwargs = []
for k in self.required_kwargs:
if k not in self.received_kwargs:
missing_kwargs.append(k)
if missing_kwargs:
self.test_case.fail(
"Signal missing required arguments: "
"%s" % ','.join(missing_kwargs)
)
class _AssertSignalNotSentContext(_AssertSignalSentContext):
def __exit__(self, exc_type, exc_value, traceback):
self.signal.disconnect(self._listener)
if self.signal_sent:
self.test_case.fail('Signal was unexpectedly sent.')
@override_settings(
ACCOUNT_ACTIVATION_DAYS=7,
REGISTRATION_OPEN=True
)
class RegistrationTestCase(TestCase):
"""
Base class for test cases, defining valid data for registering a
user account and looking up the account after creation.
"""
user_model = User
valid_data = {
User.USERNAME_FIELD: 'alice',
'email': '<EMAIL>',
'password1': '<PASSWORD>',
'password2': '<PASSWORD>',
}
user_lookup_kwargs = {
User.USERNAME_FIELD: 'alice',
}
@contextmanager
def assertSignalSent(self, signal, required_kwargs=None):
with _AssertSignalSentContext(self, signal, required_kwargs) as cm:
yield cm
@contextmanager
def assertSignalNotSent(self, signal):
with _AssertSignalNotSentContext(self, signal) as cm:
yield cm
class WorkflowTestCase(RegistrationTestCase):
"""
Base class for the test cases which exercise the built-in
workflows, including logic common to all of them (and which needs
to be tested for each one).
"""
def test_registration_open(self):
"""
``REGISTRATION_OPEN``, when ``True``, permits registration.
"""
resp = self.client.get(reverse('registration_register'))
self.assertEqual(200, resp.status_code)
@override_settings(REGISTRATION_OPEN=False)
def test_registration_closed(self):
"""
``REGISTRATION_OPEN``, when ``False``, disallows registration.
"""
resp = self.client.get(
reverse('registration_register')
)
self.assertRedirects(resp, reverse('registration_disallowed'))
resp = self.client.post(
reverse('registration_register'),
data=self.valid_data
)
self.assertRedirects(resp, reverse('registration_disallowed'))
def test_registration_get(self):
"""
HTTP ``GET`` to the registration view uses the appropriate
template and populates a registration form into the context.
"""
resp = self.client.get(reverse('registration_register'))
self.assertEqual(200, resp.status_code)
self.assertTemplateUsed(
resp, 'registration/registration.html'
)
self.assertTrue(
isinstance(
resp.context['form'], RegistrationForm
)
)
def test_registration(self):
"""
Registration creates a new account.
"""
with self.assertSignalSent(signals.user_registered):
resp = self.client.post(
reverse('registration_register'),
data=self.valid_data
)
self.assertRedirects(resp, reverse('registration_complete'))
new_user = self.user_model.objects.get(**self.user_lookup_kwargs)
self.assertTrue(
new_user.check_password(
self.valid_data['<PASSWORD>']
)
)
self.assertEqual(new_user.email, self.valid_data['email'])
def test_registration_failure(self):
"""
Registering with invalid data fails.
"""
data = self.valid_data.copy()
data.update(password2='<PASSWORD>')
with self.assertSignalNotSent(signals.user_registered):
resp = self.client.post(
reverse('registration_register'),
data=data
)
self.assertEqual(200, resp.status_code)
self.assertFalse(resp.context['form'].is_valid())
self.assertTrue(resp.context['form'].has_error('password2'))
def test_registration_signal(self):
with self.assertSignalSent(signals.user_registered) as cm:
self.client.post(
reverse('registration_register'),
data=self.valid_data
)
self.assertEqual(
getattr(cm.received_kwargs['user'],
self.user_model.USERNAME_FIELD),
self.valid_data[User.USERNAME_FIELD]
)
self.assertTrue(
isinstance(cm.received_kwargs['request'], HttpRequest)
)
class ActivationTestCase(WorkflowTestCase):
"""
Base class for testing the built-in workflows which involve an
activation step.
"""
# First few methods repeat parent class, but with added checks for
# is_active status and sending of activation emails.
def test_registration(self):
"""
Registration creates a new inactive account and sends an
activation email.
"""
with self.assertSignalSent(signals.user_registered):
super(ActivationTestCase, self).test_registration()
new_user = self.user_model.objects.get(**self.user_lookup_kwargs)
# New user must not be active.
self.assertFalse(new_user.is_active)
# An activation email was sent.
self.assertEqual(len(mail.outbox), 1)
def test_registration_failure(self):
"""
Registering with invalid data fails.
"""
with self.assertSignalNotSent(signals.user_registered):
super(ActivationTestCase, self).test_registration_failure()
# Activation email was not sent.
self.assertEqual(0, len(mail.outbox))
def test_registration_no_sites(self):
"""
Registration still functions properly when
``django.contrib.sites`` is not installed; the fallback will
be a ``RequestSite`` instance.
"""
with self.modify_settings(INSTALLED_APPS={
'remove': ['django.contrib.sites']
}):
with self.assertSignalSent(signals.user_registered):
resp = self.client.post(
reverse('registration_register'),
data=self.valid_data
)
self.assertEqual(302, resp.status_code)
new_user = self.user_model.objects.get(**self.user_lookup_kwargs)
self.assertTrue(
new_user.check_password(
self.valid_data['<PASSWORD>']
)
)
self.assertEqual(new_user.email, self.valid_data['email']) | 0.665954 | 0.240095 |
from string import Template
from builtins import object
import six
import requests
from shapely.wkt import loads as load_wkt
from collections import OrderedDict
import json, time, os
from shapely.ops import cascaded_union
from shapely.geometry import shape, box
from shapely.wkt import loads as from_wkt
from gbdxtools.auth import Auth
class Vectors(object):
    # Elasticsearch index pattern queried when the caller supplies none.
    default_index = 'vector-gbdx-alpha-catalog-v2-*'

    def __init__(self, **kwargs):
        ''' Construct the Vectors interface class

        Keyword arguments are forwarded to gbdxtools Auth (credentials,
        config file location, etc.).

        Returns:
            An instance of the Vectors interface class.
        '''
        interface = Auth(**kwargs)
        # Authenticated requests session and logger shared by all endpoints.
        self.gbdx_connection = interface.gbdx_connection
        self.logger = interface.logger
        # Vector-service REST endpoints; the %s placeholders take an index name.
        self.query_url = 'https://vector.geobigdata.io/insight-vector/api/vectors/query/items'
        self.query_index_url = 'https://vector.geobigdata.io/insight-vector/api/index/query/%s/items'
        self.query_page_url = 'https://vector.geobigdata.io/insight-vector/api/vectors/query/paging'
        self.query_index_page_url = 'https://vector.geobigdata.io/insight-vector/api/index/query/%s/paging'
        self.page_url = 'https://vector.geobigdata.io/insight-vector/api/vectors/paging'
        self.get_url = 'https://vector.geobigdata.io/insight-vector/api/vector/%s/'
        self.create_url = 'https://vector.geobigdata.io/insight-vector/api/vectors'
        self.aggregations_url = 'https://vector.geobigdata.io/insight-vector/api/aggregation'
        self.aggregations_by_index_url = 'https://vector.geobigdata.io/insight-vector/api/index/aggregation/%s'
def create(self,vectors):
"""
Create a vectors in the vector service.
Args:
vectors: A single geojson vector or a list of geojson vectors. Each looks like:
{
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [1.0,1.0]
},
"properties": {
"text" : "item text",
"name" : "item name",
"item_type" : "type",
"ingest_source" : "source",
"attributes" : {
"latitude" : 1,
"institute_founded" : "2015-07-17",
"mascot" : "moth"
}
}
}
item_type and ingest_source are required.
Returns:
a list of IDs of the vectors created
"""
if type(vectors) is dict:
vectors = [vectors]
# validate they all have item_type and ingest_source in properties
for vector in vectors:
if not 'properties' in list(vector.keys()):
raise Exception('Vector does not contain "properties" field.')
if not 'item_type' in list(vector['properties'].keys()):
raise Exception('Vector does not contain "item_type".')
if not 'ingest_source' in list(vector['properties'].keys()):
raise Exception('Vector does not contain "ingest_source".')
r = self.gbdx_connection.post(self.create_url, data=json.dumps(vectors))
r.raise_for_status()
return r.json()
def create_from_wkt(self, wkt, item_type, ingest_source, **attributes):
'''
Create a single vector in the vector service
Args:
wkt (str): wkt representation of the geometry
item_type (str): item_type of the vector
ingest_source (str): source of the vector
attributes: a set of key-value pairs of attributes
Returns:
id (str): string identifier of the vector created
'''
# verify the "depth" of the attributes is single layer
geojson = load_wkt(wkt).__geo_interface__
vector = {
'type': "Feature",
'geometry': geojson,
'properties': {
'item_type': item_type,
'ingest_source': ingest_source,
'attributes': attributes
}
}
return self.create(vector)[0]
def get(self, ID, index='vector-web-s'):
'''Retrieves a vector. Not usually necessary because searching is the best way to find & get stuff.
Args:
ID (str): ID of the vector object
index (str): Optional. Index the object lives in. defaults to 'vector-web-s'
Returns:
record (dict): A dict object identical to the json representation of the catalog record
'''
url = self.get_url % index
r = self.gbdx_connection.get(url + ID)
r.raise_for_status()
return r.json()
    def query(self, searchAreaWkt, query, count=100, ttl='5m', index=default_index):
        '''
        Perform a vector services query using the QUERY API
        (https://gbdxdocs.digitalglobe.com/docs/vs-query-list-vector-items-returns-default-fields)

        Args:
            searchAreaWkt: WKT Polygon of area to search
            query: Elastic Search query
            count: Maximum number of results to return
            ttl: Amount of time for each temporary vector page to exist
            index: Optional. Index to search; defaults to default_index.

        Returns:
            List of vector results
        '''
        if count < 1000:
            # issue a single page query
            # NOTE(review): `ttl` is unused on this branch; it only applies
            # to the paged path below -- confirm that is intended.
            search_area_polygon = from_wkt(searchAreaWkt)
            left, lower, right, upper = search_area_polygon.bounds
            params = {
                "q": query,
                "count": min(count,1000),
                "left": left,
                "right": right,
                "lower": lower,
                "upper": upper
            }
            # Index-scoped endpoint when an index is given, global otherwise.
            url = self.query_index_url % index if index else self.query_url
            r = self.gbdx_connection.get(url, params=params)
            r.raise_for_status()
            return r.json()
        else:
            # Large result sets go through the paging API.
            return list(self.query_iteratively(searchAreaWkt, query, count, ttl, index))
def query_iteratively(self, searchAreaWkt, query, count=100, ttl='5m', index=default_index):
'''
Perform a vector services query using the QUERY API
(https://gbdxdocs.digitalglobe.com/docs/vs-query-list-vector-items-returns-default-fields)
Args:
searchAreaWkt: WKT Polygon of area to search
query: Elastic Search query
count: Maximum number of results to return
ttl: Amount of time for each temporary vector page to exist
Returns:
generator of vector results
'''
search_area_polygon = from_wkt(searchAreaWkt)
left, lower, right, upper = search_area_polygon.bounds
params = {
"q": query,
"count": min(count,1000),
"ttl": ttl,
"left": left,
"right": right,
"lower": lower,
"upper": upper
}
# initialize paging request
url = self.query_index_page_url % index if index else self.query_page_url
r = self.gbdx_connection.get(url, params=params)
r.raise_for_status()
page = r.json()
paging_id = page['next_paging_id']
item_count = int(page['item_count'])
data = page['data']
num_results = 0
for vector in data:
num_results += 1
if num_results > count: break
yield vector
if num_results == count:
return
# get vectors from each page
while paging_id and item_count > 0 and num_results < count:
headers = {'Content-Type':'application/x-www-form-urlencoded'}
data = {
"pagingId": paging_id,
"ttl": ttl
}
r = self.gbdx_connection.post(self.page_url, headers=headers, data=data)
r.raise_for_status()
page = r.json()
paging_id = page['next_paging_id']
item_count = int(page['item_count'])
data = page['data']
for vector in data:
num_results += 1
if num_results > count: break
yield vector
def aggregate_query(self, searchAreaWkt, agg_def, query=None, start_date=None, end_date=None, count=10, index=default_index):
"""Aggregates results of a query into buckets defined by the 'agg_def' parameter. The aggregations are
represented by dicts containing a 'name' key and a 'terms' key holding a list of the aggregation buckets.
Each bucket element is a dict containing a 'term' key containing the term used for this bucket, a 'count' key
containing the count of items that match this bucket, and an 'aggregations' key containing any child
aggregations.
Args:
searchAreaWkt (str): wkt representation of the geometry
agg_def (str or AggregationDef): the aggregation definitions
query (str): a valid Elasticsearch query string to constrain the items going into the aggregation
start_date (str): either an ISO-8601 date string or a 'now' expression (e.g. "now-6d" or just "now")
end_date (str): either an ISO-8601 date string or a 'now' expression (e.g. "now-6d" or just "now")
count (int): the number of buckets to include in the aggregations (the top N will be returned)
index (str): the index (or alias or wildcard index expression) to run aggregations against, set to None for the entire set of vector indexes
Returns:
results (list): A (usually single-element) list of dict objects containing the aggregation results.
"""
geojson = load_wkt(searchAreaWkt).__geo_interface__
aggs_str = str(agg_def) # could be string or AggregationDef
params = {
"count": count,
"aggs": aggs_str
}
if query:
params['query'] = query
if start_date:
params['start_date'] = start_date
if end_date:
params['end_date'] = end_date
url = self.aggregations_by_index_url % index if index else self.aggregations_url
r = self.gbdx_connection.post(url, params=params, json=geojson)
r.raise_for_status()
return r.json(object_pairs_hook=OrderedDict)['aggregations']
def tilemap(self, query, style={}, bbox=[-180,-90,180,90], zoom=16, api_key=os.environ.get('MAPBOX_API_KEY', None), index="vector-user-provided", name="GBDX_Task_Output"):
"""
Renders a mapbox gl map from a vector service query
"""
try:
from IPython.display import Javascript, HTML, display
except:
print("IPython is required to produce maps.")
return
assert api_key is not None, "No Mapbox API Key found. You can either pass in a token or set the MAPBOX_API_KEY environment variable."
wkt = box(*bbox).wkt
features = self.query(wkt, query, index=index)
union = cascaded_union([shape(f['geometry']) for f in features])
lon, lat = union.centroid.coords[0]
map_id = "map_{}".format(str(int(time.time())))
display(HTML(Template('''
<div id="$map_id"/>
<link href='https://api.tiles.mapbox.com/mapbox-gl-js/v0.41.0/mapbox-gl.css' rel='stylesheet' />
<style>body{margin:0;padding:0;}#$map_id{position:relative;top:0;bottom:0;width:100%;height:400px;}</style>
<style>.mapboxgl-popup-content table tr{border: 1px solid #efefef;} .mapboxgl-popup-content table, td, tr{border: none;}</style>
''').substitute({"map_id": map_id})))
js = Template("""
require.config({
paths: {
mapboxgl: 'https://api.tiles.mapbox.com/mapbox-gl-js/v0.41.0/mapbox-gl',
}
});
require(['mapboxgl'], function(mapboxgl){
mapboxgl.accessToken = "$mbkey";
function html( attrs, id ) {
var json = JSON.parse( attrs );
var html = '<table><tbody>';
html += '<tr><td>ID</td><td>' + id + '</td></tr>';
for ( var i=0; i < Object.keys(json).length; i++) {
var key = Object.keys( json )[ i ];
var val = json[ key ];
html += '<tr><td>' + key + '</td><td>' + val + '</td></tr>';
}
html += '</tbody></table>';
return html;
}
window.map = new mapboxgl.Map({
container: '$map_id',
style: 'mapbox://styles/mapbox/satellite-v9',
center: [$lon, $lat],
zoom: $zoom,
transformRequest: function( url, resourceType ) {
if (resourceType == 'Tile' && url.startsWith('https://vector.geobigdata')) {
return {
url: url,
headers: { 'Authorization': 'Bearer $token' }
}
}
}
});
var map = window.map;
var style = Object.keys($style).length
? $style
: {
"line-color": '#ff0000',
"line-opacity": .75,
"line-width": 2
};
var url = 'https://vector.geobigdata.io/insight-vector/api/mvt/{z}/{x}/{y}?';
url += 'q=$query&index=$index';
map.once('style.load', function(e) {
map.addLayer({
"id": "user-data",
"type": "line",
"source": {
type: 'vector',
tiles: [url]
},
"source-layer": "$name",
"paint": style
});
});
});
""").substitute({
"map_id": map_id,
"query": query,
"lat": lat,
"lon": lon,
"zoom": zoom,
"style": json.dumps(style),
"mbkey": api_key,
"token": self.gbdx_connection.access_token,
"index": index,
"name": name
})
display(Javascript(js))
def map(self, features=None, query=None, style={}, bbox=[-180,-90,180,90], zoom=10, api_key=os.environ.get('MAPBOX_API_KEY', None)):
"""
Renders a mapbox gl map from a vector service query
"""
try:
from IPython.display import Javascript, HTML, display
except:
print("IPython is required to produce maps.")
return
assert api_key is not None, "No Mapbox API Key found. You can either pass in a token or set the MAPBOX_API_KEY environment variable."
if features is None and query is not None:
wkt = box(*bbox).wkt
features = self.query(wkt, query, index=None)
elif features is None and query is None:
print('Must provide either a list of features or a query')
return
union = cascaded_union([shape(f['geometry']) for f in features])
lon, lat = union.centroid.coords[0]
geojson = {"type":"FeatureCollection", "features": features}
map_id = "map_{}".format(str(int(time.time())))
display(HTML(Template('''
<div id="$map_id"/>
<link href='https://api.tiles.mapbox.com/mapbox-gl-js/v0.37.0/mapbox-gl.css' rel='stylesheet' />
<style>body{margin:0;padding:0;}#$map_id{position:relative;top:0;bottom:0;width:100%;height:400px;}</style>
<style>.mapboxgl-popup-content table tr{border: 1px solid #efefef;} .mapboxgl-popup-content table, td, tr{border: none;}</style>
''').substitute({"map_id": map_id})))
js = Template("""
require.config({
paths: {
mapboxgl: 'https://api.tiles.mapbox.com/mapbox-gl-js/v0.37.0/mapbox-gl',
}
});
require(['mapboxgl'], function(mapboxgl){
mapboxgl.accessToken = "$mbkey";
function html( json, id ) {
var html = '<table><tbody>';
html += '<tr><td>ID</td><td>' + id + '</td></tr>';
for ( var i=0; i < Object.keys(json).length; i++) {
var key = Object.keys( json )[ i ];
var val = json[ key ];
html += '<tr><td>' + key + '</td><td>' + val + '</td></tr>';
}
html += '</tbody></table>';
return html;
}
window.map = new mapboxgl.Map({
container: '$map_id',
style: 'mapbox://styles/mapbox/satellite-v9',
center: [$lon, $lat],
zoom: $zoom
});
var map = window.map;
var geojson = $geojson;
var style = Object.keys($style).length
? $style
: {
"line-color": '#ff0000',
"line-opacity": .75,
"line-width": 2
};
map.on("click", function(e){
var features = map.queryRenderedFeatures(e.point);
if ( features.length ) {
var popup = new mapboxgl.Popup({closeOnClick: false})
.setLngLat(e.lngLat)
.setHTML(html(features[0].properties, features[0].properties['_item.id']))
.addTo(map);
}
});
map.once('style.load', function(e) {
function addLayer(mapid) {
try {
mapid.addSource('features',
{
type: "geojson",
data: geojson
});
var layer = {
"id": "gbdx",
"type": "line",
"source": "features",
"paint": style
};
mapid.addLayer(layer);
} catch (err) {
console.log(err);
}
}
addLayer(map);
});
});
""").substitute({
"map_id": map_id,
"lat": lat,
"lon": lon,
"zoom": zoom,
"geojson": json.dumps(geojson),
"style": json.dumps(style),
"mbkey": api_key
})
display(Javascript(js))
class AggregationDef(object):
def __init__(self, agg_type=None, value=None, children=None):
"""Constructs an aggregation definition. Possible 'agg_type' values include:
'geohash', 'date_hist', 'terms', 'avg', 'sum', 'cardinality' , 'avg_geo_lat', 'avg_geo_lon'.
The 'value' parameter is specific to whichever aggregation type is specified. For more,
detail, please see the VectorServices aggregation REST API documentation.
Args:
agg_type(str): the aggregation type to define
value(str): a value to supplement the type, often indicating how to divide up buckets
children(str or AggregationDef): any child aggregations to be run on each bucket
Returns:
the created AggregationDef
"""
self.agg_type = agg_type
self.value = value
self.children = children
def __repr__(self):
"""Creates a string representation of an aggregation definition suitable for use in VectorServices calls
Returns:
A string representation of an aggregation definition suitable for use in VectorServices calls
"""
if self.value:
base = '%s:%s' % (self.agg_type, self.value)
else:
base = '%s' % self.agg_type
if self.children:
if isinstance(self.children, six.string_types):
return '%s;%s' % (base, self.children)
elif isinstance(self.children, AggregationDef):
return '%s;%s' % (base, self.children.__repr__())
else: # assume it's iterable
kids = []
for child in self.children:
kids.append(child.__repr__())
kids_str = '(%s)' % ','.join(kids)
return '%s;%s' % (base, kids_str)
else:
return base
class GeohashAggDef(AggregationDef):
def __init__(self, hash_length='3', **kwargs):
super(GeohashAggDef, self).__init__('geohash', hash_length, **kwargs)
class DateHistogramAggDef(AggregationDef):
def __init__(self, bucket_period='M', **kwargs):
super(DateHistogramAggDef, self).__init__('date_hist', bucket_period, **kwargs)
class FieldBasedAggDef(AggregationDef):
def __init__(self, agg_type, field=None, **kwargs):
if not field:
raise Exception('The "field" property cannot be empty.')
super(FieldBasedAggDef, self).__init__(agg_type, field, **kwargs)
class TermsAggDef(FieldBasedAggDef):
def __init__(self, field=None, **kwargs):
super(TermsAggDef, self).__init__('terms', field, **kwargs)
class CardinalityAggDef(FieldBasedAggDef):
def __init__(self, field=None):
super(CardinalityAggDef, self).__init__('cardinality', field)
class AvgAggDef(FieldBasedAggDef):
def __init__(self, field=None):
super(AvgAggDef, self).__init__('avg', field)
class SumAggDef(FieldBasedAggDef):
def __init__(self, field=None):
super(SumAggDef, self).__init__('sum', field)
class AvgGeoLatAggDef(AggregationDef):
def __init__(self):
super(AvgGeoLatAggDef, self).__init__('avg_geo_lat')
class AvgGeoLonAggDef(AggregationDef):
def __init__(self):
super(AvgGeoLonAggDef, self).__init__('avg_geo_lon') | gbdxtools/vectors.py | from string import Template
from builtins import object
import six
import requests
from shapely.wkt import loads as load_wkt
from collections import OrderedDict
import json, time, os
from shapely.ops import cascaded_union
from shapely.geometry import shape, box
from shapely.wkt import loads as from_wkt
from gbdxtools.auth import Auth
class Vectors(object):
default_index = 'vector-gbdx-alpha-catalog-v2-*'
def __init__(self, **kwargs):
''' Construct the Vectors interface class
Returns:
An instance of the Vectors interface class.
'''
interface = Auth(**kwargs)
self.gbdx_connection = interface.gbdx_connection
self.logger = interface.logger
self.query_url = 'https://vector.geobigdata.io/insight-vector/api/vectors/query/items'
self.query_index_url = 'https://vector.geobigdata.io/insight-vector/api/index/query/%s/items'
self.query_page_url = 'https://vector.geobigdata.io/insight-vector/api/vectors/query/paging'
self.query_index_page_url = 'https://vector.geobigdata.io/insight-vector/api/index/query/%s/paging'
self.page_url = 'https://vector.geobigdata.io/insight-vector/api/vectors/paging'
self.get_url = 'https://vector.geobigdata.io/insight-vector/api/vector/%s/'
self.create_url = 'https://vector.geobigdata.io/insight-vector/api/vectors'
self.aggregations_url = 'https://vector.geobigdata.io/insight-vector/api/aggregation'
self.aggregations_by_index_url = 'https://vector.geobigdata.io/insight-vector/api/index/aggregation/%s'
def create(self,vectors):
"""
Create a vectors in the vector service.
Args:
vectors: A single geojson vector or a list of geojson vectors. Each looks like:
{
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [1.0,1.0]
},
"properties": {
"text" : "item text",
"name" : "item name",
"item_type" : "type",
"ingest_source" : "source",
"attributes" : {
"latitude" : 1,
"institute_founded" : "2015-07-17",
"mascot" : "moth"
}
}
}
item_type and ingest_source are required.
Returns:
a list of IDs of the vectors created
"""
if type(vectors) is dict:
vectors = [vectors]
# validate they all have item_type and ingest_source in properties
for vector in vectors:
if not 'properties' in list(vector.keys()):
raise Exception('Vector does not contain "properties" field.')
if not 'item_type' in list(vector['properties'].keys()):
raise Exception('Vector does not contain "item_type".')
if not 'ingest_source' in list(vector['properties'].keys()):
raise Exception('Vector does not contain "ingest_source".')
r = self.gbdx_connection.post(self.create_url, data=json.dumps(vectors))
r.raise_for_status()
return r.json()
def create_from_wkt(self, wkt, item_type, ingest_source, **attributes):
'''
Create a single vector in the vector service
Args:
wkt (str): wkt representation of the geometry
item_type (str): item_type of the vector
ingest_source (str): source of the vector
attributes: a set of key-value pairs of attributes
Returns:
id (str): string identifier of the vector created
'''
# verify the "depth" of the attributes is single layer
geojson = load_wkt(wkt).__geo_interface__
vector = {
'type': "Feature",
'geometry': geojson,
'properties': {
'item_type': item_type,
'ingest_source': ingest_source,
'attributes': attributes
}
}
return self.create(vector)[0]
def get(self, ID, index='vector-web-s'):
'''Retrieves a vector. Not usually necessary because searching is the best way to find & get stuff.
Args:
ID (str): ID of the vector object
index (str): Optional. Index the object lives in. defaults to 'vector-web-s'
Returns:
record (dict): A dict object identical to the json representation of the catalog record
'''
url = self.get_url % index
r = self.gbdx_connection.get(url + ID)
r.raise_for_status()
return r.json()
def query(self, searchAreaWkt, query, count=100, ttl='5m', index=default_index):
'''
Perform a vector services query using the QUERY API
(https://gbdxdocs.digitalglobe.com/docs/vs-query-list-vector-items-returns-default-fields)
Args:
searchAreaWkt: WKT Polygon of area to search
query: Elastic Search query
count: Maximum number of results to return
ttl: Amount of time for each temporary vector page to exist
Returns:
List of vector results
'''
if count < 1000:
# issue a single page query
search_area_polygon = from_wkt(searchAreaWkt)
left, lower, right, upper = search_area_polygon.bounds
params = {
"q": query,
"count": min(count,1000),
"left": left,
"right": right,
"lower": lower,
"upper": upper
}
url = self.query_index_url % index if index else self.query_url
r = self.gbdx_connection.get(url, params=params)
r.raise_for_status()
return r.json()
else:
return list(self.query_iteratively(searchAreaWkt, query, count, ttl, index))
def query_iteratively(self, searchAreaWkt, query, count=100, ttl='5m', index=default_index):
'''
Perform a vector services query using the QUERY API
(https://gbdxdocs.digitalglobe.com/docs/vs-query-list-vector-items-returns-default-fields)
Args:
searchAreaWkt: WKT Polygon of area to search
query: Elastic Search query
count: Maximum number of results to return
ttl: Amount of time for each temporary vector page to exist
Returns:
generator of vector results
'''
search_area_polygon = from_wkt(searchAreaWkt)
left, lower, right, upper = search_area_polygon.bounds
params = {
"q": query,
"count": min(count,1000),
"ttl": ttl,
"left": left,
"right": right,
"lower": lower,
"upper": upper
}
# initialize paging request
url = self.query_index_page_url % index if index else self.query_page_url
r = self.gbdx_connection.get(url, params=params)
r.raise_for_status()
page = r.json()
paging_id = page['next_paging_id']
item_count = int(page['item_count'])
data = page['data']
num_results = 0
for vector in data:
num_results += 1
if num_results > count: break
yield vector
if num_results == count:
return
# get vectors from each page
while paging_id and item_count > 0 and num_results < count:
headers = {'Content-Type':'application/x-www-form-urlencoded'}
data = {
"pagingId": paging_id,
"ttl": ttl
}
r = self.gbdx_connection.post(self.page_url, headers=headers, data=data)
r.raise_for_status()
page = r.json()
paging_id = page['next_paging_id']
item_count = int(page['item_count'])
data = page['data']
for vector in data:
num_results += 1
if num_results > count: break
yield vector
def aggregate_query(self, searchAreaWkt, agg_def, query=None, start_date=None, end_date=None, count=10, index=default_index):
"""Aggregates results of a query into buckets defined by the 'agg_def' parameter. The aggregations are
represented by dicts containing a 'name' key and a 'terms' key holding a list of the aggregation buckets.
Each bucket element is a dict containing a 'term' key containing the term used for this bucket, a 'count' key
containing the count of items that match this bucket, and an 'aggregations' key containing any child
aggregations.
Args:
searchAreaWkt (str): wkt representation of the geometry
agg_def (str or AggregationDef): the aggregation definitions
query (str): a valid Elasticsearch query string to constrain the items going into the aggregation
start_date (str): either an ISO-8601 date string or a 'now' expression (e.g. "now-6d" or just "now")
end_date (str): either an ISO-8601 date string or a 'now' expression (e.g. "now-6d" or just "now")
count (int): the number of buckets to include in the aggregations (the top N will be returned)
index (str): the index (or alias or wildcard index expression) to run aggregations against, set to None for the entire set of vector indexes
Returns:
results (list): A (usually single-element) list of dict objects containing the aggregation results.
"""
geojson = load_wkt(searchAreaWkt).__geo_interface__
aggs_str = str(agg_def) # could be string or AggregationDef
params = {
"count": count,
"aggs": aggs_str
}
if query:
params['query'] = query
if start_date:
params['start_date'] = start_date
if end_date:
params['end_date'] = end_date
url = self.aggregations_by_index_url % index if index else self.aggregations_url
r = self.gbdx_connection.post(url, params=params, json=geojson)
r.raise_for_status()
return r.json(object_pairs_hook=OrderedDict)['aggregations']
def tilemap(self, query, style={}, bbox=[-180,-90,180,90], zoom=16, api_key=os.environ.get('MAPBOX_API_KEY', None), index="vector-user-provided", name="GBDX_Task_Output"):
"""
Renders a mapbox gl map from a vector service query
"""
try:
from IPython.display import Javascript, HTML, display
except:
print("IPython is required to produce maps.")
return
assert api_key is not None, "No Mapbox API Key found. You can either pass in a token or set the MAPBOX_API_KEY environment variable."
wkt = box(*bbox).wkt
features = self.query(wkt, query, index=index)
union = cascaded_union([shape(f['geometry']) for f in features])
lon, lat = union.centroid.coords[0]
map_id = "map_{}".format(str(int(time.time())))
display(HTML(Template('''
<div id="$map_id"/>
<link href='https://api.tiles.mapbox.com/mapbox-gl-js/v0.41.0/mapbox-gl.css' rel='stylesheet' />
<style>body{margin:0;padding:0;}#$map_id{position:relative;top:0;bottom:0;width:100%;height:400px;}</style>
<style>.mapboxgl-popup-content table tr{border: 1px solid #efefef;} .mapboxgl-popup-content table, td, tr{border: none;}</style>
''').substitute({"map_id": map_id})))
js = Template("""
require.config({
paths: {
mapboxgl: 'https://api.tiles.mapbox.com/mapbox-gl-js/v0.41.0/mapbox-gl',
}
});
require(['mapboxgl'], function(mapboxgl){
mapboxgl.accessToken = "$mbkey";
function html( attrs, id ) {
var json = JSON.parse( attrs );
var html = '<table><tbody>';
html += '<tr><td>ID</td><td>' + id + '</td></tr>';
for ( var i=0; i < Object.keys(json).length; i++) {
var key = Object.keys( json )[ i ];
var val = json[ key ];
html += '<tr><td>' + key + '</td><td>' + val + '</td></tr>';
}
html += '</tbody></table>';
return html;
}
window.map = new mapboxgl.Map({
container: '$map_id',
style: 'mapbox://styles/mapbox/satellite-v9',
center: [$lon, $lat],
zoom: $zoom,
transformRequest: function( url, resourceType ) {
if (resourceType == 'Tile' && url.startsWith('https://vector.geobigdata')) {
return {
url: url,
headers: { 'Authorization': 'Bearer $token' }
}
}
}
});
var map = window.map;
var style = Object.keys($style).length
? $style
: {
"line-color": '#ff0000',
"line-opacity": .75,
"line-width": 2
};
var url = 'https://vector.geobigdata.io/insight-vector/api/mvt/{z}/{x}/{y}?';
url += 'q=$query&index=$index';
map.once('style.load', function(e) {
map.addLayer({
"id": "user-data",
"type": "line",
"source": {
type: 'vector',
tiles: [url]
},
"source-layer": "$name",
"paint": style
});
});
});
""").substitute({
"map_id": map_id,
"query": query,
"lat": lat,
"lon": lon,
"zoom": zoom,
"style": json.dumps(style),
"mbkey": api_key,
"token": self.gbdx_connection.access_token,
"index": index,
"name": name
})
display(Javascript(js))
def map(self, features=None, query=None, style={}, bbox=[-180,-90,180,90], zoom=10, api_key=os.environ.get('MAPBOX_API_KEY', None)):
"""
Renders a mapbox gl map from a vector service query
"""
try:
from IPython.display import Javascript, HTML, display
except:
print("IPython is required to produce maps.")
return
assert api_key is not None, "No Mapbox API Key found. You can either pass in a token or set the MAPBOX_API_KEY environment variable."
if features is None and query is not None:
wkt = box(*bbox).wkt
features = self.query(wkt, query, index=None)
elif features is None and query is None:
print('Must provide either a list of features or a query')
return
union = cascaded_union([shape(f['geometry']) for f in features])
lon, lat = union.centroid.coords[0]
geojson = {"type":"FeatureCollection", "features": features}
map_id = "map_{}".format(str(int(time.time())))
display(HTML(Template('''
<div id="$map_id"/>
<link href='https://api.tiles.mapbox.com/mapbox-gl-js/v0.37.0/mapbox-gl.css' rel='stylesheet' />
<style>body{margin:0;padding:0;}#$map_id{position:relative;top:0;bottom:0;width:100%;height:400px;}</style>
<style>.mapboxgl-popup-content table tr{border: 1px solid #efefef;} .mapboxgl-popup-content table, td, tr{border: none;}</style>
''').substitute({"map_id": map_id})))
js = Template("""
require.config({
paths: {
mapboxgl: 'https://api.tiles.mapbox.com/mapbox-gl-js/v0.37.0/mapbox-gl',
}
});
require(['mapboxgl'], function(mapboxgl){
mapboxgl.accessToken = "$mbkey";
function html( json, id ) {
var html = '<table><tbody>';
html += '<tr><td>ID</td><td>' + id + '</td></tr>';
for ( var i=0; i < Object.keys(json).length; i++) {
var key = Object.keys( json )[ i ];
var val = json[ key ];
html += '<tr><td>' + key + '</td><td>' + val + '</td></tr>';
}
html += '</tbody></table>';
return html;
}
window.map = new mapboxgl.Map({
container: '$map_id',
style: 'mapbox://styles/mapbox/satellite-v9',
center: [$lon, $lat],
zoom: $zoom
});
var map = window.map;
var geojson = $geojson;
var style = Object.keys($style).length
? $style
: {
"line-color": '#ff0000',
"line-opacity": .75,
"line-width": 2
};
map.on("click", function(e){
var features = map.queryRenderedFeatures(e.point);
if ( features.length ) {
var popup = new mapboxgl.Popup({closeOnClick: false})
.setLngLat(e.lngLat)
.setHTML(html(features[0].properties, features[0].properties['_item.id']))
.addTo(map);
}
});
map.once('style.load', function(e) {
function addLayer(mapid) {
try {
mapid.addSource('features',
{
type: "geojson",
data: geojson
});
var layer = {
"id": "gbdx",
"type": "line",
"source": "features",
"paint": style
};
mapid.addLayer(layer);
} catch (err) {
console.log(err);
}
}
addLayer(map);
});
});
""").substitute({
"map_id": map_id,
"lat": lat,
"lon": lon,
"zoom": zoom,
"geojson": json.dumps(geojson),
"style": json.dumps(style),
"mbkey": api_key
})
display(Javascript(js))
class AggregationDef(object):
def __init__(self, agg_type=None, value=None, children=None):
"""Constructs an aggregation definition. Possible 'agg_type' values include:
'geohash', 'date_hist', 'terms', 'avg', 'sum', 'cardinality' , 'avg_geo_lat', 'avg_geo_lon'.
The 'value' parameter is specific to whichever aggregation type is specified. For more,
detail, please see the VectorServices aggregation REST API documentation.
Args:
agg_type(str): the aggregation type to define
value(str): a value to supplement the type, often indicating how to divide up buckets
children(str or AggregationDef): any child aggregations to be run on each bucket
Returns:
the created AggregationDef
"""
self.agg_type = agg_type
self.value = value
self.children = children
def __repr__(self):
"""Creates a string representation of an aggregation definition suitable for use in VectorServices calls
Returns:
A string representation of an aggregation definition suitable for use in VectorServices calls
"""
if self.value:
base = '%s:%s' % (self.agg_type, self.value)
else:
base = '%s' % self.agg_type
if self.children:
if isinstance(self.children, six.string_types):
return '%s;%s' % (base, self.children)
elif isinstance(self.children, AggregationDef):
return '%s;%s' % (base, self.children.__repr__())
else: # assume it's iterable
kids = []
for child in self.children:
kids.append(child.__repr__())
kids_str = '(%s)' % ','.join(kids)
return '%s;%s' % (base, kids_str)
else:
return base
class GeohashAggDef(AggregationDef):
def __init__(self, hash_length='3', **kwargs):
super(GeohashAggDef, self).__init__('geohash', hash_length, **kwargs)
class DateHistogramAggDef(AggregationDef):
def __init__(self, bucket_period='M', **kwargs):
super(DateHistogramAggDef, self).__init__('date_hist', bucket_period, **kwargs)
class FieldBasedAggDef(AggregationDef):
def __init__(self, agg_type, field=None, **kwargs):
if not field:
raise Exception('The "field" property cannot be empty.')
super(FieldBasedAggDef, self).__init__(agg_type, field, **kwargs)
class TermsAggDef(FieldBasedAggDef):
def __init__(self, field=None, **kwargs):
super(TermsAggDef, self).__init__('terms', field, **kwargs)
class CardinalityAggDef(FieldBasedAggDef):
def __init__(self, field=None):
super(CardinalityAggDef, self).__init__('cardinality', field)
class AvgAggDef(FieldBasedAggDef):
def __init__(self, field=None):
super(AvgAggDef, self).__init__('avg', field)
class SumAggDef(FieldBasedAggDef):
def __init__(self, field=None):
super(SumAggDef, self).__init__('sum', field)
class AvgGeoLatAggDef(AggregationDef):
def __init__(self):
super(AvgGeoLatAggDef, self).__init__('avg_geo_lat')
class AvgGeoLonAggDef(AggregationDef):
def __init__(self):
super(AvgGeoLonAggDef, self).__init__('avg_geo_lon') | 0.794305 | 0.355579 |
import os
from environs import Env
# Environment-driven configuration (12-factor style): values come from the
# process environment, optionally seeded from a .env file if one exists.
env = Env()
env.read_env()

# Repository root: two levels above this settings module. Build project
# paths from it, e.g. os.path.join(BASE_DIR, "db.sqlite3").
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Production-safe default; set DEBUG=true in .env for local development.
TEMPLATE_DEBUG = DEBUG = env.bool("DEBUG", default=False)

# Deliberately no default: startup fails fast if SECRET_KEY is unset.
SECRET_KEY = env.str("SECRET_KEY")

ALLOWED_HOSTS = env.list("ALLOWED_HOSTS", default=[])
# Application definition
INSTALLED_APPS = [
    # Django contrib apps
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    # Third-party
    "phonenumber_field",
    # Project apps
    "pplan.apps.PplanConfig",
    "jiradata.apps.JiradataConfig",
    "workdays.apps.WorkdaysConfig",
]
# Request/response middleware chain (order matters: security first,
# then session, common, CSRF, auth, messages, clickjacking protection).
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "primetver.urls"

# Template engine: app-directory templates only (no project-level DIRS),
# with the standard debug/request/auth/messages context processors.
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]

WSGI_APPLICATION = "primetver.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Connections are parsed from URL-style environment variables; each entry
# falls back to a local SQLite file under BASE_DIR when the variable is
# unset. SSL is required whenever DEBUG is off, i.e. in production.
DATABASES = {
    # Primary application database.
    "default": env.dj_db_url(
        "DATABASE_URL",
        default="sqlite:///" + os.path.join(BASE_DIR, "db.sqlite3"),
        ssl_require=not DEBUG,
    ),
    # Secondary database configured via JIRA_URL — presumably holds JIRA
    # data consumed by the jiradata app; TODO confirm against that app.
    "jira": env.dj_db_url(
        "JIRA_URL",
        default="sqlite:///" + os.path.join(BASE_DIR, "jira.sqlite3"),
        ssl_require=not DEBUG,
    )
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
    },
    # CommonPasswordValidator is intentionally left disabled —
    # NOTE(review): reason not recorded here; confirm before re-enabling.
    # {
    #     "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
    # },
    {
        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = "ru-ru"
TIME_ZONE = env.str("TIME_ZONE", default="Europe/Moscow")
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
# STATIC_ROOT is where `collectstatic` deposits files; override per host.
STATIC_URL = "/static/"
STATIC_ROOT = env.str("STATIC_ROOT", default="/var/www/django-primetver/static/")
# Email settings, parsed from a single EMAIL_URL connection string
# (e.g. "smtp+tls://user:pass@host:port"); defaults to a bare local SMTP.
email = env.dj_email_url("EMAIL_URL", default="smtp://")
EMAIL_HOST = email["EMAIL_HOST"]
EMAIL_PORT = email["EMAIL_PORT"]
EMAIL_HOST_USER = email["EMAIL_HOST_USER"]
EMAIL_HOST_PASSWORD = email["EMAIL_HOST_PASSWORD"]
EMAIL_USE_TLS = email["EMAIL_USE_TLS"]
EMAIL_USE_TLS = email["EMAIL_USE_TLS"] | primetver/settings.py | import os
from environs import Env
env = Env()
env.read_env() # read .env file, if it exists
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Override in .env for local development
DEBUG = TEMPLATE_DEBUG = env.bool("DEBUG", default=False)
# NOTE: Error will be raised if SECRET_KEY is unset
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list('ALLOWED_HOSTS', [])
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'phonenumber_field',
'pplan.apps.PplanConfig',
'jiradata.apps.JiradataConfig',
'workdays.apps.WorkdaysConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'primetver.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'primetver.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
"default": env.dj_db_url(
"DATABASE_URL",
default="sqlite:///" + os.path.join(BASE_DIR, "db.sqlite3"),
ssl_require=not DEBUG,
),
"jira": env.dj_db_url(
"JIRA_URL",
default="sqlite:///" + os.path.join(BASE_DIR, "jira.sqlite3"),
ssl_require=not DEBUG,
)
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
#{
# 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
#},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = env.str("TIME_ZONE", default='Europe/Moscow')
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = env.str('STATIC_ROOT', '/var/www/django-primetver/static/')
# Email settings
email = env.dj_email_url("EMAIL_URL", default="smtp://")
EMAIL_HOST = email["EMAIL_HOST"]
EMAIL_PORT = email["EMAIL_PORT"]
EMAIL_HOST_PASSWORD = email["EMAIL_HOST_PASSWORD"]
EMAIL_HOST_USER = email["EMAIL_HOST_USER"]
EMAIL_USE_TLS = email["EMAIL_USE_TLS"] | 0.297674 | 0.053949 |
from __future__ import print_function, absolute_import, division
import KratosMultiphysics
import KratosMultiphysics.StructuralMechanicsApplication as StructuralMechanicsApplication
import KratosMultiphysics.KratosUnittest as KratosUnittest
import math
def get_displacement_vector(mp,disp):
index=0
for node in mp.Nodes:
disp[index] = node.GetSolutionStepValue(KratosMultiphysics.DISPLACEMENT_X,0)
index = index + 1
disp[index] = node.GetSolutionStepValue(KratosMultiphysics.DISPLACEMENT_Y,0)
index = index + 1
disp[index] = node.GetSolutionStepValue(KratosMultiphysics.DISPLACEMENT_Z,0)
index = index + 1
disp[index] = node.GetSolutionStepValue(KratosMultiphysics.ROTATION_X,0)
index = index + 1
disp[index] = node.GetSolutionStepValue(KratosMultiphysics.ROTATION_Y,0)
index = index + 1
disp[index] = node.GetSolutionStepValue(KratosMultiphysics.ROTATION_Z,0)
index = index + 1
def add_variables(mp):
mp.AddNodalSolutionStepVariable(KratosMultiphysics.DISPLACEMENT)
mp.AddNodalSolutionStepVariable(KratosMultiphysics.REACTION)
mp.AddNodalSolutionStepVariable(KratosMultiphysics.ROTATION)
mp.AddNodalSolutionStepVariable(KratosMultiphysics.TORQUE)
mp.AddNodalSolutionStepVariable(KratosMultiphysics.VOLUME_ACCELERATION)
mp.AddNodalSolutionStepVariable(KratosMultiphysics.VELOCITY)
mp.AddNodalSolutionStepVariable(KratosMultiphysics.ANGULAR_VELOCITY)
mp.AddNodalSolutionStepVariable(KratosMultiphysics.ACCELERATION)
mp.AddNodalSolutionStepVariable(KratosMultiphysics.ANGULAR_ACCELERATION)
def apply_material_properties(mp,dim):
#define properties
mp.GetProperties()[0].SetValue(KratosMultiphysics.YOUNG_MODULUS,100)
mp.GetProperties()[0].SetValue(KratosMultiphysics.DENSITY,7850)
mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.CROSS_AREA,1)
mp.GetProperties()[0].SetValue(KratosMultiphysics.POISSON_RATIO,0.30)
mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.TORSIONAL_INERTIA,0.1)
mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.I22,0.1)
mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.I33,0.1)
g = [0,0,0]
mp.GetProperties()[0].SetValue(KratosMultiphysics.VOLUME_ACCELERATION,g)
cl = StructuralMechanicsApplication.LinearElastic3DLaw()
mp.GetProperties()[0].SetValue(KratosMultiphysics.CONSTITUTIVE_LAW,cl)
def zero_vector(size):
v = KratosMultiphysics.Vector(size)
for i in range(size):
v[i] = 0.0
return v
class TestCrBeamAdjointElement(KratosUnittest.TestCase):
def setUp(self):
# create test model part
dim=3
self.current_model = KratosMultiphysics.Model()
self.model_part = self.current_model.CreateModelPart("test")
self.model_part.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE,dim)
add_variables(self.model_part)
self.model_part.CreateNewNode(1, 0.0, 0.0, 0.0)
self.model_part.CreateNewNode(2, 1.0, 0.1, 0.3)
apply_material_properties(self.model_part,dim)
prop = self.model_part.GetProperties()[0]
self.model_part.CreateNewElement("AdjointFiniteDifferenceCrBeamElementLinear3D2N", 1, [1, 2], prop)
self.adjoint_beam_element = self.model_part.GetElement(1)
self.model_part.CreateNewElement("CrLinearBeamElement3D2N", 2, [1, 2], prop)
self.beam_element = self.model_part.GetElement(2)
self._assign_solution_step_data(0)
def _create_shape_perturbed_elements(self,mp,delta):
dim=3
self.model_part_1 = mp.GetModel().CreateModelPart("Shape_Perturbed_Elements")
add_variables(self.model_part_1)
x1 = mp.Nodes[1].X
y1 = mp.Nodes[1].Y
z1 = mp.Nodes[1].Z
x2 = mp.Nodes[2].X
y2 = mp.Nodes[2].Y
z2 = mp.Nodes[2].Z
self.model_part_1.CreateNewNode(1, x1, y1, z1)
self.model_part_1.CreateNewNode(2, x1+delta, y1, z1)
self.model_part_1.CreateNewNode(3, x1, y1+delta, z1)
self.model_part_1.CreateNewNode(4, x1, y1, z1+delta)
self.model_part_1.CreateNewNode(5, x2, y2, z2)
self.model_part_1.CreateNewNode(6, x2+delta, y2, z2)
self.model_part_1.CreateNewNode(7, x2, y2+delta, z2)
self.model_part_1.CreateNewNode(8, x2, y2, z2+delta)
apply_material_properties(self.model_part_1,dim)
prop = self.model_part_1.GetProperties()[0]
self.model_part_1.CreateNewElement("CrLinearBeamElement3D2N", 1, [2, 5], prop)
self.model_part_1.CreateNewElement("CrLinearBeamElement3D2N", 2, [3, 5], prop)
self.model_part_1.CreateNewElement("CrLinearBeamElement3D2N", 3, [4, 5], prop)
self.model_part_1.CreateNewElement("CrLinearBeamElement3D2N", 4, [1, 6], prop)
self.model_part_1.CreateNewElement("CrLinearBeamElement3D2N", 5, [1, 7], prop)
self.model_part_1.CreateNewElement("CrLinearBeamElement3D2N", 6, [1, 8], prop)
def _create_property_perturbed_elements(self,mp,delta):
dim = 3
self.model_part_2 = mp.GetModel().CreateModelPart("Property_Perturbed_Elements")
add_variables(self.model_part_2)
self.model_part_2.CreateNewNode(1, mp.Nodes[1].X, mp.Nodes[1].Y, mp.Nodes[1].Z)
self.model_part_2.CreateNewNode(2, mp.Nodes[2].X, mp.Nodes[2].Y, mp.Nodes[2].Z)
apply_material_properties(self.model_part_2,dim)
I22_initial = mp.GetProperties()[0][StructuralMechanicsApplication.I22]
self.model_part_2.GetProperties()[0].SetValue(StructuralMechanicsApplication.I22, I22_initial + delta)
prop = self.model_part_2.GetProperties()[0]
self.model_part_2.CreateNewElement("CrLinearBeamElement3D2N", 1, [1, 2], prop)
self.property_perturbed_beam_element = self.model_part_2.GetElement(1)
def _assign_solution_step_data(self, step=0):
# generate nodal solution step test data
self.model_part.Nodes[1].SetSolutionStepValue(KratosMultiphysics.DISPLACEMENT_X,step,0.014725)
self.model_part.Nodes[1].SetSolutionStepValue(KratosMultiphysics.DISPLACEMENT_Y,step,0.001200)
self.model_part.Nodes[1].SetSolutionStepValue(KratosMultiphysics.DISPLACEMENT_Z,step,0.0725715)
self.model_part.Nodes[1].SetSolutionStepValue(KratosMultiphysics.ROTATION_X,step,0.00125)
self.model_part.Nodes[1].SetSolutionStepValue(KratosMultiphysics.ROTATION_Y,step,-0.114905)
self.model_part.Nodes[1].SetSolutionStepValue(KratosMultiphysics.ROTATION_Z,step,0.258032)
self.model_part.Nodes[2].SetSolutionStepValue(KratosMultiphysics.DISPLACEMENT_X,step,0.019735)
self.model_part.Nodes[2].SetSolutionStepValue(KratosMultiphysics.DISPLACEMENT_Y,step,0.002400)
self.model_part.Nodes[2].SetSolutionStepValue(KratosMultiphysics.DISPLACEMENT_Z,step,0.377976)
self.model_part.Nodes[2].SetSolutionStepValue(KratosMultiphysics.ROTATION_X,step,-0.00155)
self.model_part.Nodes[2].SetSolutionStepValue(KratosMultiphysics.ROTATION_Y,step,-0.217714)
self.model_part.Nodes[2].SetSolutionStepValue(KratosMultiphysics.ROTATION_Z,step,0.2544032)
def _shape_perturbation_correction_factor(self):
dx = self.model_part.Nodes[1].X - self.model_part.Nodes[2].X
dy = self.model_part.Nodes[1].Y - self.model_part.Nodes[2].Y
dz = self.model_part.Nodes[1].Z - self.model_part.Nodes[2].Z
l = math.sqrt(dx*dx + dy*dy + dz*dz)
return l
def test_CalculateSensitivityMatrix_Shape(self):
# unperturbed residual
LHSUnperturbed = KratosMultiphysics.Matrix(12,12)
RHSUnperturbed = KratosMultiphysics.Matrix(12,12)
dummy_RHS = zero_vector(12)
PrimalDisplacement = zero_vector(12)
get_displacement_vector(self.model_part,PrimalDisplacement)
self.beam_element.CalculateLocalSystem(LHSUnperturbed,dummy_RHS,self.model_part.ProcessInfo)
RHSUnperturbed = LHSUnperturbed * PrimalDisplacement
# pseudo-load by finite difference approximation
h = 0.00001
corr_factor = self._shape_perturbation_correction_factor()
alpha = corr_factor * h
FDPseudoLoadMatrix = KratosMultiphysics.Matrix(6,12)
LHSPerturbed = KratosMultiphysics.Matrix(12,12)
RHSPerturbed = KratosMultiphysics.Matrix(12,12)
self._create_shape_perturbed_elements(self.model_part,alpha)
row_index = 0
for element in self.model_part_1.Elements:
element.CalculateLocalSystem(LHSPerturbed,dummy_RHS,self.model_part_1.ProcessInfo)
RHSPerturbed = LHSPerturbed * PrimalDisplacement
for j in range(12):
FDPseudoLoadMatrix[row_index,j] = -(RHSPerturbed[j] - RHSUnperturbed[j]) / alpha
row_index = row_index + 1
# pseudo-load computation by adjoint element
PseudoLoadMatrix = KratosMultiphysics.Matrix(6,12)
self.model_part.ProcessInfo[StructuralMechanicsApplication.PERTURBATION_SIZE] = h
self.adjoint_beam_element.CalculateSensitivityMatrix(KratosMultiphysics.SHAPE_SENSITIVITY,PseudoLoadMatrix,self.model_part.ProcessInfo)
self.assertMatrixAlmostEqual(FDPseudoLoadMatrix, PseudoLoadMatrix, 4)
def test_CalculateSensitivityMatrix_Property(self):
# unperturbed residual
LHSUnperturbed = KratosMultiphysics.Matrix(12,12)
RHSUnperturbed = zero_vector(12)
dummy_RHS = zero_vector(12)
PrimalDisplacement = zero_vector(12)
get_displacement_vector(self.model_part,PrimalDisplacement)
self.beam_element.CalculateLocalSystem(LHSUnperturbed, dummy_RHS, self.model_part.ProcessInfo)
RHSUnperturbed = LHSUnperturbed * PrimalDisplacement
# pseudo-load by finite difference approximation
h = 0.00001
FDPseudoLoadMatrix = KratosMultiphysics.Matrix(1,12)
LHSPerturbed = KratosMultiphysics.Matrix(12,12)
RHSPerturbed = zero_vector(12)
inital_property_value = self.model_part.GetProperties()[0][StructuralMechanicsApplication.I22]
delta = h * inital_property_value
self._create_property_perturbed_elements(self.model_part,delta)
self.property_perturbed_beam_element.CalculateLocalSystem(LHSPerturbed, dummy_RHS, self.model_part_2.ProcessInfo)
RHSPerturbed = LHSPerturbed * PrimalDisplacement
for j in range(12):
FDPseudoLoadMatrix[0,j] = -(RHSPerturbed[j] - RHSUnperturbed[j]) / delta
# pseudo-load computation by adjoint element
PseudoLoadMatrix = KratosMultiphysics.Matrix(1,12)
self.model_part.ProcessInfo[StructuralMechanicsApplication.PERTURBATION_SIZE] = h
self.adjoint_beam_element.CalculateSensitivityMatrix(StructuralMechanicsApplication.I22, PseudoLoadMatrix, self.model_part.ProcessInfo)
self.assertMatrixAlmostEqual(FDPseudoLoadMatrix, PseudoLoadMatrix, 4)
if __name__ == '__main__':
KratosUnittest.main() | applications/StructuralMechanicsApplication/tests/test_cr_beam_adjoint_element_3d2n.py | from __future__ import print_function, absolute_import, division
import KratosMultiphysics
import KratosMultiphysics.StructuralMechanicsApplication as StructuralMechanicsApplication
import KratosMultiphysics.KratosUnittest as KratosUnittest
import math
def get_displacement_vector(mp,disp):
index=0
for node in mp.Nodes:
disp[index] = node.GetSolutionStepValue(KratosMultiphysics.DISPLACEMENT_X,0)
index = index + 1
disp[index] = node.GetSolutionStepValue(KratosMultiphysics.DISPLACEMENT_Y,0)
index = index + 1
disp[index] = node.GetSolutionStepValue(KratosMultiphysics.DISPLACEMENT_Z,0)
index = index + 1
disp[index] = node.GetSolutionStepValue(KratosMultiphysics.ROTATION_X,0)
index = index + 1
disp[index] = node.GetSolutionStepValue(KratosMultiphysics.ROTATION_Y,0)
index = index + 1
disp[index] = node.GetSolutionStepValue(KratosMultiphysics.ROTATION_Z,0)
index = index + 1
def add_variables(mp):
mp.AddNodalSolutionStepVariable(KratosMultiphysics.DISPLACEMENT)
mp.AddNodalSolutionStepVariable(KratosMultiphysics.REACTION)
mp.AddNodalSolutionStepVariable(KratosMultiphysics.ROTATION)
mp.AddNodalSolutionStepVariable(KratosMultiphysics.TORQUE)
mp.AddNodalSolutionStepVariable(KratosMultiphysics.VOLUME_ACCELERATION)
mp.AddNodalSolutionStepVariable(KratosMultiphysics.VELOCITY)
mp.AddNodalSolutionStepVariable(KratosMultiphysics.ANGULAR_VELOCITY)
mp.AddNodalSolutionStepVariable(KratosMultiphysics.ACCELERATION)
mp.AddNodalSolutionStepVariable(KratosMultiphysics.ANGULAR_ACCELERATION)
def apply_material_properties(mp,dim):
#define properties
mp.GetProperties()[0].SetValue(KratosMultiphysics.YOUNG_MODULUS,100)
mp.GetProperties()[0].SetValue(KratosMultiphysics.DENSITY,7850)
mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.CROSS_AREA,1)
mp.GetProperties()[0].SetValue(KratosMultiphysics.POISSON_RATIO,0.30)
mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.TORSIONAL_INERTIA,0.1)
mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.I22,0.1)
mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.I33,0.1)
g = [0,0,0]
mp.GetProperties()[0].SetValue(KratosMultiphysics.VOLUME_ACCELERATION,g)
cl = StructuralMechanicsApplication.LinearElastic3DLaw()
mp.GetProperties()[0].SetValue(KratosMultiphysics.CONSTITUTIVE_LAW,cl)
def zero_vector(size):
v = KratosMultiphysics.Vector(size)
for i in range(size):
v[i] = 0.0
return v
class TestCrBeamAdjointElement(KratosUnittest.TestCase):
def setUp(self):
# create test model part
dim=3
self.current_model = KratosMultiphysics.Model()
self.model_part = self.current_model.CreateModelPart("test")
self.model_part.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE,dim)
add_variables(self.model_part)
self.model_part.CreateNewNode(1, 0.0, 0.0, 0.0)
self.model_part.CreateNewNode(2, 1.0, 0.1, 0.3)
apply_material_properties(self.model_part,dim)
prop = self.model_part.GetProperties()[0]
self.model_part.CreateNewElement("AdjointFiniteDifferenceCrBeamElementLinear3D2N", 1, [1, 2], prop)
self.adjoint_beam_element = self.model_part.GetElement(1)
self.model_part.CreateNewElement("CrLinearBeamElement3D2N", 2, [1, 2], prop)
self.beam_element = self.model_part.GetElement(2)
self._assign_solution_step_data(0)
def _create_shape_perturbed_elements(self,mp,delta):
dim=3
self.model_part_1 = mp.GetModel().CreateModelPart("Shape_Perturbed_Elements")
add_variables(self.model_part_1)
x1 = mp.Nodes[1].X
y1 = mp.Nodes[1].Y
z1 = mp.Nodes[1].Z
x2 = mp.Nodes[2].X
y2 = mp.Nodes[2].Y
z2 = mp.Nodes[2].Z
self.model_part_1.CreateNewNode(1, x1, y1, z1)
self.model_part_1.CreateNewNode(2, x1+delta, y1, z1)
self.model_part_1.CreateNewNode(3, x1, y1+delta, z1)
self.model_part_1.CreateNewNode(4, x1, y1, z1+delta)
self.model_part_1.CreateNewNode(5, x2, y2, z2)
self.model_part_1.CreateNewNode(6, x2+delta, y2, z2)
self.model_part_1.CreateNewNode(7, x2, y2+delta, z2)
self.model_part_1.CreateNewNode(8, x2, y2, z2+delta)
apply_material_properties(self.model_part_1,dim)
prop = self.model_part_1.GetProperties()[0]
self.model_part_1.CreateNewElement("CrLinearBeamElement3D2N", 1, [2, 5], prop)
self.model_part_1.CreateNewElement("CrLinearBeamElement3D2N", 2, [3, 5], prop)
self.model_part_1.CreateNewElement("CrLinearBeamElement3D2N", 3, [4, 5], prop)
self.model_part_1.CreateNewElement("CrLinearBeamElement3D2N", 4, [1, 6], prop)
self.model_part_1.CreateNewElement("CrLinearBeamElement3D2N", 5, [1, 7], prop)
self.model_part_1.CreateNewElement("CrLinearBeamElement3D2N", 6, [1, 8], prop)
def _create_property_perturbed_elements(self,mp,delta):
dim = 3
self.model_part_2 = mp.GetModel().CreateModelPart("Property_Perturbed_Elements")
add_variables(self.model_part_2)
self.model_part_2.CreateNewNode(1, mp.Nodes[1].X, mp.Nodes[1].Y, mp.Nodes[1].Z)
self.model_part_2.CreateNewNode(2, mp.Nodes[2].X, mp.Nodes[2].Y, mp.Nodes[2].Z)
apply_material_properties(self.model_part_2,dim)
I22_initial = mp.GetProperties()[0][StructuralMechanicsApplication.I22]
self.model_part_2.GetProperties()[0].SetValue(StructuralMechanicsApplication.I22, I22_initial + delta)
prop = self.model_part_2.GetProperties()[0]
self.model_part_2.CreateNewElement("CrLinearBeamElement3D2N", 1, [1, 2], prop)
self.property_perturbed_beam_element = self.model_part_2.GetElement(1)
def _assign_solution_step_data(self, step=0):
# generate nodal solution step test data
self.model_part.Nodes[1].SetSolutionStepValue(KratosMultiphysics.DISPLACEMENT_X,step,0.014725)
self.model_part.Nodes[1].SetSolutionStepValue(KratosMultiphysics.DISPLACEMENT_Y,step,0.001200)
self.model_part.Nodes[1].SetSolutionStepValue(KratosMultiphysics.DISPLACEMENT_Z,step,0.0725715)
self.model_part.Nodes[1].SetSolutionStepValue(KratosMultiphysics.ROTATION_X,step,0.00125)
self.model_part.Nodes[1].SetSolutionStepValue(KratosMultiphysics.ROTATION_Y,step,-0.114905)
self.model_part.Nodes[1].SetSolutionStepValue(KratosMultiphysics.ROTATION_Z,step,0.258032)
self.model_part.Nodes[2].SetSolutionStepValue(KratosMultiphysics.DISPLACEMENT_X,step,0.019735)
self.model_part.Nodes[2].SetSolutionStepValue(KratosMultiphysics.DISPLACEMENT_Y,step,0.002400)
self.model_part.Nodes[2].SetSolutionStepValue(KratosMultiphysics.DISPLACEMENT_Z,step,0.377976)
self.model_part.Nodes[2].SetSolutionStepValue(KratosMultiphysics.ROTATION_X,step,-0.00155)
self.model_part.Nodes[2].SetSolutionStepValue(KratosMultiphysics.ROTATION_Y,step,-0.217714)
self.model_part.Nodes[2].SetSolutionStepValue(KratosMultiphysics.ROTATION_Z,step,0.2544032)
def _shape_perturbation_correction_factor(self):
dx = self.model_part.Nodes[1].X - self.model_part.Nodes[2].X
dy = self.model_part.Nodes[1].Y - self.model_part.Nodes[2].Y
dz = self.model_part.Nodes[1].Z - self.model_part.Nodes[2].Z
l = math.sqrt(dx*dx + dy*dy + dz*dz)
return l
def test_CalculateSensitivityMatrix_Shape(self):
# unperturbed residual
LHSUnperturbed = KratosMultiphysics.Matrix(12,12)
RHSUnperturbed = KratosMultiphysics.Matrix(12,12)
dummy_RHS = zero_vector(12)
PrimalDisplacement = zero_vector(12)
get_displacement_vector(self.model_part,PrimalDisplacement)
self.beam_element.CalculateLocalSystem(LHSUnperturbed,dummy_RHS,self.model_part.ProcessInfo)
RHSUnperturbed = LHSUnperturbed * PrimalDisplacement
# pseudo-load by finite difference approximation
h = 0.00001
corr_factor = self._shape_perturbation_correction_factor()
alpha = corr_factor * h
FDPseudoLoadMatrix = KratosMultiphysics.Matrix(6,12)
LHSPerturbed = KratosMultiphysics.Matrix(12,12)
RHSPerturbed = KratosMultiphysics.Matrix(12,12)
self._create_shape_perturbed_elements(self.model_part,alpha)
row_index = 0
for element in self.model_part_1.Elements:
element.CalculateLocalSystem(LHSPerturbed,dummy_RHS,self.model_part_1.ProcessInfo)
RHSPerturbed = LHSPerturbed * PrimalDisplacement
for j in range(12):
FDPseudoLoadMatrix[row_index,j] = -(RHSPerturbed[j] - RHSUnperturbed[j]) / alpha
row_index = row_index + 1
# pseudo-load computation by adjoint element
PseudoLoadMatrix = KratosMultiphysics.Matrix(6,12)
self.model_part.ProcessInfo[StructuralMechanicsApplication.PERTURBATION_SIZE] = h
self.adjoint_beam_element.CalculateSensitivityMatrix(KratosMultiphysics.SHAPE_SENSITIVITY,PseudoLoadMatrix,self.model_part.ProcessInfo)
self.assertMatrixAlmostEqual(FDPseudoLoadMatrix, PseudoLoadMatrix, 4)
def test_CalculateSensitivityMatrix_Property(self):
# unperturbed residual
LHSUnperturbed = KratosMultiphysics.Matrix(12,12)
RHSUnperturbed = zero_vector(12)
dummy_RHS = zero_vector(12)
PrimalDisplacement = zero_vector(12)
get_displacement_vector(self.model_part,PrimalDisplacement)
self.beam_element.CalculateLocalSystem(LHSUnperturbed, dummy_RHS, self.model_part.ProcessInfo)
RHSUnperturbed = LHSUnperturbed * PrimalDisplacement
# pseudo-load by finite difference approximation
h = 0.00001
FDPseudoLoadMatrix = KratosMultiphysics.Matrix(1,12)
LHSPerturbed = KratosMultiphysics.Matrix(12,12)
RHSPerturbed = zero_vector(12)
inital_property_value = self.model_part.GetProperties()[0][StructuralMechanicsApplication.I22]
delta = h * inital_property_value
self._create_property_perturbed_elements(self.model_part,delta)
self.property_perturbed_beam_element.CalculateLocalSystem(LHSPerturbed, dummy_RHS, self.model_part_2.ProcessInfo)
RHSPerturbed = LHSPerturbed * PrimalDisplacement
for j in range(12):
FDPseudoLoadMatrix[0,j] = -(RHSPerturbed[j] - RHSUnperturbed[j]) / delta
# pseudo-load computation by adjoint element
PseudoLoadMatrix = KratosMultiphysics.Matrix(1,12)
self.model_part.ProcessInfo[StructuralMechanicsApplication.PERTURBATION_SIZE] = h
self.adjoint_beam_element.CalculateSensitivityMatrix(StructuralMechanicsApplication.I22, PseudoLoadMatrix, self.model_part.ProcessInfo)
self.assertMatrixAlmostEqual(FDPseudoLoadMatrix, PseudoLoadMatrix, 4)
if __name__ == '__main__':
KratosUnittest.main() | 0.444324 | 0.319705 |
import unittest
from decimal import Decimal
from djmodels.core import validators
from djmodels.core.exceptions import ValidationError
from djmodels.db import connection, models
from djmodels.test import TestCase
from .models import BigD, Foo
class DecimalFieldTests(TestCase):
def test_to_python(self):
f = models.DecimalField(max_digits=4, decimal_places=2)
self.assertEqual(f.to_python(3), Decimal('3'))
self.assertEqual(f.to_python('3.14'), Decimal('3.14'))
# to_python() converts floats and honors max_digits.
self.assertEqual(f.to_python(3.1415926535897), Decimal('3.142'))
self.assertEqual(f.to_python(2.4), Decimal('2.400'))
# Uses default rounding of ROUND_HALF_EVEN.
self.assertEqual(f.to_python(2.0625), Decimal('2.062'))
self.assertEqual(f.to_python(2.1875), Decimal('2.188'))
msg = "'abc' value must be a decimal number."
with self.assertRaisesMessage(ValidationError, msg):
f.to_python('abc')
def test_default(self):
f = models.DecimalField(default=Decimal('0.00'))
self.assertEqual(f.get_default(), Decimal('0.00'))
def test_get_prep_value(self):
f = models.DecimalField(max_digits=5, decimal_places=1)
self.assertIsNone(f.get_prep_value(None))
self.assertEqual(f.get_prep_value('2.4'), Decimal('2.4'))
def test_filter_with_strings(self):
"""
Should be able to filter decimal fields using strings (#8023).
"""
foo = Foo.objects.create(a='abc', d=Decimal('12.34'))
self.assertEqual(list(Foo.objects.filter(d='12.34')), [foo])
def test_save_without_float_conversion(self):
"""
Ensure decimals don't go through a corrupting float conversion during
save (#5079).
"""
bd = BigD(d='12.9')
bd.save()
bd = BigD.objects.get(pk=bd.pk)
self.assertEqual(bd.d, Decimal('12.9'))
@unittest.skipIf(connection.vendor == 'sqlite', 'SQLite stores values rounded to 15 significant digits.')
def test_fetch_from_db_without_float_rounding(self):
big_decimal = BigD.objects.create(d=Decimal('.100000000000000000000000000005'))
big_decimal.refresh_from_db()
self.assertEqual(big_decimal.d, Decimal('.100000000000000000000000000005'))
def test_lookup_really_big_value(self):
"""
Really big values can be used in a filter statement.
"""
# This should not crash.
Foo.objects.filter(d__gte=100000000000)
def test_max_digits_validation(self):
field = models.DecimalField(max_digits=2)
expected_message = validators.DecimalValidator.messages['max_digits'] % {'max': 2}
with self.assertRaisesMessage(ValidationError, expected_message):
field.clean(100, None)
def test_max_decimal_places_validation(self):
field = models.DecimalField(decimal_places=1)
expected_message = validators.DecimalValidator.messages['max_decimal_places'] % {'max': 1}
with self.assertRaisesMessage(ValidationError, expected_message):
field.clean(Decimal('0.99'), None)
def test_max_whole_digits_validation(self):
field = models.DecimalField(max_digits=3, decimal_places=1)
expected_message = validators.DecimalValidator.messages['max_whole_digits'] % {'max': 2}
with self.assertRaisesMessage(ValidationError, expected_message):
field.clean(Decimal('999'), None)
def test_roundtrip_with_trailing_zeros(self):
"""Trailing zeros in the fractional part aren't truncated."""
obj = Foo.objects.create(a='bar', d=Decimal('8.320'))
obj.refresh_from_db()
self.assertEqual(obj.d.compare_total(Decimal('8.320')), Decimal('0')) | tests/model_fields/test_decimalfield.py | import unittest
from decimal import Decimal
from djmodels.core import validators
from djmodels.core.exceptions import ValidationError
from djmodels.db import connection, models
from djmodels.test import TestCase
from .models import BigD, Foo
class DecimalFieldTests(TestCase):
def test_to_python(self):
f = models.DecimalField(max_digits=4, decimal_places=2)
self.assertEqual(f.to_python(3), Decimal('3'))
self.assertEqual(f.to_python('3.14'), Decimal('3.14'))
# to_python() converts floats and honors max_digits.
self.assertEqual(f.to_python(3.1415926535897), Decimal('3.142'))
self.assertEqual(f.to_python(2.4), Decimal('2.400'))
# Uses default rounding of ROUND_HALF_EVEN.
self.assertEqual(f.to_python(2.0625), Decimal('2.062'))
self.assertEqual(f.to_python(2.1875), Decimal('2.188'))
msg = "'abc' value must be a decimal number."
with self.assertRaisesMessage(ValidationError, msg):
f.to_python('abc')
def test_default(self):
f = models.DecimalField(default=Decimal('0.00'))
self.assertEqual(f.get_default(), Decimal('0.00'))
def test_get_prep_value(self):
f = models.DecimalField(max_digits=5, decimal_places=1)
self.assertIsNone(f.get_prep_value(None))
self.assertEqual(f.get_prep_value('2.4'), Decimal('2.4'))
def test_filter_with_strings(self):
"""
Should be able to filter decimal fields using strings (#8023).
"""
foo = Foo.objects.create(a='abc', d=Decimal('12.34'))
self.assertEqual(list(Foo.objects.filter(d='12.34')), [foo])
def test_save_without_float_conversion(self):
"""
Ensure decimals don't go through a corrupting float conversion during
save (#5079).
"""
bd = BigD(d='12.9')
bd.save()
bd = BigD.objects.get(pk=bd.pk)
self.assertEqual(bd.d, Decimal('12.9'))
@unittest.skipIf(connection.vendor == 'sqlite', 'SQLite stores values rounded to 15 significant digits.')
def test_fetch_from_db_without_float_rounding(self):
big_decimal = BigD.objects.create(d=Decimal('.100000000000000000000000000005'))
big_decimal.refresh_from_db()
self.assertEqual(big_decimal.d, Decimal('.100000000000000000000000000005'))
def test_lookup_really_big_value(self):
"""
Really big values can be used in a filter statement.
"""
# This should not crash.
Foo.objects.filter(d__gte=100000000000)
def test_max_digits_validation(self):
field = models.DecimalField(max_digits=2)
expected_message = validators.DecimalValidator.messages['max_digits'] % {'max': 2}
with self.assertRaisesMessage(ValidationError, expected_message):
field.clean(100, None)
def test_max_decimal_places_validation(self):
field = models.DecimalField(decimal_places=1)
expected_message = validators.DecimalValidator.messages['max_decimal_places'] % {'max': 1}
with self.assertRaisesMessage(ValidationError, expected_message):
field.clean(Decimal('0.99'), None)
def test_max_whole_digits_validation(self):
field = models.DecimalField(max_digits=3, decimal_places=1)
expected_message = validators.DecimalValidator.messages['max_whole_digits'] % {'max': 2}
with self.assertRaisesMessage(ValidationError, expected_message):
field.clean(Decimal('999'), None)
def test_roundtrip_with_trailing_zeros(self):
"""Trailing zeros in the fractional part aren't truncated."""
obj = Foo.objects.create(a='bar', d=Decimal('8.320'))
obj.refresh_from_db()
self.assertEqual(obj.d.compare_total(Decimal('8.320')), Decimal('0')) | 0.830353 | 0.544741 |
from cell import cell_to_rgb, rgb_to_cell, iterate_cell
import numpy as np
from PIL import Image
def board_from_pic(image):
""" Given an h*w*3 numpy array 'image',
return the h*w numpy array 'board'.
"""
return np.apply_along_axis(rgb_to_cell, 2, image)
def pic_from_board(board):
""" Given an h*w numpy array 'board',
return the h*w*3 numpy array 'image'.
"""
h, w = board.shape
outimage = np.zeros(shape=(h,w,3),dtype=np.uint8)
for y, row in enumerate(board):
for x, cell in enumerate(row):
outimage[y,x] = cell_to_rgb(cell)
return outimage
def save_pic(board, filename):
""" Save a board array as an image at the filename """
h, w = board.shape
rgbdata = tuple(map(tuple, pic_from_board(board).reshape(-1,3).tolist()))
image = Image.new("RGB",(w,h))
image.putdata(rgbdata)
image.save(filename, "PNG")
def load_pic(filename):
""" Load an image as a board array from the filename """
return board_from_pic(np.asarray(Image.open(filename)))
def _get_neighbours(board, x, y, w, h):
""" Get the 8 orthogonal neighbours of cell x, y from the board.
w and h are passed as parameters (rather than taken from board.shape)
because I assumed it'd be faster. I still need to check that.
Operates on the array as a torus (i.e. wraps on edges)
... What? It's a _ function! Let me optimize prematurely if I want to!
"""
return ((board[y, (x+1)%w], board[y, (x+2)%w]),
(board[y, (x-1)%w], board[y, (x-2)%w]),
(board[(y-1)%h, x], board[(y-2)%h, x]),
(board[(y+1)%h, x], board[(y+2)%h, x]))
def iterate_board(board):
""" Given the h*w numpy 'board' array, return the once-iterated
board, iterated according to the brainbow rules.
(See: cell.iterate_cell),
"""
# This can be modified to work in-place on board, if that makes it faster.
# ... But if we wanted speed, we'd write this in C or Rust or something ;)
board_copy = np.copy(board)
h, w = board.shape
for y, row in enumerate(board):
for x, cell in enumerate(row):
neighbours = _get_neighbours(board, x, y, w, h)
board_copy[y,x] = iterate_cell(cell, neighbours)
return board_copy | bbworld.py | from cell import cell_to_rgb, rgb_to_cell, iterate_cell
import numpy as np
from PIL import Image
def board_from_pic(image):
""" Given an h*w*3 numpy array 'image',
return the h*w numpy array 'board'.
"""
return np.apply_along_axis(rgb_to_cell, 2, image)
def pic_from_board(board):
""" Given an h*w numpy array 'board',
return the h*w*3 numpy array 'image'.
"""
h, w = board.shape
outimage = np.zeros(shape=(h,w,3),dtype=np.uint8)
for y, row in enumerate(board):
for x, cell in enumerate(row):
outimage[y,x] = cell_to_rgb(cell)
return outimage
def save_pic(board, filename):
""" Save a board array as an image at the filename """
h, w = board.shape
rgbdata = tuple(map(tuple, pic_from_board(board).reshape(-1,3).tolist()))
image = Image.new("RGB",(w,h))
image.putdata(rgbdata)
image.save(filename, "PNG")
def load_pic(filename):
""" Load an image as a board array from the filename """
return board_from_pic(np.asarray(Image.open(filename)))
def _get_neighbours(board, x, y, w, h):
""" Get the 8 orthogonal neighbours of cell x, y from the board.
w and h are passed as parameters (rather than taken from board.shape)
because I assumed it'd be faster. I still need to check that.
Operates on the array as a torus (i.e. wraps on edges)
... What? It's a _ function! Let me optimize prematurely if I want to!
"""
return ((board[y, (x+1)%w], board[y, (x+2)%w]),
(board[y, (x-1)%w], board[y, (x-2)%w]),
(board[(y-1)%h, x], board[(y-2)%h, x]),
(board[(y+1)%h, x], board[(y+2)%h, x]))
def iterate_board(board):
""" Given the h*w numpy 'board' array, return the once-iterated
board, iterated according to the brainbow rules.
(See: cell.iterate_cell),
"""
# This can be modified to work in-place on board, if that makes it faster.
# ... But if we wanted speed, we'd write this in C or Rust or something ;)
board_copy = np.copy(board)
h, w = board.shape
for y, row in enumerate(board):
for x, cell in enumerate(row):
neighbours = _get_neighbours(board, x, y, w, h)
board_copy[y,x] = iterate_cell(cell, neighbours)
return board_copy | 0.649356 | 0.665016 |
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from xclim.sdba.base import Grouper
from xclim.sdba.processing import adapt_freq, jitter_over_thresh, jitter_under_thresh
def test_jitter_under_thresh():
da = xr.DataArray([0.5, 2.1, np.nan])
out = jitter_under_thresh(da, 1)
assert da[0] != out[0]
assert da[0] < 1
assert da[0] > 0
np.testing.assert_allclose(da[1:], out[1:])
def test_jitter_over_thresh():
da = xr.DataArray([0.5, 2.1, np.nan])
out = jitter_over_thresh(da, 2, 3)
assert da[1] != out[1]
assert da[1] < 3
assert da[1] > 2
np.testing.assert_allclose(da[[0, 2]], out[[0, 2]])
@pytest.mark.parametrize("use_dask", [True, False])
def test_adapt_freq(use_dask):
time = pd.date_range("1990-01-01", "2020-12-31", freq="D")
prvals = np.random.randint(0, 100, size=(time.size, 3))
pr = xr.DataArray(
prvals, coords={"time": time, "lat": [0, 1, 2]}, dims=("time", "lat")
)
if use_dask:
pr = pr.chunk({"lat": 1})
group = Grouper("time.month")
prsim = xr.where(pr < 20, pr / 20, pr)
prref = xr.where(pr < 10, pr / 20, pr)
ds_in = xr.Dataset({"sim": prsim, "ref": prref})
ds_ad = adapt_freq(ds_in, thresh=1, group=group)
# Where the input is considered zero
input_zeros = ds_ad.sim_ad.where(prsim <= 1)
# The proportion of corrected values (time.size * 3 * 0.2 is the theoritical number of values under 1 in prsim)
dP0_out = (input_zeros > 1).sum() / (time.size * 3 * 0.2)
np.testing.assert_allclose(dP0_out, 0.5, atol=0.1)
# Assert that corrected values were generated in the range ]1, 20 + tol[
corrected = (
input_zeros.where(input_zeros > 1)
.stack(flat=["lat", "time"])
.reset_index("flat")
.dropna("flat")
)
assert ((corrected < 20.1) & (corrected > 1)).all()
# Assert that non-corrected values are untouched
# Again we add a 0.5 tol because of randomness.
xr.testing.assert_equal(
ds_ad.sim_ad.where(prsim > 20.1),
prsim.where(prsim > 20.5).transpose("lat", "time"),
)
# Assert that Pth and dP0 are approx the good values
np.testing.assert_allclose(ds_ad.pth, 20, rtol=0.05)
np.testing.assert_allclose(ds_ad.dP0, 0.5, atol=0.14) | xclim/testing/tests/test_sdba/test_processing.py | import numpy as np
import pandas as pd
import pytest
import xarray as xr
from xclim.sdba.base import Grouper
from xclim.sdba.processing import adapt_freq, jitter_over_thresh, jitter_under_thresh
def test_jitter_under_thresh():
da = xr.DataArray([0.5, 2.1, np.nan])
out = jitter_under_thresh(da, 1)
assert da[0] != out[0]
assert da[0] < 1
assert da[0] > 0
np.testing.assert_allclose(da[1:], out[1:])
def test_jitter_over_thresh():
da = xr.DataArray([0.5, 2.1, np.nan])
out = jitter_over_thresh(da, 2, 3)
assert da[1] != out[1]
assert da[1] < 3
assert da[1] > 2
np.testing.assert_allclose(da[[0, 2]], out[[0, 2]])
@pytest.mark.parametrize("use_dask", [True, False])
def test_adapt_freq(use_dask):
time = pd.date_range("1990-01-01", "2020-12-31", freq="D")
prvals = np.random.randint(0, 100, size=(time.size, 3))
pr = xr.DataArray(
prvals, coords={"time": time, "lat": [0, 1, 2]}, dims=("time", "lat")
)
if use_dask:
pr = pr.chunk({"lat": 1})
group = Grouper("time.month")
prsim = xr.where(pr < 20, pr / 20, pr)
prref = xr.where(pr < 10, pr / 20, pr)
ds_in = xr.Dataset({"sim": prsim, "ref": prref})
ds_ad = adapt_freq(ds_in, thresh=1, group=group)
# Where the input is considered zero
input_zeros = ds_ad.sim_ad.where(prsim <= 1)
# The proportion of corrected values (time.size * 3 * 0.2 is the theoritical number of values under 1 in prsim)
dP0_out = (input_zeros > 1).sum() / (time.size * 3 * 0.2)
np.testing.assert_allclose(dP0_out, 0.5, atol=0.1)
# Assert that corrected values were generated in the range ]1, 20 + tol[
corrected = (
input_zeros.where(input_zeros > 1)
.stack(flat=["lat", "time"])
.reset_index("flat")
.dropna("flat")
)
assert ((corrected < 20.1) & (corrected > 1)).all()
# Assert that non-corrected values are untouched
# Again we add a 0.5 tol because of randomness.
xr.testing.assert_equal(
ds_ad.sim_ad.where(prsim > 20.1),
prsim.where(prsim > 20.5).transpose("lat", "time"),
)
# Assert that Pth and dP0 are approx the good values
np.testing.assert_allclose(ds_ad.pth, 20, rtol=0.05)
np.testing.assert_allclose(ds_ad.dP0, 0.5, atol=0.14) | 0.663233 | 0.574932 |
would_you_rather_array = [
"Would you rather have the ability to see 10 minutes into the future or 150 years into the "
"future? ",
"Would you rather have telekinesis (the ability to move things with your mind) or telepathy (the ability to read "
"minds)? ", "Would you rather team up with Wonder Woman or Captain Marvel?",
"Would you rather be forced to sing along or dance to every single song you hear?",
"Would you rather find true love today or win the lottery next year?",
"Would you rather be in jail for five years or be in a coma for a decade?",
"Would you rather have another 10 years with your partner or a one-night stand with your celebrity crush?",
"Would you rather be chronically under-dressed or overdressed?",
"Would you rather have everyone you know be able to read your thoughts or for everyone you "
"know to have access to your Internet history? ",
"Would you rather lose your sight or your memories?",
"Would you rather have universal respect or unlimited power?",
"Would you rather give up air conditioning and heating for the rest of your life or give up "
"the Internet for the rest of your life? ",
"Would you rather swim in a pool full of Nutella or a pool full of maple syrup?",
"Would you rather labor under a hot sun or extreme cold?",
"Would you rather stay in during a snow day or build a fort?",
"Would you rather buy 10 things you don’t need every time you go shopping or always forget "
"the one thing that you need when you go to the store? ",
"Would you rather never be able to go out during the day or never be able to go out at night?",
"Would you rather have a personal maid or a personal chef?",
"Would you rather have Beyoncé’s talent or Jay-Z‘s business acumen?",
"Would you rather be 11 feet tall or nine inches tall?",
"Would you rather be an extra in an Oscar-winning movie or the lead in a box office bomb?",
"Would you rather vomit on your hero or have your hero vomit on you?",
"Would you rather communicate only in emoji or never be able to text at all ever again?",
"Would you rather be royalty ,000 years ago or an average person today?",
"Would you rather lounge by the pool or on the beach?",
"Would you rather wear the same socks for a month or the same underwear for a week?",
"Would you rather work an overtime shift with your annoying boss or spend full day with your mother-in-law?",
"Would you rather cuddle a koala or pal around with a panda?",
"Would you rather have a sing-off with Ariana Grande or a dance-off with Rihanna?",
"Would you rather always have B.O. and not know it or always smell B.O. on everyone else?",
"Would you rather watch nothing but Hallmark Christmas movies or nothing but horror movies?",
"Would you rather always be 10 minutes late or always be 20 minutes early?",
"Would you rather spend a week in the forest or a night in a real haunted house?",
"Would you rather find a rat in your kitchen or a roach in your bed?",
"Would you rather have a pause or a rewind button in your life?",
"Would you rather always have a full phone battery or a full gas tank?",
"Would you rather lose all your teeth or lose a day of your life every time you kissed someone?",
"Would you rather drink from a toilet or pee in a litter box?",
"Would you rather be forced to live the same day over and over again for a full year, "
"or take 3 years off the end of your life? ",
"Would you rather never eat watermelon ever again or be forced to eat watermelon with every meal?",
"Would you rather get a paper cut every time you turn a page or bite your tongue every time you eat?",
"Would you rather oversleep every day for a week or not get any sleep at all for four days?",
"Would you rather die in 20 years with no regrets or live to 100 with a lot of regrets?",
"Would you rather sip gin with <NAME> or shoot tequila with Dwayne “<NAME>” Johnson?",
"Would you rather get trapped in the middle of a food fight or a water balloon fight?",
"Would you rather walk to work in heels or drive to work in reverse?",
"Would you rather spend a year at war or a year in prison?",
"Would you rather die before or after your partner?",
"Would you rather have a child every year for 20 years or never have any children at all?",
"Would you rather take amazing selfies but look terrible in all other photos or be photogenic everywhere but in your selfies?",
"Would you rather be gassy on a first date or your wedding night?",
"Would you rather <NAME> or <NAME> play you in a movie?",
"Would you rather be able to take back anything you say or hear any conversation that is about you?",
"Would you rather have skin that changes color based on your emotions or tattoos appear all over your body depicting what you did yesterday?",
"Would you rather hunt and butcher your own meat or never eat meat again?",
"Would you rather lose all of your friends but keep your BFF or lose your BFF but keep the rest of your buds?",
"Would you rather have people spread a terrible lie about you or have people spread terrible but true tales about you?",
"Would you rather walk in on your parents or have them walk in on you?",
"Would you rather be the absolute best at something that no one takes seriously or be average at something well respected?",
"Would you rather have unlimited battery life on all of your devices or have free WiFi wherever you go?",
"Would you rather have Billie Eilish‘s future or Madonna’s legacy?",
"Would you rather have a third nipple or an extra toe?",
"Would you rather solve world hunger or global warming?",
"Would you rather have to wear every shirt inside out or every pair of pants backward?",
"Would you rather live in a treehouse or in a cave?",
"Would you rather win $5000 or your best friend win $10,000?",
"Would you rather be in history books for something terrible or be forgotten completely after you die?",
"Would you rather travel the world for free for a year or have $2,000 to spend however you please?",
"Would you rather your to only be able to talk to your dog or for your dog to be able to talk to only you—and everyone thinks you’re nuts?",
"Would you rather have a mullet for a year or be bald (no wigs!) for six months?",
"Would you rather go back to the past and meet your loved ones who passed away or go to the future to meet your children or grandchildren to be?",
"Would you rather have <NAME>’s lips or with <NAME>‘s hair?",
"Would you rather stay the age you are physically forever or stay the way you are now financially forever?",
"Would you rather be in a zombie apocalypse or a robot apocalypse?",
"Would you rather be alone all your life or surrounded by really annoying people?",
"Would you rather give up your cellphone for a month or bathing for a month?",
"Would you rather spend a day cleaning your worst enemy’s house or have your crush spend the day cleaning your house?",
"Would you rather spend a year entirely alone or a year without a home?",
"Would you rather buy all used underwear or all used toothbrushes?",
"Would you rather have a photographic memory or an IQ of 200?",
"Would you rather go on a cruise with your boss or never go on vacation ever again?",
"Would you rather forget your partner’s birthday or your anniversary every year?",
"Would you rather have to wear stilettos to sleep or have to wear slippers everywhere you go?",
"Would you rather change the outcome of the last election or get to decide the outcome of the next election?",
"Would you rather lose the ability to read or lose the ability to speak?",
"Would you rather smooch <NAME>, <NAME>, <NAME> or <NAME>?",
"Would you rather be beautiful and stupid or unattractive but a genius?",
"Would you rather have seven fingers on each hand or seven toes on each foot?",
"Would you rather work the job you have now for a year at double your current rate of pay or have one year off with what you are making now?",
"Would you rather be always stuck in traffic but find a perfect parking spot or never hit traffic but always take forever to park?",
"Would you rather have super-sensitive taste buds or super-sensitive hearing?",
"Would you rather ask your ex or a total stranger for a favor?",
"Would you rather go on tour with <NAME> or Cher?",
"Would you rather eat only pizza for a year or not eat any pizza for five years?",
"Would you rather never get another present in your life but always pick the perfect present for everyone else or keep getting presents but giving terrible ones to everyone else?",
"Would you rather sleep in a doghouse or let stray dogs sleep in your bed?",
"Would you rather be able to speak any language or be able to communicate with animals?",
"Would you rather have all of your messages and photos leak publicly or never use a cellphone ever again?",
"Would you rather run at 100 mph or fly at 20 mph?",
"Would you rather have Adele‘s voice or Normani’s dance moves?",
"Would you rather have to wear sweatpants everywhere for the rest of your life or never wear sweatpants again?",
"Would you rather have 7,000 spoons when all you need is a knife or always have a knife but never be able to use spoons?",
"Would you rather detect every lie you hear or get away with every lie you tell?",
"Would you rather be the funniest person in a room or the smartest person in a room?",
"Would you rather talk like Yoda or breathe like <NAME>?",
"Would you rather people knew all the details of your finances or all the details of your love life?",
"Would you rather listen to your least-favorite song on a loop for a year or never listen to any music at all for a year?",
"Would you rather go vegan for a month or only eat meat and dairy for a month?",
"Would you rather clean up someone else’s vomit or someone else’s blood?",
"Would you rather work for <NAME> or <NAME>?",
"Would you rather spend the weekend with pirates or ninjas?",
"Would you rather end every phone call with “I love you” or accidentally call your partner the wrong name during a fight?",
"Would you rather get your paycheck given to you in pennies or never be able to use cash again?",
"Would you rather see <NAME> in a movie or see <NAME> in concert?",
"Would you rather win the lottery but have to spend it all in one day or triple your current salary forever?",
"Would you rather live until you are 200 and look your age or look like you’re 22 your whole life, but die at age 65?",
"Would you rather give up cursing forever or give up ice cream for 12 years?",
"Would you rather hear a comforting lie or an uncomfortable truth?",
"Would you rather be locked for a week in a room that’s overly bright or a room that’s totally dark?",
"Would you rather someone see all the photos in your phone or read all your text messages?",
"Would you rather have a South Park-themed wedding or a Family Guy-themed funeral?",
"Would you rather have to hunt and gather all of your food or eat McDonald’s for every meal?",
"Would you rather have fortune or fame?",
"Would you rather celebrate the Fourth of July with <NAME> or Christmas with <NAME>?",
"Would you rather only be able to listen to one song for the rest of your life or only be able to watch one movie for the rest of your life?",
"Would you rather never use social media again or never watch another movie ever again?",
"Would you rather have police hunting you down for a crime you didn’t commit or a serial killer actually hunting you?",
"Would you rather live a peaceful life in a small cabin in the woods or a drama-filled life in a mansion in a big city?",
"Would you rather find your soulmate or your calling?",
"Would you rather drink sour milk or brush your teeth with soap?",
"Would you rather steal Duchess Meghan or Duchess Kate’s style?",
"Would you rather never get a cold ever again or never be stuck in traffic ever again?",
"Would you rather be tall and average looking or three feet tall but beautiful?",
"Would you rather visit the International Space Station for a week or spend a week in a hotel at the bottom of the ocean?",
"Would you rather confess to cheating on your partner or catch your partner cheating on you?",
"Would you rather have all traffic lights you approach be green or never have to stand in line again?",
"Would you rather share an onscreen kiss with <NAME> or <NAME>?",
"Would you rather never eat Christmas cookies ever again or never eat Halloween candy ever again?",
"Would you rather lose your long-term memory or your short-term memory?",
"Would you rather be stranded in the jungle or in the desert?",
"Would you rather everyone you love forget your birthday or everyone you love sing “Happy Birthday” to you for 24 hours straight?",
"Would you rather be invisible or be able to fly?",
"Would you rather spend every weekend indoors or spend every weekend outdoors?",
"Would you rather party with <NAME> and <NAME> or with <NAME> and <NAME>?",
"Would you rather give up wine for a year or drink nothing but wine for a year?",
"Would you rather start a colony on another planet or be the leader of a country on Earth?",
"Would you rather live in a house haunted by friendly ghosts or be a ghost reliving your average day after you die?",
"Would you rather have one wish granted today or 10 wishes granted 20 years from now?",
"Would you rather get hit on by someone 20 years older than you or someone 20 years younger than you?",
"Would you rather fall down in public or pass gas in public?",
"Would you rather only eat raw food or only eat TV dinners?",
"Would you rather run as fast as The Flash or be as strong as Superman?",
"Would you rather never have a wedgie or never have anything stuck in your teeth ever again?",
"Would you rather marry the most attractive person you’ve ever met or the best cook you’ve ever met?",
"Would you rather sing karaoke with <NAME> or with <NAME>?",
"Would you rather go back to kindergarten with everything you know now or know now everything your future self will learn?",
"Would you rather be able to read minds or predict the future?",
"Would you rather take a pill a day for nutrients and to feel full, but never eat anything again or eat whatever you want but never really feel full?",
"Would you rather be an unknown superhero or an infamous villain?",
"Would you rather always have an annoying song stuck in your head or always have an itch that you can’t reach?",
"Would you rather never be able to keep anyone else’s secrets or have someone tell all of your secrets?",
"Would you rather be Batman or Iron Man?",
"Would you rather be married to someone stunning who doesn’t think you’re attractive or be married to someone ugly who thinks you’re gorgeous?",
"Would you rather have a third ear or a third eye?",
"Would you rather have $1 million now or $1,000 a week for the rest of your life?",
"Would you rather binge-watch Sex And the City or Girls?",
"Would you rather be rich working a job you hate or poor working a job you love?",
"Would you rather wear real fur or fake jewels?",
"Would you rather work a high-paying job that you hate or your dream job with only just enough money for rent, food and utilities?",
"Would you rather wake up naked in a forest five miles from home or in your underwear at work?",
"Would you rather go backstage with your favorite band or be an extra on your favorite TV show?",
"Would you rather never eat your favorite food for the rest of your life or only eat your favorite food?",
"Would you rather be able to erase your own memories or be able to erase someone else’s memories?",
"Would you rather be so afraid of heights that you can’t go to the second floor of a building or be so afraid of the sun that you can only leave the house on rainy days?",
"Would you rather have a rap battle against <NAME> or Lizzo?",
"Would you rather save your best friend’s life if it meant five strangers would die or save five strangers if it meant sacrificing your best friend?",
"Would you rather give up coffee or soda forever?",
"Would you rather find a $100 bill floating in a public toilet or a $20 bill in your own pocket?",
"Would you rather wear nothing but neon orange or neon green for an entire year?",
"Would you rather eat the same thing for every meal for a year or be able to eat whatever you wanted, but only once every three days?",
"Would you rather get drunk off of one sip of alcohol or never get drunk no matter how much booze you imbibe?",
"Would you rather sell all of your possessions or sell one of your organs?",
"Would you rather clean a toilet with your toothbrush or a floor with your tongue?",
"Would you rather be asked the same question over and over again or never be spoken to ever again?",
"Would you rather be reincarnated as a fly or just stop existing when you die?",
"Would you rather be serenaded by <NAME> or <NAME>?",
"Would you rather be unable to close any door once it’s open or be unable to open any door once it’s closed?",
"Would you rather throw the best parties but have to clean up the mess by yourself or never go to a party again?",
"Would you rather have a tattoo of the title of the last book you read or the last TV show you watched?",
"Would you rather wear clothes that were always way too big or a couple sizes too small?",
"Would you rather give your parents or your boss access to your browser history?",
"Would you rather only be able to wash your hair twice a year or only be able to check your phone once a day?",
"Would you rather have a tennis lesson from <NAME> or a soccer lesson from Meghan Rapinoe?",
"Would you rather have a permanent unibrow or no eyebrows at all?",
"Would you rather have aliens be real and covered up by the government or have no extraterrestrial life at all in the universe?",
"Would you rather be caught liking your ex’s Instagram pics or your partner’s ex’s Instagram pics?",
"Would you rather never eat cookies ever again or only ever drink water?",
"Would you rather donate your organs to those who need them or donate your entire body to science?",
"Would you rather be criticized or be ignored?",
"Would you rather work alongside <NAME> or <NAME>?",
"Would you rather be punished for a crime you didn’t commit or have someone else take credit for one of your major accomplishments?",
"Would you rather eat an undercooked meal or a burnt meal?",
"Would you rather get a cooking lesson from <NAME> or <NAME>?",
"Would you rather have your boss or your parents look through your text messages?",
"Would you rather have your first child when you’re 18 or when you’re 50?",
"Would you rather star in a Star Wars or a Marvel film?",
"Would you rather wear heels to the gym or sneakers to a wedding?",
"Would you rather give up brushing your hair or give us brushing your teeth?",
"Would you rather master every musical instrument or every type of sport?",
"Would you rather always have wet socks or a small rock in your shoe?",
"Would you rather have <NAME> or Eminem perform the soundtrack to your life?",
"Would you rather be the class clown or the teacher’s pet?",
"Would you rather bathe in the dishwater or wash dishes in your bathwater?",
"Would you rather show up to a job interview with stained pants or pit stains?",
"Would you rather never age physically or never age mentally?",
"Would you rather date someone with bad breath or bad manners?",
"Would you rather never wear makeup ever again or wear a full face of the wrong shades every day?",
"Would you rather read the book or watch the movie?",
"Would you rather have a slumber party with <NAME> or go to a comedy show with <NAME>?",
"Would you rather eat chocolate on pizza or never eat chocolate ever again?",
"Would you rather have X-ray vision of people you find unattractive or everyone else have X-ray vision of you?",
"Would you rather have your own theme park or your own zoo?",
"Would you rather be the star player on a losing team or warm the bench on a championship roster?",
"Would you rather know when you’re going to die or how you’re going to die?",
"Would you rather lose all of your teeth or all of your hair?",
"Would you rather watch nothing but The Office or Friends for the rest of your life?",
"Would you rather lose your keys or your phone?",
"Would you rather live in a home with no electricity or in a home with no running water?",
"Would you rather be rich with no friends or poor and popular?",
"Would you rather look strong and be weak or look weak and be strong?",
"Would you rather have your style critiqued by <NAME> or Miranda Priestly?",
"Would you rather wear one or seven colors everyday?",
"Would you rather sneeze nonstop for 15 minutes once every day or sneeze once every three minutes of the day while you’re awake?",
"Would you rather walk barefoot in a public bathroom or through poison ivy?",
"Would you rather have the ability to see 10 years into your own future or six months into the future of the world?",
"Would you rather nobody remember who you are at your 20-year class reunion or have everybody comment on how old you look?",
"Would you rather shoot hoops with <NAME> or toss a football with <NAME>?",
"Would you rather live through an episode of Orange Is The New Black or Black Mirror?",
"Would you rather only be able to listen to Christmas songs all year round or only be able to watch nothing but horror movies?",
"Would you rather be a genius everyone thinks is an idiot or an idiot everyone thinks is a genius?",
"Would you rather win on Survivor or on The Bachelor or The Bachelorette?",
"Would you rather be beloved by the general public but your family and friends hate you, or be hated by the general public but your family and friends love you?",
"Would you rather be color blind or lose your sense of taste?",
"Would you rather live on a desert island with your celebrity crush or in a mansion with your ex?",
"Would you rather pass gas every time you meet someone new or burp every time you kiss someone?",
"Would you rather have tea with Queen Elizabeth or a beer with <NAME>?",
"Would you rather give up the Internet or showering for a month?",
"Would you rather get away with a terrible crime but live in fear of someone discovering it or go to prison for three years for a crime you didn’t commit?",
"Would you rather be forced to live the same day over and over again for a full year or take three years off the end of your life?"
] | would_you_rather/wyr.py | would_you_rather_array = [
"Would you rather have the ability to see 10 minutes into the future or 150 years into the "
"future? ",
"Would you rather have telekinesis (the ability to move things with your mind) or telepathy (the ability to read "
"minds)? ", "Would you rather team up with Wonder Woman or Captain Marvel?",
"Would you rather be forced to sing along or dance to every single song you hear?",
"Would you rather find true love today or win the lottery next year?",
"Would you rather be in jail for five years or be in a coma for a decade?",
"Would you rather have another 10 years with your partner or a one-night stand with your celebrity crush?",
"Would you rather be chronically under-dressed or overdressed?",
"Would you rather have everyone you know be able to read your thoughts or for everyone you "
"know to have access to your Internet history? ",
"Would you rather lose your sight or your memories?",
"Would you rather have universal respect or unlimited power?",
"Would you rather give up air conditioning and heating for the rest of your life or give up "
"the Internet for the rest of your life? ",
"Would you rather swim in a pool full of Nutella or a pool full of maple syrup?",
"Would you rather labor under a hot sun or extreme cold?",
"Would you rather stay in during a snow day or build a fort?",
"Would you rather buy 10 things you don’t need every time you go shopping or always forget "
"the one thing that you need when you go to the store? ",
"Would you rather never be able to go out during the day or never be able to go out at night?",
"Would you rather have a personal maid or a personal chef?",
"Would you rather have Beyoncé’s talent or Jay-Z‘s business acumen?",
"Would you rather be 11 feet tall or nine inches tall?",
"Would you rather be an extra in an Oscar-winning movie or the lead in a box office bomb?",
"Would you rather vomit on your hero or have your hero vomit on you?",
"Would you rather communicate only in emoji or never be able to text at all ever again?",
"Would you rather be royalty ,000 years ago or an average person today?",
"Would you rather lounge by the pool or on the beach?",
"Would you rather wear the same socks for a month or the same underwear for a week?",
"Would you rather work an overtime shift with your annoying boss or spend full day with your mother-in-law?",
"Would you rather cuddle a koala or pal around with a panda?",
"Would you rather have a sing-off with Ariana Grande or a dance-off with Rihanna?",
"Would you rather always have B.O. and not know it or always smell B.O. on everyone else?",
"Would you rather watch nothing but Hallmark Christmas movies or nothing but horror movies?",
"Would you rather always be 10 minutes late or always be 20 minutes early?",
"Would you rather spend a week in the forest or a night in a real haunted house?",
"Would you rather find a rat in your kitchen or a roach in your bed?",
"Would you rather have a pause or a rewind button in your life?",
"Would you rather always have a full phone battery or a full gas tank?",
"Would you rather lose all your teeth or lose a day of your life every time you kissed someone?",
"Would you rather drink from a toilet or pee in a litter box?",
"Would you rather be forced to live the same day over and over again for a full year, "
"or take 3 years off the end of your life? ",
"Would you rather never eat watermelon ever again or be forced to eat watermelon with every meal?",
"Would you rather get a paper cut every time you turn a page or bite your tongue every time you eat?",
"Would you rather oversleep every day for a week or not get any sleep at all for four days?",
"Would you rather die in 20 years with no regrets or live to 100 with a lot of regrets?",
"Would you rather sip gin with <NAME> or shoot tequila with Dwayne “<NAME>” Johnson?",
"Would you rather get trapped in the middle of a food fight or a water balloon fight?",
"Would you rather walk to work in heels or drive to work in reverse?",
"Would you rather spend a year at war or a year in prison?",
"Would you rather die before or after your partner?",
"Would you rather have a child every year for 20 years or never have any children at all?",
"Would you rather take amazing selfies but look terrible in all other photos or be photogenic everywhere but in your selfies?",
"Would you rather be gassy on a first date or your wedding night?",
"Would you rather <NAME> or <NAME> play you in a movie?",
"Would you rather be able to take back anything you say or hear any conversation that is about you?",
"Would you rather have skin that changes color based on your emotions or tattoos appear all over your body depicting what you did yesterday?",
"Would you rather hunt and butcher your own meat or never eat meat again?",
"Would you rather lose all of your friends but keep your BFF or lose your BFF but keep the rest of your buds?",
"Would you rather have people spread a terrible lie about you or have people spread terrible but true tales about you?",
"Would you rather walk in on your parents or have them walk in on you?",
"Would you rather be the absolute best at something that no one takes seriously or be average at something well respected?",
"Would you rather have unlimited battery life on all of your devices or have free WiFi wherever you go?",
"Would you rather have Billie Eilish‘s future or Madonna’s legacy?",
"Would you rather have a third nipple or an extra toe?",
"Would you rather solve world hunger or global warming?",
"Would you rather have to wear every shirt inside out or every pair of pants backward?",
"Would you rather live in a treehouse or in a cave?",
"Would you rather win $5000 or your best friend win $10,000?",
"Would you rather be in history books for something terrible or be forgotten completely after you die?",
"Would you rather travel the world for free for a year or have $2,000 to spend however you please?",
"Would you rather your to only be able to talk to your dog or for your dog to be able to talk to only you—and everyone thinks you’re nuts?",
"Would you rather have a mullet for a year or be bald (no wigs!) for six months?",
"Would you rather go back to the past and meet your loved ones who passed away or go to the future to meet your children or grandchildren to be?",
"Would you rather have <NAME>’s lips or with <NAME>‘s hair?",
"Would you rather stay the age you are physically forever or stay the way you are now financially forever?",
"Would you rather be in a zombie apocalypse or a robot apocalypse?",
"Would you rather be alone all your life or surrounded by really annoying people?",
"Would you rather give up your cellphone for a month or bathing for a month?",
"Would you rather spend a day cleaning your worst enemy’s house or have your crush spend the day cleaning your house?",
"Would you rather spend a year entirely alone or a year without a home?",
"Would you rather buy all used underwear or all used toothbrushes?",
"Would you rather have a photographic memory or an IQ of 200?",
"Would you rather go on a cruise with your boss or never go on vacation ever again?",
"Would you rather forget your partner’s birthday or your anniversary every year?",
"Would you rather have to wear stilettos to sleep or have to wear slippers everywhere you go?",
"Would you rather change the outcome of the last election or get to decide the outcome of the next election?",
"Would you rather lose the ability to read or lose the ability to speak?",
"Would you rather smooch <NAME>, <NAME>, <NAME> or <NAME>?",
"Would you rather be beautiful and stupid or unattractive but a genius?",
"Would you rather have seven fingers on each hand or seven toes on each foot?",
"Would you rather work the job you have now for a year at double your current rate of pay or have one year off with what you are making now?",
"Would you rather be always stuck in traffic but find a perfect parking spot or never hit traffic but always take forever to park?",
"Would you rather have super-sensitive taste buds or super-sensitive hearing?",
"Would you rather ask your ex or a total stranger for a favor?",
"Would you rather go on tour with <NAME> or Cher?",
"Would you rather eat only pizza for a year or not eat any pizza for five years?",
"Would you rather never get another present in your life but always pick the perfect present for everyone else or keep getting presents but giving terrible ones to everyone else?",
"Would you rather sleep in a doghouse or let stray dogs sleep in your bed?",
"Would you rather be able to speak any language or be able to communicate with animals?",
"Would you rather have all of your messages and photos leak publicly or never use a cellphone ever again?",
"Would you rather run at 100 mph or fly at 20 mph?",
"Would you rather have Adele‘s voice or Normani’s dance moves?",
"Would you rather have to wear sweatpants everywhere for the rest of your life or never wear sweatpants again?",
"Would you rather have 7,000 spoons when all you need is a knife or always have a knife but never be able to use spoons?",
"Would you rather detect every lie you hear or get away with every lie you tell?",
"Would you rather be the funniest person in a room or the smartest person in a room?",
"Would you rather talk like Yoda or breathe like <NAME>?",
"Would you rather people knew all the details of your finances or all the details of your love life?",
"Would you rather listen to your least-favorite song on a loop for a year or never listen to any music at all for a year?",
"Would you rather go vegan for a month or only eat meat and dairy for a month?",
"Would you rather clean up someone else’s vomit or someone else’s blood?",
"Would you rather work for <NAME> or <NAME>?",
"Would you rather spend the weekend with pirates or ninjas?",
"Would you rather end every phone call with “I love you” or accidentally call your partner the wrong name during a fight?",
"Would you rather get your paycheck given to you in pennies or never be able to use cash again?",
"Would you rather see <NAME> in a movie or see <NAME> in concert?",
"Would you rather win the lottery but have to spend it all in one day or triple your current salary forever?",
"Would you rather live until you are 200 and look your age or look like you’re 22 your whole life, but die at age 65?",
"Would you rather give up cursing forever or give up ice cream for 12 years?",
"Would you rather hear a comforting lie or an uncomfortable truth?",
"Would you rather be locked for a week in a room that’s overly bright or a room that’s totally dark?",
"Would you rather someone see all the photos in your phone or read all your text messages?",
"Would you rather have a South Park-themed wedding or a Family Guy-themed funeral?",
"Would you rather have to hunt and gather all of your food or eat McDonald’s for every meal?",
"Would you rather have fortune or fame?",
"Would you rather celebrate the Fourth of July with <NAME> or Christmas with <NAME>?",
"Would you rather only be able to listen to one song for the rest of your life or only be able to watch one movie for the rest of your life?",
"Would you rather never use social media again or never watch another movie ever again?",
"Would you rather have police hunting you down for a crime you didn’t commit or a serial killer actually hunting you?",
"Would you rather live a peaceful life in a small cabin in the woods or a drama-filled life in a mansion in a big city?",
"Would you rather find your soulmate or your calling?",
"Would you rather drink sour milk or brush your teeth with soap?",
"Would you rather steal Duchess Meghan or Duchess Kate’s style?",
"Would you rather never get a cold ever again or never be stuck in traffic ever again?",
"Would you rather be tall and average looking or three feet tall but beautiful?",
"Would you rather visit the International Space Station for a week or spend a week in a hotel at the bottom of the ocean?",
"Would you rather confess to cheating on your partner or catch your partner cheating on you?",
"Would you rather have all traffic lights you approach be green or never have to stand in line again?",
"Would you rather share an onscreen kiss with <NAME> or <NAME>?",
"Would you rather never eat Christmas cookies ever again or never eat Halloween candy ever again?",
"Would you rather lose your long-term memory or your short-term memory?",
"Would you rather be stranded in the jungle or in the desert?",
"Would you rather everyone you love forget your birthday or everyone you love sing “Happy Birthday” to you for 24 hours straight?",
"Would you rather be invisible or be able to fly?",
"Would you rather spend every weekend indoors or spend every weekend outdoors?",
"Would you rather party with <NAME> and <NAME> or with <NAME> and <NAME>?",
"Would you rather give up wine for a year or drink nothing but wine for a year?",
"Would you rather start a colony on another planet or be the leader of a country on Earth?",
"Would you rather live in a house haunted by friendly ghosts or be a ghost reliving your average day after you die?",
"Would you rather have one wish granted today or 10 wishes granted 20 years from now?",
"Would you rather get hit on by someone 20 years older than you or someone 20 years younger than you?",
"Would you rather fall down in public or pass gas in public?",
"Would you rather only eat raw food or only eat TV dinners?",
"Would you rather run as fast as The Flash or be as strong as Superman?",
"Would you rather never have a wedgie or never have anything stuck in your teeth ever again?",
"Would you rather marry the most attractive person you’ve ever met or the best cook you’ve ever met?",
"Would you rather sing karaoke with <NAME> or with <NAME>?",
"Would you rather go back to kindergarten with everything you know now or know now everything your future self will learn?",
"Would you rather be able to read minds or predict the future?",
"Would you rather take a pill a day for nutrients and to feel full, but never eat anything again or eat whatever you want but never really feel full?",
"Would you rather be an unknown superhero or an infamous villain?",
"Would you rather always have an annoying song stuck in your head or always have an itch that you can’t reach?",
"Would you rather never be able to keep anyone else’s secrets or have someone tell all of your secrets?",
"Would you rather be Batman or Iron Man?",
"Would you rather be married to someone stunning who doesn’t think you’re attractive or be married to someone ugly who thinks you’re gorgeous?",
"Would you rather have a third ear or a third eye?",
"Would you rather have $1 million now or $1,000 a week for the rest of your life?",
"Would you rather binge-watch Sex And the City or Girls?",
"Would you rather be rich working a job you hate or poor working a job you love?",
"Would you rather wear real fur or fake jewels?",
"Would you rather work a high-paying job that you hate or your dream job with only just enough money for rent, food and utilities?",
"Would you rather wake up naked in a forest five miles from home or in your underwear at work?",
"Would you rather go backstage with your favorite band or be an extra on your favorite TV show?",
"Would you rather never eat your favorite food for the rest of your life or only eat your favorite food?",
"Would you rather be able to erase your own memories or be able to erase someone else’s memories?",
"Would you rather be so afraid of heights that you can’t go to the second floor of a building or be so afraid of the sun that you can only leave the house on rainy days?",
"Would you rather have a rap battle against <NAME> or Lizzo?",
"Would you rather save your best friend’s life if it meant five strangers would die or save five strangers if it meant sacrificing your best friend?",
"Would you rather give up coffee or soda forever?",
"Would you rather find a $100 bill floating in a public toilet or a $20 bill in your own pocket?",
"Would you rather wear nothing but neon orange or neon green for an entire year?",
"Would you rather eat the same thing for every meal for a year or be able to eat whatever you wanted, but only once every three days?",
"Would you rather get drunk off of one sip of alcohol or never get drunk no matter how much booze you imbibe?",
"Would you rather sell all of your possessions or sell one of your organs?",
"Would you rather clean a toilet with your toothbrush or a floor with your tongue?",
"Would you rather be asked the same question over and over again or never be spoken to ever again?",
"Would you rather be reincarnated as a fly or just stop existing when you die?",
"Would you rather be serenaded by <NAME> or <NAME>?",
"Would you rather be unable to close any door once it’s open or be unable to open any door once it’s closed?",
"Would you rather throw the best parties but have to clean up the mess by yourself or never go to a party again?",
"Would you rather have a tattoo of the title of the last book you read or the last TV show you watched?",
"Would you rather wear clothes that were always way too big or a couple sizes too small?",
"Would you rather give your parents or your boss access to your browser history?",
"Would you rather only be able to wash your hair twice a year or only be able to check your phone once a day?",
"Would you rather have a tennis lesson from <NAME> or a soccer lesson from Meghan Rapinoe?",
"Would you rather have a permanent unibrow or no eyebrows at all?",
"Would you rather have aliens be real and covered up by the government or have no extraterrestrial life at all in the universe?",
"Would you rather be caught liking your ex’s Instagram pics or your partner’s ex’s Instagram pics?",
"Would you rather never eat cookies ever again or only ever drink water?",
"Would you rather donate your organs to those who need them or donate your entire body to science?",
"Would you rather be criticized or be ignored?",
"Would you rather work alongside <NAME> or <NAME>?",
"Would you rather be punished for a crime you didn’t commit or have someone else take credit for one of your major accomplishments?",
"Would you rather eat an undercooked meal or a burnt meal?",
"Would you rather get a cooking lesson from <NAME> or <NAME>?",
"Would you rather have your boss or your parents look through your text messages?",
"Would you rather have your first child when you’re 18 or when you’re 50?",
"Would you rather star in a Star Wars or a Marvel film?",
"Would you rather wear heels to the gym or sneakers to a wedding?",
"Would you rather give up brushing your hair or give us brushing your teeth?",
"Would you rather master every musical instrument or every type of sport?",
"Would you rather always have wet socks or a small rock in your shoe?",
"Would you rather have <NAME> or Eminem perform the soundtrack to your life?",
"Would you rather be the class clown or the teacher’s pet?",
"Would you rather bathe in the dishwater or wash dishes in your bathwater?",
"Would you rather show up to a job interview with stained pants or pit stains?",
"Would you rather never age physically or never age mentally?",
"Would you rather date someone with bad breath or bad manners?",
"Would you rather never wear makeup ever again or wear a full face of the wrong shades every day?",
"Would you rather read the book or watch the movie?",
"Would you rather have a slumber party with <NAME> or go to a comedy show with <NAME>?",
"Would you rather eat chocolate on pizza or never eat chocolate ever again?",
"Would you rather have X-ray vision of people you find unattractive or everyone else have X-ray vision of you?",
"Would you rather have your own theme park or your own zoo?",
"Would you rather be the star player on a losing team or warm the bench on a championship roster?",
"Would you rather know when you’re going to die or how you’re going to die?",
"Would you rather lose all of your teeth or all of your hair?",
"Would you rather watch nothing but The Office or Friends for the rest of your life?",
"Would you rather lose your keys or your phone?",
"Would you rather live in a home with no electricity or in a home with no running water?",
"Would you rather be rich with no friends or poor and popular?",
"Would you rather look strong and be weak or look weak and be strong?",
"Would you rather have your style critiqued by <NAME> or Miranda Priestly?",
"Would you rather wear one or seven colors everyday?",
"Would you rather sneeze nonstop for 15 minutes once every day or sneeze once every three minutes of the day while you’re awake?",
"Would you rather walk barefoot in a public bathroom or through poison ivy?",
"Would you rather have the ability to see 10 years into your own future or six months into the future of the world?",
"Would you rather nobody remember who you are at your 20-year class reunion or have everybody comment on how old you look?",
"Would you rather shoot hoops with <NAME> or toss a football with <NAME>?",
"Would you rather live through an episode of Orange Is The New Black or Black Mirror?",
"Would you rather only be able to listen to Christmas songs all year round or only be able to watch nothing but horror movies?",
"Would you rather be a genius everyone thinks is an idiot or an idiot everyone thinks is a genius?",
"Would you rather win on Survivor or on The Bachelor or The Bachelorette?",
"Would you rather be beloved by the general public but your family and friends hate you, or be hated by the general public but your family and friends love you?",
"Would you rather be color blind or lose your sense of taste?",
"Would you rather live on a desert island with your celebrity crush or in a mansion with your ex?",
"Would you rather pass gas every time you meet someone new or burp every time you kiss someone?",
"Would you rather have tea with Queen Elizabeth or a beer with <NAME>?",
"Would you rather give up the Internet or showering for a month?",
"Would you rather get away with a terrible crime but live in fear of someone discovering it or go to prison for three years for a crime you didn’t commit?",
"Would you rather be forced to live the same day over and over again for a full year or take three years off the end of your life?"
] | 0.220091 | 0.364834 |
r"""
Implements a buffer with insertion points. When you know you need to
"get back" to a place and write more later, simply call insertion_point()
at that spot and get a new StringIOTree object that is "left behind".
EXAMPLE:
>>> a = StringIOTree()
>>> _= a.write('first\n')
>>> b = a.insertion_point()
>>> _= a.write('third\n')
>>> _= b.write('second\n')
>>> a.getvalue().split()
['first', 'second', 'third']
>>> c = b.insertion_point()
>>> d = c.insertion_point()
>>> _= d.write('alpha\n')
>>> _= b.write('gamma\n')
>>> _= c.write('beta\n')
>>> b.getvalue().split()
['second', 'alpha', 'beta', 'gamma']
>>> try: from cStringIO import StringIO
... except ImportError: from io import StringIO
>>> i = StringIOTree()
>>> d.insert(i)
>>> _= i.write('inserted\n')
>>> out = StringIO()
>>> a.copyto(out)
>>> out.getvalue().split()
['first', 'second', 'alpha', 'inserted', 'beta', 'gamma', 'third']
"""
from __future__ import absolute_import #, unicode_literals
try:
# Prefer cStringIO since io.StringIO() does not support writing 'str' in Py2.
from cStringIO import StringIO
except ImportError:
from io import StringIO
class StringIOTree(object):
    """A tree of string buffers supporting insertion points (see module docs).

    Each node owns a StringIO buffer plus a list of child trees whose
    content precedes the node's own buffer when the tree is flattened.
    """

    def __init__(self, stream=None):
        # Children whose content is emitted before this node's own stream.
        self.prepended_children = []
        if stream is None:
            stream = StringIO()
        self.stream = stream
        # Bind the stream's write method once so callers write via self.write.
        self.write = stream.write
        self.markers = []

    def getvalue(self):
        """Return the flattened content of the whole subtree as one string."""
        pieces = []
        self._collect_in(pieces)
        return "".join(pieces)

    def _collect_in(self, target_list):
        # Depth-first: children's content first, then this node's own buffer.
        for child in self.prepended_children:
            child._collect_in(target_list)
        own = self.stream.getvalue()
        if own:
            target_list.append(own)

    def copyto(self, target):
        """Write the flattened subtree directly to *target* (a file-like
        object). Potentially cheaper than getvalue as no string concatenation
        needs to happen."""
        for child in self.prepended_children:
            child.copyto(target)
        own = self.stream.getvalue()
        if own:
            target.write(own)

    def commit(self):
        """Move everything written so far into a new child node, leaving this
        node's own buffer empty -- this makes it ready for insertion."""
        if self.stream.tell():
            committed = StringIOTree(self.stream)
            # The committed child takes ownership of the current markers.
            committed.markers = self.markers
            self.prepended_children.append(committed)
            self.markers = []
            self.stream = StringIO()
            self.write = self.stream.write

    def insert(self, iotree):
        """Insert a StringIOTree (and all of its contents) at this location.

        Further writing to self appears after what is inserted.
        """
        self.commit()
        self.prepended_children.append(iotree)

    def insertion_point(self):
        """Return a new StringIOTree left behind at the current position
        (i.e. what is written to the result will appear right before whatever
        is next written to self).

        Calling getvalue() or copyto() on the result will only return the
        contents written to it.
        """
        # Commit first so that getvalue() on the result does not include
        # what was already written to self.
        self.commit()
        branch = StringIOTree()
        self.prepended_children.append(branch)
        return branch

    def allmarkers(self):
        """Return the markers of the whole subtree, children first."""
        collected = []
        for child in self.prepended_children:
            collected.extend(child.allmarkers())
        collected.extend(self.markers)
        return collected
"""
# Print the result of allmarkers in a nice human-readable form. Use it only for debugging.
# Prints e.g.
# /path/to/source.pyx:
# cython line 2 maps to 3299-3343
# cython line 4 maps to 2236-2245 2306 3188-3201
# /path/to/othersource.pyx:
# cython line 3 maps to 1234-1270
# ...
# Note: In the example above, 3343 maps to line 2, 3344 does not.
def print_hr_allmarkers(self):
from collections import defaultdict
markers = self.allmarkers()
totmap = defaultdict(lambda: defaultdict(list))
for c_lineno, (cython_desc, cython_lineno) in enumerate(markers):
if cython_lineno > 0 and cython_desc.filename is not None:
totmap[cython_desc.filename][cython_lineno].append(c_lineno + 1)
reprstr = ""
if totmap == 0:
reprstr += "allmarkers is empty\n"
try:
sorted(totmap.items())
except:
print(totmap)
print(totmap.items())
for cython_path, filemap in sorted(totmap.items()):
reprstr += cython_path + ":\n"
for cython_lineno, c_linenos in sorted(filemap.items()):
reprstr += "\tcython line " + str(cython_lineno) + " maps to "
i = 0
while i < len(c_linenos):
reprstr += str(c_linenos[i])
flag = False
while i+1 < len(c_linenos) and c_linenos[i+1] == c_linenos[i]+1:
i += 1
flag = True
if flag:
reprstr += "-" + str(c_linenos[i]) + " "
i += 1
reprstr += "\n"
import sys
sys.stdout.write(reprstr)
""" | Cython/StringIOTree.py |
r"""
Implements a buffer with insertion points. When you know you need to
"get back" to a place and write more later, simply call insertion_point()
at that spot and get a new StringIOTree object that is "left behind".
EXAMPLE:
>>> a = StringIOTree()
>>> _= a.write('first\n')
>>> b = a.insertion_point()
>>> _= a.write('third\n')
>>> _= b.write('second\n')
>>> a.getvalue().split()
['first', 'second', 'third']
>>> c = b.insertion_point()
>>> d = c.insertion_point()
>>> _= d.write('alpha\n')
>>> _= b.write('gamma\n')
>>> _= c.write('beta\n')
>>> b.getvalue().split()
['second', 'alpha', 'beta', 'gamma']
>>> try: from cStringIO import StringIO
... except ImportError: from io import StringIO
>>> i = StringIOTree()
>>> d.insert(i)
>>> _= i.write('inserted\n')
>>> out = StringIO()
>>> a.copyto(out)
>>> out.getvalue().split()
['first', 'second', 'alpha', 'inserted', 'beta', 'gamma', 'third']
"""
from __future__ import absolute_import #, unicode_literals
try:
# Prefer cStringIO since io.StringIO() does not support writing 'str' in Py2.
from cStringIO import StringIO
except ImportError:
from io import StringIO
class StringIOTree(object):
    """
    See module docs.

    A tree of string buffers: each node owns a StringIO plus a list of
    prepended child trees whose content precedes the node's own buffer
    when the tree is flattened.
    """
    def __init__(self, stream=None):
        # Child trees emitted before this node's own stream content.
        self.prepended_children = []
        if stream is None:
            stream = StringIO()
        self.stream = stream
        # Expose the underlying stream's write method directly.
        self.write = stream.write
        # Markers attached to the content currently in this node's buffer.
        self.markers = []
    def getvalue(self):
        """Return the flattened content of the whole subtree as one string."""
        content = []
        self._collect_in(content)
        return "".join(content)
    def _collect_in(self, target_list):
        # Depth-first: children's content first, then this node's buffer.
        for x in self.prepended_children:
            x._collect_in(target_list)
        stream_content = self.stream.getvalue()
        if stream_content:
            target_list.append(stream_content)
    def copyto(self, target):
        """Write the flattened subtree to *target* (a file-like object).

        Potentially cheaper than getvalue as no string concatenation
        needs to happen."""
        for child in self.prepended_children:
            child.copyto(target)
        stream_content = self.stream.getvalue()
        if stream_content:
            target.write(stream_content)
    def commit(self):
        # Save what we have written until now so that the buffer
        # itself is empty -- this makes it ready for insertion
        if self.stream.tell():
            self.prepended_children.append(StringIOTree(self.stream))
            # The committed child takes ownership of the current markers.
            self.prepended_children[-1].markers = self.markers
            self.markers = []
            self.stream = StringIO()
            self.write = self.stream.write
    def insert(self, iotree):
        """
        Insert a StringIOTree (and all of its contents) at this location.
        Further writing to self appears after what is inserted.
        """
        self.commit()
        self.prepended_children.append(iotree)
    def insertion_point(self):
        """
        Returns a new StringIOTree, which is left behind at the current position
        (i.e. what is written to the result will appear right before whatever is
        next written to self).
        Calling getvalue() or copyto() on the result will only return the
        contents written to it.
        """
        # Save what we have written until now
        # This is so that getvalue on the result doesn't include it.
        self.commit()
        # Construct the new forked object to return
        other = StringIOTree()
        self.prepended_children.append(other)
        return other
    def allmarkers(self):
        """Return the markers of the whole subtree, children first."""
        children = self.prepended_children
        return [m for c in children for m in c.allmarkers()] + self.markers
"""
# Print the result of allmarkers in a nice human-readable form. Use it only for debugging.
# Prints e.g.
# /path/to/source.pyx:
# cython line 2 maps to 3299-3343
# cython line 4 maps to 2236-2245 2306 3188-3201
# /path/to/othersource.pyx:
# cython line 3 maps to 1234-1270
# ...
# Note: In the example above, 3343 maps to line 2, 3344 does not.
def print_hr_allmarkers(self):
from collections import defaultdict
markers = self.allmarkers()
totmap = defaultdict(lambda: defaultdict(list))
for c_lineno, (cython_desc, cython_lineno) in enumerate(markers):
if cython_lineno > 0 and cython_desc.filename is not None:
totmap[cython_desc.filename][cython_lineno].append(c_lineno + 1)
reprstr = ""
if totmap == 0:
reprstr += "allmarkers is empty\n"
try:
sorted(totmap.items())
except:
print(totmap)
print(totmap.items())
for cython_path, filemap in sorted(totmap.items()):
reprstr += cython_path + ":\n"
for cython_lineno, c_linenos in sorted(filemap.items()):
reprstr += "\tcython line " + str(cython_lineno) + " maps to "
i = 0
while i < len(c_linenos):
reprstr += str(c_linenos[i])
flag = False
while i+1 < len(c_linenos) and c_linenos[i+1] == c_linenos[i]+1:
i += 1
flag = True
if flag:
reprstr += "-" + str(c_linenos[i]) + " "
i += 1
reprstr += "\n"
import sys
sys.stdout.write(reprstr)
""" | 0.755005 | 0.261667 |
import re
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import ReservedKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.abstract.aggregator import Aggregator
from nvflare.app_common.app_constant import AppConstants
class InTimeAccumulateWeightedAggregator(Aggregator):
    def __init__(self, exclude_vars=None, aggregation_weights=None, expected_data_kind=DataKind.WEIGHT_DIFF):
        """Perform accumulated weighted aggregation.

        It computes
            weighted_sum = sum(shareable * n_iteration * aggregation_weights)
        and
            sum_of_weights = sum(n_iteration)
        in the accept function. The aggregate function returns
            weighted_sum / sum_of_weights.

        Args:
            exclude_vars (str, optional): regex to match excluded vars during
                aggregation. Defaults to None (nothing excluded).
            aggregation_weights (dict, optional): maps client name to its
                aggregation weight. Clients not listed default to 1.0.
            expected_data_kind (optional): the DXO data kind this aggregator
                accepts; must be DataKind.WEIGHT_DIFF or DataKind.WEIGHTS.
                Defaults to DataKind.WEIGHT_DIFF.

        Raises:
            ValueError: if expected_data_kind is neither WEIGHT_DIFF nor WEIGHTS.
        """
        super().__init__()
        self.exclude_vars = re.compile(exclude_vars) if exclude_vars else None
        self.aggregation_weights = aggregation_weights or {}
        if expected_data_kind not in [DataKind.WEIGHT_DIFF, DataKind.WEIGHTS]:
            raise ValueError(f"{expected_data_kind=} not in WEIGHT_DIFF or WEIGHTS")
        self.expected_data_kind = expected_data_kind
        self.logger.debug(f"aggregation weights control: {aggregation_weights}")
        # reset_stats() initializes total/counts/history; the original also
        # re-initialized them right after, which was redundant and is dropped.
        self.reset_stats()
        # Per-client throttle so repeated "missing meta" warnings do not
        # flood the log.
        self.warning_count = {}
        self.warning_limit = 10

    def reset_stats(self):
        """Clear the running aggregation state for a new round."""
        self.total = {}    # var name -> accumulated weighted sum
        self.counts = {}   # var name -> accumulated n_iter (the divisor)
        self.history = []  # accepted contributions this round (dedup + logging)

    def _log_limited_warning(self, fl_ctx, client_name, message):
        """Log *message* as a warning at most ~warning_limit times per client.

        Keeps the original counting behavior: the counter starts at 0 on the
        first warning and is incremented on each subsequent one.
        """
        if self.warning_count.get(client_name, 0) <= self.warning_limit:
            self.log_warning(fl_ctx, message)
            if client_name in self.warning_count:
                self.warning_count[client_name] = self.warning_count[client_name] + 1
            else:
                self.warning_count[client_name] = 0

    def accept(self, shareable: Shareable, fl_ctx: FLContext) -> bool:
        """Store shareable and update aggregator's internal state.

        Args:
            shareable: information from client
            fl_ctx: context provided by workflow

        Returns:
            bool: True if this contribution was accepted into the running
            aggregate; False if it was rejected (invalid DXO, wrong data
            kind, already-processed payload, error return code, missing
            data, stale round, or duplicate submission from the client).
        """
        try:
            dxo = from_shareable(shareable)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # still propagate; any conversion failure is logged and rejected.
            self.log_exception(fl_ctx, "shareable data is not a valid DXO")
            return False
        assert isinstance(dxo, DXO)

        if dxo.data_kind not in (DataKind.WEIGHT_DIFF, DataKind.WEIGHTS):
            self.log_error(fl_ctx, "cannot handle data kind {}".format(dxo.data_kind))
            return False
        if dxo.data_kind != self.expected_data_kind:
            self.log_error(fl_ctx, "expect {} but got {}".format(self.expected_data_kind, dxo.data_kind))
            return False
        # A payload already transformed by another algorithm (e.g. encryption)
        # cannot be aggregated numerically here.
        if (processed_algorithm := dxo.get_meta_prop(MetaKey.PROCESSED_ALGORITHM)) is not None:
            self.log_error(fl_ctx, f"unable to accept shareable processed by {processed_algorithm}")
            return False

        current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
        self.log_debug(fl_ctx, f"current_round: {current_round}")
        client_name = shareable.get_peer_prop(key=ReservedKey.IDENTITY_NAME, default="?")
        contribution_round = shareable.get_header(AppConstants.CONTRIBUTION_ROUND)

        rc = shareable.get_return_code()
        if rc and rc != ReturnCode.OK:
            self.log_info(fl_ctx, f"Client {client_name} returned rc: {rc}. Disregarding contribution.")
            return False

        data = dxo.data
        if data is None:
            self.log_error(fl_ctx, "no data to aggregate")
            return False

        n_iter = dxo.get_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND)

        # Reject stale contributions (from an earlier round) and duplicate
        # submissions from the same client within this round.
        if contribution_round != current_round:
            self.log_info(
                fl_ctx,
                f"discarding shareable from {client_name} at round: {contribution_round}. Current round is: {current_round}",
            )
            return False
        for item in self.history:
            if client_name == item["client_name"]:
                prev_round = item["round"]
                self.log_info(
                    fl_ctx,
                    f"discarding shareable from {client_name} at round: {contribution_round} as {prev_round} accepted already",
                )
                return False

        if n_iter is None:
            self._log_limited_warning(
                fl_ctx,
                client_name,
                f"NUM_STEPS_CURRENT_ROUND missing in meta of shareable"
                f" from {client_name} and set to default value, 1.0. "
                f" This kind of message will show {self.warning_limit} times at most.",
            )
            n_iter = 1.0
        float_n_iter = float(n_iter)

        aggregation_weight = self.aggregation_weights.get(client_name)
        if aggregation_weight is None:
            self._log_limited_warning(
                fl_ctx,
                client_name,
                f"Aggregation_weight missing for {client_name} and set to default value, 1.0"
                f" This kind of message will show {self.warning_limit} times at most.",
            )
            aggregation_weight = 1.0

        # Accumulate the weighted contribution per variable.
        for k, v in data.items():
            if self.exclude_vars is not None and self.exclude_vars.search(k):
                continue
            weighted_value = v * aggregation_weight * float_n_iter
            current_total = self.total.get(k, None)
            if current_total is None:
                self.total[k] = weighted_value
                self.counts[k] = n_iter
            else:
                self.total[k] = current_total + weighted_value
                self.counts[k] = self.counts[k] + n_iter

        self.history.append(
            {
                "client_name": client_name,
                "round": contribution_round,
                "aggregation_weight": aggregation_weight,
                "n_iter": n_iter,
            }
        )
        self.log_debug(fl_ctx, "End accept")
        return True

    def aggregate(self, fl_ctx: FLContext) -> Shareable:
        """Called when the workflow decides to generate a shareable to send back to clients.

        Args:
            fl_ctx (FLContext): context provided by workflow

        Returns:
            Shareable: the weighted mean of accepted shareables from clients
        """
        self.log_debug(fl_ctx, "Start aggregation")
        current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
        self.log_info(fl_ctx, f"aggregating {len(self.history)} update(s) at round {current_round}")
        self.log_debug(fl_ctx, f"complete history {self.history}")
        # Weighted mean: each variable's accumulated weighted sum divided by
        # its accumulated n_iter.
        aggregated_dict = {k: v / self.counts[k] for k, v in self.total.items()}
        self.reset_stats()  # ready for the next round
        self.log_debug(fl_ctx, "End aggregation")
        dxo = DXO(data_kind=self.expected_data_kind, data=aggregated_dict)
        return dxo.to_shareable()
import re
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import ReservedKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.abstract.aggregator import Aggregator
from nvflare.app_common.app_constant import AppConstants
class InTimeAccumulateWeightedAggregator(Aggregator):
    """Aggregator that accumulates weighted client contributions as they arrive.

    ``accept`` accumulates, per variable,
        weighted_sum   = sum(value * n_iteration * aggregation_weight)
        sum_of_weights = sum(n_iteration)
    and ``aggregate`` returns ``weighted_sum / sum_of_weights``.
    """

    def __init__(self, exclude_vars=None, aggregation_weights=None, expected_data_kind=DataKind.WEIGHT_DIFF):
        """Perform accumulated weighted aggregation.

        Args:
            exclude_vars (str, optional): regex to match excluded vars during aggregation. Defaults to None.
            aggregation_weights (dict, optional): maps client name to its aggregation weight. Defaults to None.
            expected_data_kind: must be DataKind.WEIGHT_DIFF or DataKind.WEIGHTS.

        Raises:
            ValueError: if expected_data_kind is neither WEIGHT_DIFF nor WEIGHTS.
        """
        super().__init__()
        self.exclude_vars = re.compile(exclude_vars) if exclude_vars else None
        self.aggregation_weights = aggregation_weights or {}
        if expected_data_kind not in [DataKind.WEIGHT_DIFF, DataKind.WEIGHTS]:
            raise ValueError(f"{expected_data_kind=} not in WEIGHT_DIFF or WEIGHTS")
        self.expected_data_kind = expected_data_kind
        self.logger.debug(f"aggregation weights control: {aggregation_weights}")
        # How many times each client triggered a missing-metadata warning, and
        # the cap after which such warnings are suppressed.
        self.warning_count = {}
        self.warning_limit = 10
        # Per-variable accumulators; reset_stats() (re-)creates them each round.
        self.total = dict()
        self.counts = dict()
        self.history = list()
        self.reset_stats()

    def reset_stats(self):
        """Clear the per-round accumulators."""
        self.total = {}
        self.counts = {}
        self.history = []

    def accept(self, shareable: Shareable, fl_ctx: FLContext) -> bool:
        """Store shareable and update aggregator's internal state.

        Args:
            shareable: information from client
            fl_ctx: context provided by workflow

        Returns:
            bool: whether this shareable was accepted into the aggregate.
        """
        try:
            dxo = from_shareable(shareable)
        # Fix: narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
        except Exception:
            self.log_exception(fl_ctx, "shareable data is not a valid DXO")
            return False
        assert isinstance(dxo, DXO)
        if dxo.data_kind not in (DataKind.WEIGHT_DIFF, DataKind.WEIGHTS):
            self.log_error(fl_ctx, "cannot handle data kind {}".format(dxo.data_kind))
            return False
        if dxo.data_kind != self.expected_data_kind:
            self.log_error(fl_ctx, "expect {} but got {}".format(self.expected_data_kind, dxo.data_kind))
            return False
        if (processed_algorithm := dxo.get_meta_prop(MetaKey.PROCESSED_ALGORITHM)) is not None:
            self.log_error(fl_ctx, f"unable to accept shareable processed by {processed_algorithm}")
            return False
        current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
        self.log_debug(fl_ctx, f"current_round: {current_round}")
        client_name = shareable.get_peer_prop(key=ReservedKey.IDENTITY_NAME, default="?")
        contribution_round = shareable.get_header(AppConstants.CONTRIBUTION_ROUND)
        rc = shareable.get_return_code()
        if rc and rc != ReturnCode.OK:
            self.log_info(fl_ctx, f"Client {client_name} returned rc: {rc}. Disregarding contribution.")
            return False
        data = dxo.data
        if data is None:
            self.log_error(fl_ctx, "no data to aggregate")
            return False
        n_iter = dxo.get_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND)
        if contribution_round != current_round:
            self.log_info(
                fl_ctx,
                f"discarding shareable from {client_name} at round: {contribution_round}. Current round is: {current_round}",
            )
            return False
        # Accept at most one contribution per client per round.
        for item in self.history:
            if client_name == item["client_name"]:
                prev_round = item["round"]
                self.log_info(
                    fl_ctx,
                    f"discarding shareable from {client_name} at round: {contribution_round} as {prev_round} accepted already",
                )
                return False
        if n_iter is None:
            if self.warning_count.get(client_name, 0) <= self.warning_limit:
                self.log_warning(
                    fl_ctx,
                    f"NUM_STEPS_CURRENT_ROUND missing in meta of shareable"
                    f" from {client_name} and set to default value, 1.0. "
                    f" This kind of message will show {self.warning_limit} times at most.",
                )
            # Fix: the original reset the count to 0 on the first miss, letting
            # the warning fire warning_limit + 2 times; count every occurrence.
            self.warning_count[client_name] = self.warning_count.get(client_name, 0) + 1
            n_iter = 1.0
        float_n_iter = float(n_iter)
        aggregation_weight = self.aggregation_weights.get(client_name)
        if aggregation_weight is None:
            if self.warning_count.get(client_name, 0) <= self.warning_limit:
                self.log_warning(
                    fl_ctx,
                    f"Aggregation_weight missing for {client_name} and set to default value, 1.0"
                    f" This kind of message will show {self.warning_limit} times at most.",
                )
            self.warning_count[client_name] = self.warning_count.get(client_name, 0) + 1
            aggregation_weight = 1.0
        for k, v in data.items():
            # Skip variables matched by the configured exclusion regex.
            if self.exclude_vars is not None and self.exclude_vars.search(k):
                continue
            weighted_value = v * aggregation_weight * float_n_iter
            current_total = self.total.get(k, None)
            if current_total is None:
                self.total[k] = weighted_value
                self.counts[k] = n_iter
            else:
                self.total[k] = current_total + weighted_value
                self.counts[k] = self.counts[k] + n_iter
        self.history.append(
            {
                "client_name": client_name,
                "round": contribution_round,
                "aggregation_weight": aggregation_weight,
                "n_iter": n_iter,
            }
        )
        self.log_debug(fl_ctx, "End accept")
        return True

    def aggregate(self, fl_ctx: FLContext) -> Shareable:
        """Called when the workflow decides to generate a shareable to send back to clients.

        Args:
            fl_ctx (FLContext): context provided by workflow

        Returns:
            Shareable: the weighted mean of accepted shareables from clients
        """
        self.log_debug(fl_ctx, "Start aggregation")
        current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
        self.log_info(fl_ctx, f"aggregating {len(self.history)} update(s) at round {current_round}")
        self.log_debug(fl_ctx, f"complete history {self.history}")
        # Weighted mean per variable: accumulated weighted sum / accumulated weight.
        aggregated_dict = {k: v / self.counts[k] for k, v in self.total.items()}
        self.reset_stats()
        self.log_debug(fl_ctx, "End aggregation")
        dxo = DXO(data_kind=self.expected_data_kind, data=aggregated_dict)
        return dxo.to_shareable()
import json
import re
from typing import Callable, Iterator, List, Optional, Union
import scrapy
from scrapy.http import Request, Response
from scrapy.linkextractors import IGNORED_EXTENSIONS
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
from scrapy.spidermiddlewares.httperror import HttpError
from scrapy.utils.url import url_has_any_extension
from twisted.python.failure import Failure
EXCLUDED_URLS = [
# Google calendar returns 404s on HEAD requests unconditionally
'https://calendar.google.com/calendar/embed?src=<EMAIL>',
# Returns 409 errors to HEAD requests frequently
'https://medium.freecodecamp.org/',
# Returns 404 to HEAD requests unconditionally
'https://www.git-tower.com/blog/command-line-cheat-sheet/',
'https://marketplace.visualstudio.com/items?itemName=rafaelmaiolla.remote-vscode',
# Requires authentication
'https://circleci.com/gh/zulip/zulip/tree/master',
'https://circleci.com/gh/zulip/zulip/16617',
'https://www.linkedin.com/company/zulip-project',
# Returns 403 errors to HEAD requests
'https://giphy.com',
'https://giphy.com/apps/giphycapture',
'https://www.udemy.com/course/the-complete-react-native-and-redux-course/',
]
VNU_IGNORE = [
# Real errors that should be fixed.
r'Duplicate ID “[^”]*”\.',
r'The first occurrence of ID “[^”]*” was here\.',
r'Attribute “markdown” not allowed on element “div” at this point\.',
r'No “p” element in scope but a “p” end tag seen\.',
r'Element “div” not allowed as child of element “ul” in this context\. '
+ r'\(Suppressing further errors from this subtree\.\)',
# Warnings that are probably less important.
r'The “type” attribute is unnecessary for JavaScript resources\.',
]
VNU_IGNORE_REGEX = re.compile(r'|'.join(VNU_IGNORE))
class BaseDocumentationSpider(scrapy.Spider):
name: Optional[str] = None
# Exclude domain address.
deny_domains: List[str] = []
start_urls: List[str] = []
deny: List[str] = []
file_extensions: List[str] = ['.' + ext for ext in IGNORED_EXTENSIONS]
tags = ('a', 'area', 'img')
attrs = ('href', 'src')
def _has_extension(self, url: str) -> bool:
return url_has_any_extension(url, self.file_extensions)
def _is_external_url(self, url: str) -> bool:
return url.startswith('http') or self._has_extension(url)
def check_existing(self, response: Response) -> None:
self.log(response)
def _is_external_link(self, url: str) -> bool:
if "zulip.readthedocs" in url or "zulip.com" in url or "zulip.org" in url:
# We want CI to check any links to Zulip sites.
return False
if (len(url) > 4 and url[:4] == "file") or ("localhost" in url):
# We also want CI to check any links to built documentation.
return False
if 'github.com/zulip' in url:
# We want to check these links but due to rate limiting from GitHub, these checks often
# fail in the CI. Thus, we should treat these as external links for now.
# TODO: Figure out how to test github.com/zulip links in CI.
return True
return True
def check_fragment(self, response: Response) -> None:
self.log(response)
xpath_template = "//*[@id='{fragment}' or @name='{fragment}']"
m = re.match(r".+\#(?P<fragment>.*)$", response.request.url) # Get fragment value.
if not m:
return
fragment = m.group('fragment')
# Check fragment existing on response page.
if not response.selector.xpath(xpath_template.format(fragment=fragment)):
self.logger.error(
"Fragment #%s is not found on page %s", fragment, response.request.url)
def _vnu_callback(self, url: str) -> Callable[[Response], None]:
def callback(response: Response) -> None:
vnu_out = json.loads(response.text)
for message in vnu_out['messages']:
if not VNU_IGNORE_REGEX.fullmatch(message['message']):
self.logger.error(
'"%s":%d.%d-%d.%d: %s: %s',
url,
message.get('firstLine', message['lastLine']),
message.get('firstColumn', message['lastColumn']),
message['lastLine'],
message['lastColumn'],
message['type'],
message['message'],
)
return callback
def _make_requests(self, url: str) -> Iterator[Request]:
# These URLs are for Zulip's webapp, which with recent changes
# can be accessible without login an account. While we do
# crawl documentation served by the webapp (E.g. /help/), we
# don't want to crawl the webapp itself, so we exclude these.
if url in ['http://localhost:9981/', 'http://localhost:9981'] or url.startswith('http://localhost:9981/#') or url.startswith('http://localhost:9981#'):
return
callback: Callable[[Response], Optional[Iterator[Request]]] = self.parse
dont_filter = False
method = 'GET'
if self._is_external_url(url):
callback = self.check_existing
method = 'HEAD'
elif '#' in url:
dont_filter = True
callback = self.check_fragment
if getattr(self, 'skip_external', False) and self._is_external_link(url):
return
yield Request(url, method=method, callback=callback, dont_filter=dont_filter,
errback=self.error_callback)
def start_requests(self) -> Iterator[Request]:
for url in self.start_urls:
yield from self._make_requests(url)
def parse(self, response: Response) -> Iterator[Request]:
self.log(response)
if getattr(self, 'validate_html', False):
yield Request(
'http://127.0.0.1:9988/?out=json',
method='POST',
headers={'Content-Type': response.headers['Content-Type']},
body=response.body,
callback=self._vnu_callback(response.url),
errback=self.error_callback,
)
for link in LxmlLinkExtractor(deny_domains=self.deny_domains, deny_extensions=['doc'],
tags=self.tags, attrs=self.attrs, deny=self.deny,
canonicalize=False).extract_links(response):
yield from self._make_requests(link.url)
def retry_request_with_get(self, request: Request) -> Iterator[Request]:
request.method = 'GET'
request.dont_filter = True
yield request
def exclude_error(self, url: str) -> bool:
return url in EXCLUDED_URLS
def error_callback(self, failure: Failure) -> Optional[Union[Failure, Iterator[Request]]]:
if failure.check(HttpError):
response = failure.value.response
if self.exclude_error(response.url):
return None
if response.status == 405 and response.request.method == 'HEAD':
# Method 'HEAD' not allowed, repeat request with 'GET'
return self.retry_request_with_get(response.request)
self.logger.error("Please check link: %s", response.request.url)
return failure | tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py | import json
import re
from typing import Callable, Iterator, List, Optional, Union
import scrapy
from scrapy.http import Request, Response
from scrapy.linkextractors import IGNORED_EXTENSIONS
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
from scrapy.spidermiddlewares.httperror import HttpError
from scrapy.utils.url import url_has_any_extension
from twisted.python.failure import Failure
# URLs whose check failures are known false positives; error_callback skips them.
EXCLUDED_URLS = [
    # Google calendar returns 404s on HEAD requests unconditionally
    'https://calendar.google.com/calendar/embed?src=<EMAIL>',
    # Returns 409 errors to HEAD requests frequently
    'https://medium.freecodecamp.org/',
    # Returns 404 to HEAD requests unconditionally
    'https://www.git-tower.com/blog/command-line-cheat-sheet/',
    'https://marketplace.visualstudio.com/items?itemName=rafaelmaiolla.remote-vscode',
    # Requires authentication
    'https://circleci.com/gh/zulip/zulip/tree/master',
    'https://circleci.com/gh/zulip/zulip/16617',
    'https://www.linkedin.com/company/zulip-project',
    # Returns 403 errors to HEAD requests
    'https://giphy.com',
    'https://giphy.com/apps/giphycapture',
    'https://www.udemy.com/course/the-complete-react-native-and-redux-course/',
]
# vnu (Nu HTML Checker) message patterns to suppress; joined into one
# alternation regex below and matched with .fullmatch().
VNU_IGNORE = [
    # Real errors that should be fixed.
    r'Duplicate ID “[^”]*”\.',
    r'The first occurrence of ID “[^”]*” was here\.',
    r'Attribute “markdown” not allowed on element “div” at this point\.',
    r'No “p” element in scope but a “p” end tag seen\.',
    r'Element “div” not allowed as child of element “ul” in this context\. '
    + r'\(Suppressing further errors from this subtree\.\)',
    # Warnings that are probably less important.
    r'The “type” attribute is unnecessary for JavaScript resources\.',
]
VNU_IGNORE_REGEX = re.compile(r'|'.join(VNU_IGNORE))
class BaseDocumentationSpider(scrapy.Spider):
    """Crawl documentation pages, checking links, URL fragments, and
    (optionally) validating HTML through a local vnu server."""

    name: Optional[str] = None
    # Exclude domain address.
    deny_domains: List[str] = []
    start_urls: List[str] = []
    deny: List[str] = []
    file_extensions: List[str] = ['.' + ext for ext in IGNORED_EXTENSIONS]
    tags = ('a', 'area', 'img')
    attrs = ('href', 'src')

    def _has_extension(self, url: str) -> bool:
        """Return True if the URL ends in a file extension we never crawl."""
        return url_has_any_extension(url, self.file_extensions)

    def _is_external_url(self, url: str) -> bool:
        """True for absolute http(s) URLs or file-typed URLs (HEAD-checked only)."""
        return url.startswith('http') or self._has_extension(url)

    def check_existing(self, response: Response) -> None:
        """Callback for external links: a successful response is just logged."""
        self.log(response)

    def _is_external_link(self, url: str) -> bool:
        """Return True for links that may be skipped when `skip_external` is set."""
        if "zulip.readthedocs" in url or "zulip.com" in url or "zulip.org" in url:
            # We want CI to check any links to Zulip sites.
            return False
        if (len(url) > 4 and url[:4] == "file") or ("localhost" in url):
            # We also want CI to check any links to built documentation.
            return False
        # We want to check github.com/zulip links but, due to rate limiting from
        # GitHub, these checks often fail in CI, so they are treated as external
        # like everything else.  (The original special-cased github.com/zulip
        # only to return True — same as the fall-through — so that redundant
        # branch has been folded away with no behavior change.)
        # TODO: Figure out how to test github.com/zulip links in CI.
        return True

    def check_fragment(self, response: Response) -> None:
        """Verify that the #fragment in the request URL exists on the page."""
        self.log(response)
        xpath_template = "//*[@id='{fragment}' or @name='{fragment}']"
        m = re.match(r".+\#(?P<fragment>.*)$", response.request.url)  # Get fragment value.
        if not m:
            return
        fragment = m.group('fragment')
        # Check fragment existing on response page.
        if not response.selector.xpath(xpath_template.format(fragment=fragment)):
            self.logger.error(
                "Fragment #%s is not found on page %s", fragment, response.request.url)

    def _vnu_callback(self, url: str) -> Callable[[Response], None]:
        """Build a callback that logs vnu messages not on the ignore list."""
        def callback(response: Response) -> None:
            vnu_out = json.loads(response.text)
            for message in vnu_out['messages']:
                if not VNU_IGNORE_REGEX.fullmatch(message['message']):
                    self.logger.error(
                        '"%s":%d.%d-%d.%d: %s: %s',
                        url,
                        message.get('firstLine', message['lastLine']),
                        message.get('firstColumn', message['lastColumn']),
                        message['lastLine'],
                        message['lastColumn'],
                        message['type'],
                        message['message'],
                    )
        return callback

    def _make_requests(self, url: str) -> Iterator[Request]:
        """Yield an appropriately configured Request (GET/HEAD, fragment check) for a URL."""
        # These URLs are for Zulip's webapp, which with recent changes
        # can be accessible without login an account. While we do
        # crawl documentation served by the webapp (E.g. /help/), we
        # don't want to crawl the webapp itself, so we exclude these.
        if url in ['http://localhost:9981/', 'http://localhost:9981'] or url.startswith('http://localhost:9981/#') or url.startswith('http://localhost:9981#'):
            return
        callback: Callable[[Response], Optional[Iterator[Request]]] = self.parse
        dont_filter = False
        method = 'GET'
        if self._is_external_url(url):
            # External links get a cheap HEAD existence check.
            callback = self.check_existing
            method = 'HEAD'
        elif '#' in url:
            # Fragment links must be re-fetched even if the page was seen.
            dont_filter = True
            callback = self.check_fragment
        if getattr(self, 'skip_external', False) and self._is_external_link(url):
            return
        yield Request(url, method=method, callback=callback, dont_filter=dont_filter,
                      errback=self.error_callback)

    def start_requests(self) -> Iterator[Request]:
        """Seed the crawl from start_urls."""
        for url in self.start_urls:
            yield from self._make_requests(url)

    def parse(self, response: Response) -> Iterator[Request]:
        """Optionally validate the page's HTML, then follow its links."""
        self.log(response)
        if getattr(self, 'validate_html', False):
            yield Request(
                'http://127.0.0.1:9988/?out=json',
                method='POST',
                headers={'Content-Type': response.headers['Content-Type']},
                body=response.body,
                callback=self._vnu_callback(response.url),
                errback=self.error_callback,
            )
        for link in LxmlLinkExtractor(deny_domains=self.deny_domains, deny_extensions=['doc'],
                                      tags=self.tags, attrs=self.attrs, deny=self.deny,
                                      canonicalize=False).extract_links(response):
            yield from self._make_requests(link.url)

    def retry_request_with_get(self, request: Request) -> Iterator[Request]:
        """Re-issue a failed HEAD request as a GET."""
        request.method = 'GET'
        request.dont_filter = True
        yield request

    def exclude_error(self, url: str) -> bool:
        """True if failures for this URL are known false positives."""
        return url in EXCLUDED_URLS

    def error_callback(self, failure: Failure) -> Optional[Union[Failure, Iterator[Request]]]:
        """Handle request failures: skip excluded URLs, retry 405s as GET, else log.

        Fix: stripped dataset-separator garbage that had been appended to the
        final return statement.
        """
        if failure.check(HttpError):
            response = failure.value.response
            if self.exclude_error(response.url):
                return None
            if response.status == 405 and response.request.method == 'HEAD':
                # Method 'HEAD' not allowed, repeat request with 'GET'
                return self.retry_request_with_get(response.request)
            self.logger.error("Please check link: %s", response.request.url)
        return failure
from typing import Dict, Union, Callable
import tensorflow as tf
from opengnn.inputters.inputter import Inputter
from opengnn.utils.data import shifted_batch, get_padded_shapes, diverse_batch
from opengnn.utils.misc import count_lines
class TokenEmbedder(Inputter):
def __init__(self,
vocabulary_file_key,
embedding_size: int,
dropout_rate: Union[int, tf.Tensor] = 0.0,
truncated_sentence_size: int = None,
lowercase=True,
trainable: bool = True,
dtype: tf.DType = tf.float32):
"""
Args:
vocabulary_file ([type]): [description]
embedding_size ([type]): [description]
subtokens (bool, optional): Defaults to False. [description]
trainable (bool, optional): Defaults to True. [description]
dtype ([type], optional): Defaults to tf.float32. [description]
"""
super().__init__()
self.vocabulary_file_key = vocabulary_file_key
self.embedding_size = embedding_size
self.dropout_rate = dropout_rate
self.truncated_sentence_size = truncated_sentence_size
self.trainable = trainable
self.dtype = dtype
self.lowercase = lowercase
def extract_tensors(self):
def _tensor_extractor(sample):
size = len(sample) if self.truncated_sentence_size is None else self.truncated_sentence_size
return {"labels": [token.lower() if self.lowercase else token for token in sample][:size]}
tensor_types = {"labels": tf.string}
tensor_shapes = {"labels": tf.TensorShape([None])}
return _tensor_extractor, tensor_types, tensor_shapes
def initialize(self, metadata):
super().initialize(metadata)
self.vocabulary_file = metadata[self.vocabulary_file_key]
self.vocabulary_size = count_lines(
self.vocabulary_file) + 1
self.vocabulary = tf.contrib.lookup.index_table_from_file(
self.vocabulary_file,
vocab_size=self.vocabulary_size - 1,
num_oov_buckets=1)
def _process(self, data, input_data):
length = tf.shape(data['labels'])[0]
ids = self.vocabulary.lookup(data['labels'])
data['ids'] = ids
data['length'] = length
del data['labels']
return data
def batch(self, dataset, batch_size):
return dataset.padded_batch(
batch_size, get_padded_shapes(dataset))
def transform(self, inputs, mode):
ids, _ = inputs
try:
with tf.variable_scope('embedding', reuse=tf.AUTO_REUSE):
embeddings = tf.get_variable(
"t_embs", dtype=self.dtype, trainable=self.trainable)
except ValueError:
with tf.variable_scope('embedding', reuse=tf.AUTO_REUSE):
shape = [self.vocabulary_size, self.embedding_size]
embeddings = tf.get_variable(
"t_embs",
shape=shape,
dtype=self.dtype,
trainable=self.trainable)
embeddings = tf.nn.embedding_lookup(embeddings, ids)
if (isinstance(self.dropout_rate, tf.Tensor) or
self.dropout_rate > 0 and mode == tf.estimator.ModeKeys.TRAIN):
embeddings = tf.layers.dropout(
embeddings,
rate=self.dropout_rate)
return embeddings
def get_example_size(self, example):
return example['length']
class SubtokenEmbedder(TokenEmbedder):
def __init__(self,
subtokenizer: Callable,
vocabulary_file_key,
embedding_size: int,
dropout_rate: Union[int, tf.Tensor] = 0.0,
lowercase=True,
trainable: bool = True,
dtype: tf.DType = tf.float32):
super().__init__(
vocabulary_file_key,
embedding_size,
dropout_rate,
lowercase,
trainable,
dtype)
self.subtokenizer = subtokenizer
def extract_tensors(self):
#TODO: Truncation
def _tensor_extractor(sample):
indices, labels = [], []
for i, token in enumerate(sample):
for subtoken in self.subtokenizer(token):
indices.append(i)
labels.append(subtoken.lower() if self.lowercase else subtoken)
return {"indices": indices, "labels": labels, "length": len(sample)}
tensor_types = {
"length": tf.int32,
"labels": tf.string,
"indices": tf.int32
}
tensor_shapes = {
"length": tf.TensorShape([]),
"labels": tf.TensorShape([None]),
"indices": tf.TensorShape([None])
}
return _tensor_extractor, tensor_types, tensor_shapes
def _process(self, data: Dict[str, tf.Tensor], input_data)-> Dict[str, tf.Tensor]:
indices = tf.cast(tf.expand_dims(data['indices'], 1), tf.int64)
ids = tf.cast(self.vocabulary.lookup(data['labels']), tf.int64)
ids = tf.SparseTensor(indices, ids, (tf.cast(data['length'], tf.int64),))
data['ids'] = ids
del data['indices']
del data['labels']
return data
def batch(self, dataset, batch_size):
batch_fn_map = {"features": shifted_batch}
return diverse_batch(dataset, batch_size, batch_fn_map)
def transform(self, inputs, mode):
ids, lengths = inputs
try:
embeddings = tf.get_variable(
"t_embs", dtype=self.dtype, trainable=self.trainable)
except ValueError:
shape = [self.vocabulary_size, self.embedding_size]
embeddings = tf.get_variable(
"t_embs",
shape=shape,
dtype=self.dtype,
trainable=self.trainable)
max_vertices = tf.reduce_max(lengths)
features = tf.nn.embedding_lookup_sparse(
embeddings, ids, None,
combiner="mean")
return tf.reshape(features, (-1, max_vertices, self.embedding_size)) | opengnn/inputters/token_embedder.py | from typing import Dict, Union, Callable
import tensorflow as tf
from opengnn.inputters.inputter import Inputter
from opengnn.utils.data import shifted_batch, get_padded_shapes, diverse_batch
from opengnn.utils.misc import count_lines
class TokenEmbedder(Inputter):
    """Inputter that maps token sequences to dense embeddings via a vocabulary lookup."""

    def __init__(self,
                 vocabulary_file_key,
                 embedding_size: int,
                 dropout_rate: Union[int, tf.Tensor] = 0.0,
                 truncated_sentence_size: int = None,
                 lowercase=True,
                 trainable: bool = True,
                 dtype: tf.DType = tf.float32):
        """
        Args:
            vocabulary_file_key: metadata key under which the vocabulary file path is stored.
            embedding_size: dimensionality of each token embedding.
            dropout_rate: dropout rate applied to looked-up embeddings (scalar or tensor).
            truncated_sentence_size: if set, samples are truncated to this many tokens.
            lowercase: whether tokens are lowercased before vocabulary lookup.
            trainable: whether the embedding matrix is trainable.
            dtype: dtype of the embedding variable. Defaults to tf.float32.
        """
        super().__init__()
        self.vocabulary_file_key = vocabulary_file_key
        self.embedding_size = embedding_size
        self.dropout_rate = dropout_rate
        self.truncated_sentence_size = truncated_sentence_size
        self.trainable = trainable
        self.dtype = dtype
        self.lowercase = lowercase

    def extract_tensors(self):
        """Return (extractor_fn, tensor_types, tensor_shapes) for raw samples."""
        def _tensor_extractor(sample):
            # Optionally lowercase, then truncate to truncated_sentence_size tokens.
            size = len(sample) if self.truncated_sentence_size is None else self.truncated_sentence_size
            return {"labels": [token.lower() if self.lowercase else token for token in sample][:size]}
        tensor_types = {"labels": tf.string}
        tensor_shapes = {"labels": tf.TensorShape([None])}
        return _tensor_extractor, tensor_types, tensor_shapes

    def initialize(self, metadata):
        """Load the vocabulary path from metadata and build the lookup table."""
        super().initialize(metadata)
        self.vocabulary_file = metadata[self.vocabulary_file_key]
        # +1 reserves one out-of-vocabulary bucket beyond the file's entries.
        self.vocabulary_size = count_lines(
            self.vocabulary_file) + 1
        self.vocabulary = tf.contrib.lookup.index_table_from_file(
            self.vocabulary_file,
            vocab_size=self.vocabulary_size - 1,
            num_oov_buckets=1)

    def _process(self, data, input_data):
        """Replace string labels with vocabulary ids and record sequence length."""
        length = tf.shape(data['labels'])[0]
        ids = self.vocabulary.lookup(data['labels'])
        data['ids'] = ids
        data['length'] = length
        del data['labels']
        return data

    def batch(self, dataset, batch_size):
        """Pad-batch the variable-length sequences."""
        return dataset.padded_batch(
            batch_size, get_padded_shapes(dataset))

    def transform(self, inputs, mode):
        """Look up embeddings for a batch of ids, with optional dropout."""
        ids, _ = inputs
        try:
            # Reuse the embedding matrix if it already exists in this scope;
            # get_variable without a shape raises ValueError when it does not,
            # in which case the except branch creates it.
            with tf.variable_scope('embedding', reuse=tf.AUTO_REUSE):
                embeddings = tf.get_variable(
                    "t_embs", dtype=self.dtype, trainable=self.trainable)
        except ValueError:
            with tf.variable_scope('embedding', reuse=tf.AUTO_REUSE):
                shape = [self.vocabulary_size, self.embedding_size]
                embeddings = tf.get_variable(
                    "t_embs",
                    shape=shape,
                    dtype=self.dtype,
                    trainable=self.trainable)
        embeddings = tf.nn.embedding_lookup(embeddings, ids)
        if (isinstance(self.dropout_rate, tf.Tensor) or
                self.dropout_rate > 0 and mode == tf.estimator.ModeKeys.TRAIN):
            # NOTE(review): tf.layers.dropout defaults to training=False, so this
            # call is a no-op unless a `training` flag is supplied — confirm intent.
            embeddings = tf.layers.dropout(
                embeddings,
                rate=self.dropout_rate)
        return embeddings

    def get_example_size(self, example):
        """Number of tokens in the example (used for batching heuristics)."""
        return example['length']
class SubtokenEmbedder(TokenEmbedder):
    """TokenEmbedder variant that embeds subtokens and averages them per token."""

    def __init__(self,
                 subtokenizer: Callable,
                 vocabulary_file_key,
                 embedding_size: int,
                 dropout_rate: Union[int, tf.Tensor] = 0.0,
                 lowercase=True,
                 trainable: bool = True,
                 dtype: tf.DType = tf.float32):
        """
        Args:
            subtokenizer: callable splitting a token string into subtoken strings.
            vocabulary_file_key: metadata key under which the vocabulary file path is stored.
            embedding_size: dimensionality of each subtoken embedding.
            dropout_rate: dropout rate applied to looked-up embeddings (scalar or tensor).
            lowercase: whether subtokens are lowercased before vocabulary lookup.
            trainable: whether the embedding matrix is trainable.
            dtype: dtype of the embedding variable. Defaults to tf.float32.
        """
        # BUG FIX: the original forwarded these arguments positionally, so
        # `lowercase` was bound to TokenEmbedder's `truncated_sentence_size`
        # parameter and `trainable`/`dtype` were each shifted one slot left.
        # Forward by keyword so every value reaches the intended parameter.
        super().__init__(
            vocabulary_file_key,
            embedding_size,
            dropout_rate=dropout_rate,
            lowercase=lowercase,
            trainable=trainable,
            dtype=dtype)
        self.subtokenizer = subtokenizer

    def extract_tensors(self):
        """Return (extractor_fn, types, shapes) producing subtoken labels plus token indices."""
        # TODO: Truncation (not supported for subtoken inputs yet).
        def _tensor_extractor(sample):
            indices, labels = [], []
            for i, token in enumerate(sample):
                # Record, for each subtoken, the index of the token it came from.
                for subtoken in self.subtokenizer(token):
                    indices.append(i)
                    labels.append(subtoken.lower() if self.lowercase else subtoken)
            return {"indices": indices, "labels": labels, "length": len(sample)}
        tensor_types = {
            "length": tf.int32,
            "labels": tf.string,
            "indices": tf.int32
        }
        tensor_shapes = {
            "length": tf.TensorShape([]),
            "labels": tf.TensorShape([None]),
            "indices": tf.TensorShape([None])
        }
        return _tensor_extractor, tensor_types, tensor_shapes

    def _process(self, data: Dict[str, tf.Tensor], input_data) -> Dict[str, tf.Tensor]:
        """Convert subtoken labels into a SparseTensor of ids keyed by token index."""
        indices = tf.cast(tf.expand_dims(data['indices'], 1), tf.int64)
        ids = tf.cast(self.vocabulary.lookup(data['labels']), tf.int64)
        ids = tf.SparseTensor(indices, ids, (tf.cast(data['length'], tf.int64),))
        data['ids'] = ids
        del data['indices']
        del data['labels']
        return data

    def batch(self, dataset, batch_size):
        """Batch with sparse-id index shifting so token indices stay aligned."""
        batch_fn_map = {"features": shifted_batch}
        return diverse_batch(dataset, batch_size, batch_fn_map)

    def transform(self, inputs, mode):
        """Average subtoken embeddings per token; reshape to (batch, tokens, dim)."""
        ids, lengths = inputs
        try:
            # Reuse the embedding matrix if it exists; otherwise create it below.
            embeddings = tf.get_variable(
                "t_embs", dtype=self.dtype, trainable=self.trainable)
        except ValueError:
            shape = [self.vocabulary_size, self.embedding_size]
            embeddings = tf.get_variable(
                "t_embs",
                shape=shape,
                dtype=self.dtype,
                trainable=self.trainable)
        max_vertices = tf.reduce_max(lengths)
        # combiner="mean" averages all subtoken embeddings belonging to a token.
        features = tf.nn.embedding_lookup_sparse(
            embeddings, ids, None,
            combiner="mean")
        return tf.reshape(features, (-1, max_vertices, self.embedding_size))
"""Test the Report Queries."""
from tenant_schemas.utils import tenant_context
from api.functions import JSONBObjectKeys
from api.iam.test.iam_test_case import IamTestCase
from api.tags.gcp.queries import GCPTagQueryHandler
from api.tags.gcp.view import GCPTagView
from reporting.models import GCPCostEntryLineItemDailySummary
from reporting.models import GCPTagsSummary
from reporting.provider.gcp.models import GCPTagsValues
class GCPTagQueryHandlerTest(IamTestCase):
"""Tests for the Azure report query handler."""
def setUp(self):
    """Set up the customer view tests."""
    # NOTE(review): this override only delegates to the parent and could be
    # removed without changing behavior.
    super().setUp()
def test_execute_query_no_query_parameters(self):
    """An empty query string falls back to the 10-day daily defaults."""
    handler = GCPTagQueryHandler(self.mocked_query_params("?", GCPTagView))
    output = handler.execute_query()
    self.assertIsNotNone(output.get("data"))
    self.assertEqual(handler.time_scope_units, "day")
    self.assertEqual(handler.time_scope_value, -10)
def test_execute_query_10_day_parameters(self):
    """A 10-day daily-resolution query executes and sets the expected time scope."""
    url = "?filter[time_scope_units]=day&filter[time_scope_value]=-10&filter[resolution]=daily"
    handler = GCPTagQueryHandler(self.mocked_query_params(url, GCPTagView))
    output = handler.execute_query()
    self.assertIsNotNone(output.get("data"))
    self.assertEqual(handler.time_scope_units, "day")
    self.assertEqual(handler.time_scope_value, -10)
def test_execute_query_30_day_parameters(self):
    """A 30-day daily-resolution query executes and sets the expected time scope."""
    url = "?filter[time_scope_units]=day&filter[time_scope_value]=-30&filter[resolution]=daily"
    handler = GCPTagQueryHandler(self.mocked_query_params(url, GCPTagView))
    output = handler.execute_query()
    self.assertIsNotNone(output.get("data"))
    self.assertEqual(handler.time_scope_units, "day")
    self.assertEqual(handler.time_scope_value, -30)
def test_execute_query_10_day_parameters_only_keys(self):
    """A 10-day query with key_only=True executes and sets the expected time scope."""
    url = "?filter[time_scope_units]=day&filter[time_scope_value]=-10&filter[resolution]=daily&key_only=True"
    handler = GCPTagQueryHandler(self.mocked_query_params(url, GCPTagView))
    output = handler.execute_query()
    self.assertIsNotNone(output.get("data"))
    self.assertEqual(handler.time_scope_units, "day")
    self.assertEqual(handler.time_scope_value, -10)
def test_execute_query_month_parameters(self):
    """A single-month monthly-resolution query executes with the expected scope."""
    url = "?filter[resolution]=monthly&filter[time_scope_value]=-1&filter[time_scope_units]=month"
    handler = GCPTagQueryHandler(self.mocked_query_params(url, GCPTagView))
    output = handler.execute_query()
    self.assertIsNotNone(output.get("data"))
    self.assertEqual(handler.time_scope_units, "month")
    self.assertEqual(handler.time_scope_value, -1)
def test_execute_query_two_month_parameters(self):
    """A two-month monthly-resolution query executes with the expected scope."""
    url = "?filter[time_scope_units]=month&filter[time_scope_value]=-2&filter[resolution]=monthly"
    handler = GCPTagQueryHandler(self.mocked_query_params(url, GCPTagView))
    output = handler.execute_query()
    self.assertIsNotNone(output.get("data"))
    self.assertEqual(handler.time_scope_units, "month")
    self.assertEqual(handler.time_scope_value, -2)
def test_execute_query_for_project(self):
    """A 10-day daily query filtered to a known account executes correctly."""
    with tenant_context(self.tenant):
        row = GCPCostEntryLineItemDailySummary.objects.values("account_id").first()
        account = row.get("account_id")
    url = f"?filter[time_scope_units]=day&filter[time_scope_value]=-10&filter[resolution]=daily&filter[account]={account}"  # noqa: E501
    handler = GCPTagQueryHandler(self.mocked_query_params(url, GCPTagView))
    output = handler.execute_query()
    self.assertIsNotNone(output.get("data"))
    self.assertEqual(handler.time_scope_units, "day")
    self.assertEqual(handler.time_scope_value, -10)
def test_get_tag_keys_filter_true(self):
"""Test that not all tag keys are returned with a filter."""
url = "?filter[time_scope_units]=month&filter[time_scope_value]=-2&filter[resolution]=monthly"
query_params = self.mocked_query_params(url, GCPTagView)
handler = GCPTagQueryHandler(query_params)
tag_keys = set()
with tenant_context(self.tenant):
tags = (
GCPCostEntryLineItemDailySummary.objects.annotate(tag_keys=JSONBObjectKeys("tags"))
.values("tags")
.distinct()
.all()
)
for tag in tags:
if not tag.get("tags"):
continue
for key in tag.get("tags").keys():
tag_keys.add(key)
result = handler.get_tag_keys(filters=False)
self.assertEqual(sorted(result), sorted(list(tag_keys)))
def test_get_tag_keys_filter_false(self):
"""Test that all tag keys are returned with no filter."""
url = "?filter[time_scope_units]=month&filter[time_scope_value]=-2&filter[resolution]=monthly"
query_params = self.mocked_query_params(url, GCPTagView)
handler = GCPTagQueryHandler(query_params)
tag_keys = set()
with tenant_context(self.tenant):
tags = (
GCPCostEntryLineItemDailySummary.objects.annotate(tag_keys=JSONBObjectKeys("tags"))
.values("tags")
.distinct()
.all()
)
for tag in tags:
if not tag.get("tags"):
continue
for key in tag.get("tags").keys():
tag_keys.add(key)
result = handler.get_tag_keys(filters=False)
self.assertEqual(sorted(result), sorted(list(tag_keys)))
def test_get_tags_for_key_filter(self):
"""Test that get tags runs properly with key query."""
key = "<KEY>"
url = f"?filter[key]={key}"
query_params = self.mocked_query_params(url, GCPTagView)
handler = GCPTagQueryHandler(query_params)
with tenant_context(self.tenant):
tags = GCPTagsSummary.objects.filter(key__contains=key).values("values").distinct().all()
tag_values = tags[0].get("values")
expected = {"key": key, "values": tag_values}
result = handler.get_tags()
self.assertEqual(result[0].get("key"), expected.get("key"))
self.assertEqual(sorted(result[0].get("values")), sorted(expected.get("values")))
def test_get_tag_values_for_value_filter(self):
"""Test that get tag values runs properly with value query."""
key = "<KEY>"
value = "test_storage_label"
url = f"?filter[value]={value}"
query_params = self.mocked_query_params(url, GCPTagView)
handler = GCPTagQueryHandler(query_params)
handler.key = key
with tenant_context(self.tenant):
tags = GCPTagsValues.objects.filter(key__exact=key, value=value).values("value").distinct().all()
tag_values = [tag.get("value") for tag in tags]
expected = {"key": key, "values": tag_values}
result = handler.get_tag_values()
self.assertEqual(result[0].get("key"), expected.get("key"))
self.assertEqual(sorted(result[0].get("values")), sorted(expected.get("values")))
def test_get_tag_values_for_value_filter_partial_match(self):
"""Test that the execute query runs properly with value query."""
key = "version"
value = "a"
url = f"/version/?filter[value]={value}"
query_params = self.mocked_query_params(url, GCPTagView)
# the mocked query parameters dont include the key from the url so it needs to be added
query_params.kwargs = {"key": key}
handler = GCPTagQueryHandler(query_params)
with tenant_context(self.tenant):
tags = (
GCPTagsValues.objects.filter(key__exact=key, value__icontains=value).values("value").distinct().all()
)
tag_values = [tag.get("value") for tag in tags]
expected = {"key": key, "values": tag_values}
result = handler.get_tag_values()
self.assertEqual(result[0].get("key"), expected.get("key"))
self.assertEqual(sorted(result[0].get("values")), sorted(expected.get("values"))) | koku/api/tags/test/gcp/tests_queries.py | """Test the Report Queries."""
from tenant_schemas.utils import tenant_context
from api.functions import JSONBObjectKeys
from api.iam.test.iam_test_case import IamTestCase
from api.tags.gcp.queries import GCPTagQueryHandler
from api.tags.gcp.view import GCPTagView
from reporting.models import GCPCostEntryLineItemDailySummary
from reporting.models import GCPTagsSummary
from reporting.provider.gcp.models import GCPTagsValues
class GCPTagQueryHandlerTest(IamTestCase):
"""Tests for the Azure report query handler."""
def setUp(self):
"""Set up the customer view tests."""
super().setUp()
def test_execute_query_no_query_parameters(self):
"""Test that the execute query runs properly with no query."""
url = "?"
query_params = self.mocked_query_params(url, GCPTagView)
handler = GCPTagQueryHandler(query_params)
query_output = handler.execute_query()
self.assertIsNotNone(query_output.get("data"))
self.assertEqual(handler.time_scope_units, "day")
self.assertEqual(handler.time_scope_value, -10)
def test_execute_query_10_day_parameters(self):
"""Test that the execute query runs properly with 10 day query."""
url = "?filter[time_scope_units]=day&filter[time_scope_value]=-10&filter[resolution]=daily"
query_params = self.mocked_query_params(url, GCPTagView)
handler = GCPTagQueryHandler(query_params)
query_output = handler.execute_query()
self.assertIsNotNone(query_output.get("data"))
self.assertEqual(handler.time_scope_units, "day")
self.assertEqual(handler.time_scope_value, -10)
def test_execute_query_30_day_parameters(self):
"""Test that the execute query runs properly with 30 day query."""
url = "?filter[time_scope_units]=day&filter[time_scope_value]=-30&filter[resolution]=daily"
query_params = self.mocked_query_params(url, GCPTagView)
handler = GCPTagQueryHandler(query_params)
query_output = handler.execute_query()
self.assertIsNotNone(query_output.get("data"))
self.assertEqual(handler.time_scope_units, "day")
self.assertEqual(handler.time_scope_value, -30)
def test_execute_query_10_day_parameters_only_keys(self):
"""Test that the execute query runs properly with 10 day query."""
url = "?filter[time_scope_units]=day&filter[time_scope_value]=-10&filter[resolution]=daily&key_only=True"
query_params = self.mocked_query_params(url, GCPTagView)
handler = GCPTagQueryHandler(query_params)
query_output = handler.execute_query()
self.assertIsNotNone(query_output.get("data"))
self.assertEqual(handler.time_scope_units, "day")
self.assertEqual(handler.time_scope_value, -10)
def test_execute_query_month_parameters(self):
"""Test that the execute query runs properly with single month query."""
url = "?filter[resolution]=monthly&filter[time_scope_value]=-1&filter[time_scope_units]=month"
query_params = self.mocked_query_params(url, GCPTagView)
handler = GCPTagQueryHandler(query_params)
query_output = handler.execute_query()
self.assertIsNotNone(query_output.get("data"))
self.assertEqual(handler.time_scope_units, "month")
self.assertEqual(handler.time_scope_value, -1)
def test_execute_query_two_month_parameters(self):
"""Test that the execute query runs properly with two month query."""
url = "?filter[time_scope_units]=month&filter[time_scope_value]=-2&filter[resolution]=monthly"
query_params = self.mocked_query_params(url, GCPTagView)
handler = GCPTagQueryHandler(query_params)
query_output = handler.execute_query()
self.assertIsNotNone(query_output.get("data"))
self.assertEqual(handler.time_scope_units, "month")
self.assertEqual(handler.time_scope_value, -2)
def test_execute_query_for_project(self):
"""Test that the execute query runs properly with project query."""
account = None
with tenant_context(self.tenant):
obj = GCPCostEntryLineItemDailySummary.objects.values("account_id").first()
account = obj.get("account_id")
url = f"?filter[time_scope_units]=day&filter[time_scope_value]=-10&filter[resolution]=daily&filter[account]={account}" # noqa: E501
query_params = self.mocked_query_params(url, GCPTagView)
handler = GCPTagQueryHandler(query_params)
query_output = handler.execute_query()
self.assertIsNotNone(query_output.get("data"))
self.assertEqual(handler.time_scope_units, "day")
self.assertEqual(handler.time_scope_value, -10)
def test_get_tag_keys_filter_true(self):
"""Test that not all tag keys are returned with a filter."""
url = "?filter[time_scope_units]=month&filter[time_scope_value]=-2&filter[resolution]=monthly"
query_params = self.mocked_query_params(url, GCPTagView)
handler = GCPTagQueryHandler(query_params)
tag_keys = set()
with tenant_context(self.tenant):
tags = (
GCPCostEntryLineItemDailySummary.objects.annotate(tag_keys=JSONBObjectKeys("tags"))
.values("tags")
.distinct()
.all()
)
for tag in tags:
if not tag.get("tags"):
continue
for key in tag.get("tags").keys():
tag_keys.add(key)
result = handler.get_tag_keys(filters=False)
self.assertEqual(sorted(result), sorted(list(tag_keys)))
def test_get_tag_keys_filter_false(self):
"""Test that all tag keys are returned with no filter."""
url = "?filter[time_scope_units]=month&filter[time_scope_value]=-2&filter[resolution]=monthly"
query_params = self.mocked_query_params(url, GCPTagView)
handler = GCPTagQueryHandler(query_params)
tag_keys = set()
with tenant_context(self.tenant):
tags = (
GCPCostEntryLineItemDailySummary.objects.annotate(tag_keys=JSONBObjectKeys("tags"))
.values("tags")
.distinct()
.all()
)
for tag in tags:
if not tag.get("tags"):
continue
for key in tag.get("tags").keys():
tag_keys.add(key)
result = handler.get_tag_keys(filters=False)
self.assertEqual(sorted(result), sorted(list(tag_keys)))
def test_get_tags_for_key_filter(self):
"""Test that get tags runs properly with key query."""
key = "<KEY>"
url = f"?filter[key]={key}"
query_params = self.mocked_query_params(url, GCPTagView)
handler = GCPTagQueryHandler(query_params)
with tenant_context(self.tenant):
tags = GCPTagsSummary.objects.filter(key__contains=key).values("values").distinct().all()
tag_values = tags[0].get("values")
expected = {"key": key, "values": tag_values}
result = handler.get_tags()
self.assertEqual(result[0].get("key"), expected.get("key"))
self.assertEqual(sorted(result[0].get("values")), sorted(expected.get("values")))
def test_get_tag_values_for_value_filter(self):
"""Test that get tag values runs properly with value query."""
key = "<KEY>"
value = "test_storage_label"
url = f"?filter[value]={value}"
query_params = self.mocked_query_params(url, GCPTagView)
handler = GCPTagQueryHandler(query_params)
handler.key = key
with tenant_context(self.tenant):
tags = GCPTagsValues.objects.filter(key__exact=key, value=value).values("value").distinct().all()
tag_values = [tag.get("value") for tag in tags]
expected = {"key": key, "values": tag_values}
result = handler.get_tag_values()
self.assertEqual(result[0].get("key"), expected.get("key"))
self.assertEqual(sorted(result[0].get("values")), sorted(expected.get("values")))
def test_get_tag_values_for_value_filter_partial_match(self):
"""Test that the execute query runs properly with value query."""
key = "version"
value = "a"
url = f"/version/?filter[value]={value}"
query_params = self.mocked_query_params(url, GCPTagView)
# the mocked query parameters dont include the key from the url so it needs to be added
query_params.kwargs = {"key": key}
handler = GCPTagQueryHandler(query_params)
with tenant_context(self.tenant):
tags = (
GCPTagsValues.objects.filter(key__exact=key, value__icontains=value).values("value").distinct().all()
)
tag_values = [tag.get("value") for tag in tags]
expected = {"key": key, "values": tag_values}
result = handler.get_tag_values()
self.assertEqual(result[0].get("key"), expected.get("key"))
self.assertEqual(sorted(result[0].get("values")), sorted(expected.get("values"))) | 0.783368 | 0.487307 |
"""BERT finetuning runner."""
from __future__ import absolute_import
import logging
import torch
from torch import nn
from pytorch_transformers.modeling_bert import BertPreTrainedModel, BertModel
from torch.nn import CrossEntropyLoss
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class BertMCQConcat(BertPreTrainedModel):
def __init__(self, config):
super(BertMCQConcat, self).__init__(config)
self.bert = BertModel(config)
self._dropout = nn.Dropout(config.hidden_dropout_prob)
self._classification_layer = nn.Linear(config.hidden_size, 1)
self.apply(self.init_weights)
def forward(self, # type: ignore
input_ids, # batch_size, number_of_choices, max_seq_len
token_type_ids, # batch_size, number_of_choices, max_seq_len
input_mask, # batch_size, number_of_choices, max_seq_len
labels = None):
debug = False
# shape: batch_size*num_choices, max_len
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
flat_attention_mask = input_mask.view(-1, input_mask.size(-1))
if debug:
print(f"flat_input_ids = {flat_input_ids}")
print(f"flat_token_type_ids = {flat_token_type_ids}")
print(f"flat_attention_mask = {flat_attention_mask}")
# shape: batch_size*num_choices, hidden_dim
_, pooled = self.bert(input_ids=flat_input_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask)
if debug:
print(f"pooled = {pooled}")
print(f"labels = {labels}")
pooled = self._dropout(pooled)
# apply classification layer
# shape: batch_size*num_choices, 1
logits = self._classification_layer(pooled)
if debug:
print(f"logits = {logits}")
# shape: batch_size,num_choices
reshaped_logits = logits.view(-1, input_ids.size(1))
if debug:
print(f"reshaped_logits = {reshaped_logits}")
probs = torch.nn.functional.softmax(reshaped_logits, dim=-1)
outputs = (reshaped_logits, probs)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
outputs = (loss,) + outputs
return outputs # (loss, reshaped_logits, prob) | pytorch_transformers/models/hf_bert_mcq_concat.py | """BERT finetuning runner."""
from __future__ import absolute_import
import logging
import torch
from torch import nn
from pytorch_transformers.modeling_bert import BertPreTrainedModel, BertModel
from torch.nn import CrossEntropyLoss
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class BertMCQConcat(BertPreTrainedModel):
def __init__(self, config):
super(BertMCQConcat, self).__init__(config)
self.bert = BertModel(config)
self._dropout = nn.Dropout(config.hidden_dropout_prob)
self._classification_layer = nn.Linear(config.hidden_size, 1)
self.apply(self.init_weights)
def forward(self, # type: ignore
input_ids, # batch_size, number_of_choices, max_seq_len
token_type_ids, # batch_size, number_of_choices, max_seq_len
input_mask, # batch_size, number_of_choices, max_seq_len
labels = None):
debug = False
# shape: batch_size*num_choices, max_len
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
flat_attention_mask = input_mask.view(-1, input_mask.size(-1))
if debug:
print(f"flat_input_ids = {flat_input_ids}")
print(f"flat_token_type_ids = {flat_token_type_ids}")
print(f"flat_attention_mask = {flat_attention_mask}")
# shape: batch_size*num_choices, hidden_dim
_, pooled = self.bert(input_ids=flat_input_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask)
if debug:
print(f"pooled = {pooled}")
print(f"labels = {labels}")
pooled = self._dropout(pooled)
# apply classification layer
# shape: batch_size*num_choices, 1
logits = self._classification_layer(pooled)
if debug:
print(f"logits = {logits}")
# shape: batch_size,num_choices
reshaped_logits = logits.view(-1, input_ids.size(1))
if debug:
print(f"reshaped_logits = {reshaped_logits}")
probs = torch.nn.functional.softmax(reshaped_logits, dim=-1)
outputs = (reshaped_logits, probs)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
outputs = (loss,) + outputs
return outputs # (loss, reshaped_logits, prob) | 0.906647 | 0.248073 |
from pprint import pformat
from six import iteritems
import re
class BiosServerManagement(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'boot_option_num_retry': 'str',
'boot_option_re_cool_down': 'str',
'boot_option_retry': 'str',
'cdn_enable': 'str',
'frb2enable': 'str',
'os_boot_watchdog_timer': 'str',
'os_boot_watchdog_timer_policy': 'str',
'os_boot_watchdog_timer_timeout': 'str'
}
attribute_map = {
'boot_option_num_retry': 'BootOptionNumRetry',
'boot_option_re_cool_down': 'BootOptionReCoolDown',
'boot_option_retry': 'BootOptionRetry',
'cdn_enable': 'CdnEnable',
'frb2enable': 'Frb2enable',
'os_boot_watchdog_timer': 'OsBootWatchdogTimer',
'os_boot_watchdog_timer_policy': 'OsBootWatchdogTimerPolicy',
'os_boot_watchdog_timer_timeout': 'OsBootWatchdogTimerTimeout'
}
def __init__(self, boot_option_num_retry='platform-default', boot_option_re_cool_down='platform-default', boot_option_retry='platform-default', cdn_enable='platform-default', frb2enable='platform-default', os_boot_watchdog_timer='platform-default', os_boot_watchdog_timer_policy='platform-default', os_boot_watchdog_timer_timeout='platform-default'):
"""
BiosServerManagement - a model defined in Swagger
"""
self._boot_option_num_retry = None
self._boot_option_re_cool_down = None
self._boot_option_retry = None
self._cdn_enable = None
self._frb2enable = None
self._os_boot_watchdog_timer = None
self._os_boot_watchdog_timer_policy = None
self._os_boot_watchdog_timer_timeout = None
if boot_option_num_retry is not None:
self.boot_option_num_retry = boot_option_num_retry
if boot_option_re_cool_down is not None:
self.boot_option_re_cool_down = boot_option_re_cool_down
if boot_option_retry is not None:
self.boot_option_retry = boot_option_retry
if cdn_enable is not None:
self.cdn_enable = cdn_enable
if frb2enable is not None:
self.frb2enable = frb2enable
if os_boot_watchdog_timer is not None:
self.os_boot_watchdog_timer = os_boot_watchdog_timer
if os_boot_watchdog_timer_policy is not None:
self.os_boot_watchdog_timer_policy = os_boot_watchdog_timer_policy
if os_boot_watchdog_timer_timeout is not None:
self.os_boot_watchdog_timer_timeout = os_boot_watchdog_timer_timeout
@property
def boot_option_num_retry(self):
"""
Gets the boot_option_num_retry of this BiosServerManagement.
BIOS Token for setting Number of Retries configuration
:return: The boot_option_num_retry of this BiosServerManagement.
:rtype: str
"""
return self._boot_option_num_retry
@boot_option_num_retry.setter
def boot_option_num_retry(self, boot_option_num_retry):
"""
Sets the boot_option_num_retry of this BiosServerManagement.
BIOS Token for setting Number of Retries configuration
:param boot_option_num_retry: The boot_option_num_retry of this BiosServerManagement.
:type: str
"""
allowed_values = ["platform-default", "13", "5", "Infinite"]
if boot_option_num_retry not in allowed_values:
raise ValueError(
"Invalid value for `boot_option_num_retry` ({0}), must be one of {1}"
.format(boot_option_num_retry, allowed_values)
)
self._boot_option_num_retry = boot_option_num_retry
@property
def boot_option_re_cool_down(self):
"""
Gets the boot_option_re_cool_down of this BiosServerManagement.
BIOS Token for setting Cool Down Time (sec) configuration
:return: The boot_option_re_cool_down of this BiosServerManagement.
:rtype: str
"""
return self._boot_option_re_cool_down
@boot_option_re_cool_down.setter
def boot_option_re_cool_down(self, boot_option_re_cool_down):
"""
Sets the boot_option_re_cool_down of this BiosServerManagement.
BIOS Token for setting Cool Down Time (sec) configuration
:param boot_option_re_cool_down: The boot_option_re_cool_down of this BiosServerManagement.
:type: str
"""
allowed_values = ["platform-default", "15", "45", "90"]
if boot_option_re_cool_down not in allowed_values:
raise ValueError(
"Invalid value for `boot_option_re_cool_down` ({0}), must be one of {1}"
.format(boot_option_re_cool_down, allowed_values)
)
self._boot_option_re_cool_down = boot_option_re_cool_down
@property
def boot_option_retry(self):
"""
Gets the boot_option_retry of this BiosServerManagement.
BIOS Token for setting Boot Option Retry configuration
:return: The boot_option_retry of this BiosServerManagement.
:rtype: str
"""
return self._boot_option_retry
@boot_option_retry.setter
def boot_option_retry(self, boot_option_retry):
"""
Sets the boot_option_retry of this BiosServerManagement.
BIOS Token for setting Boot Option Retry configuration
:param boot_option_retry: The boot_option_retry of this BiosServerManagement.
:type: str
"""
allowed_values = ["platform-default", "enabled", "disabled"]
if boot_option_retry not in allowed_values:
raise ValueError(
"Invalid value for `boot_option_retry` ({0}), must be one of {1}"
.format(boot_option_retry, allowed_values)
)
self._boot_option_retry = boot_option_retry
@property
def cdn_enable(self):
"""
Gets the cdn_enable of this BiosServerManagement.
BIOS Token for setting Consistent Device Naming configuration
:return: The cdn_enable of this BiosServerManagement.
:rtype: str
"""
return self._cdn_enable
@cdn_enable.setter
def cdn_enable(self, cdn_enable):
"""
Sets the cdn_enable of this BiosServerManagement.
BIOS Token for setting Consistent Device Naming configuration
:param cdn_enable: The cdn_enable of this BiosServerManagement.
:type: str
"""
allowed_values = ["platform-default", "enabled", "disabled"]
if cdn_enable not in allowed_values:
raise ValueError(
"Invalid value for `cdn_enable` ({0}), must be one of {1}"
.format(cdn_enable, allowed_values)
)
self._cdn_enable = cdn_enable
@property
def frb2enable(self):
"""
Gets the frb2enable of this BiosServerManagement.
BIOS Token for setting FRB-2 Timer configuration
:return: The frb2enable of this BiosServerManagement.
:rtype: str
"""
return self._frb2enable
@frb2enable.setter
def frb2enable(self, frb2enable):
"""
Sets the frb2enable of this BiosServerManagement.
BIOS Token for setting FRB-2 Timer configuration
:param frb2enable: The frb2enable of this BiosServerManagement.
:type: str
"""
allowed_values = ["platform-default", "enabled", "disabled"]
if frb2enable not in allowed_values:
raise ValueError(
"Invalid value for `frb2enable` ({0}), must be one of {1}"
.format(frb2enable, allowed_values)
)
self._frb2enable = frb2enable
@property
def os_boot_watchdog_timer(self):
"""
Gets the os_boot_watchdog_timer of this BiosServerManagement.
BIOS Token for setting OS Watchdog Timer configuration
:return: The os_boot_watchdog_timer of this BiosServerManagement.
:rtype: str
"""
return self._os_boot_watchdog_timer
@os_boot_watchdog_timer.setter
def os_boot_watchdog_timer(self, os_boot_watchdog_timer):
"""
Sets the os_boot_watchdog_timer of this BiosServerManagement.
BIOS Token for setting OS Watchdog Timer configuration
:param os_boot_watchdog_timer: The os_boot_watchdog_timer of this BiosServerManagement.
:type: str
"""
allowed_values = ["platform-default", "enabled", "disabled"]
if os_boot_watchdog_timer not in allowed_values:
raise ValueError(
"Invalid value for `os_boot_watchdog_timer` ({0}), must be one of {1}"
.format(os_boot_watchdog_timer, allowed_values)
)
self._os_boot_watchdog_timer = os_boot_watchdog_timer
@property
def os_boot_watchdog_timer_policy(self):
"""
Gets the os_boot_watchdog_timer_policy of this BiosServerManagement.
BIOS Token for setting OS Watchdog Timer Policy configuration
:return: The os_boot_watchdog_timer_policy of this BiosServerManagement.
:rtype: str
"""
return self._os_boot_watchdog_timer_policy
@os_boot_watchdog_timer_policy.setter
def os_boot_watchdog_timer_policy(self, os_boot_watchdog_timer_policy):
"""
Sets the os_boot_watchdog_timer_policy of this BiosServerManagement.
BIOS Token for setting OS Watchdog Timer Policy configuration
:param os_boot_watchdog_timer_policy: The os_boot_watchdog_timer_policy of this BiosServerManagement.
:type: str
"""
allowed_values = ["platform-default", "power-off", "reset"]
if os_boot_watchdog_timer_policy not in allowed_values:
raise ValueError(
"Invalid value for `os_boot_watchdog_timer_policy` ({0}), must be one of {1}"
.format(os_boot_watchdog_timer_policy, allowed_values)
)
self._os_boot_watchdog_timer_policy = os_boot_watchdog_timer_policy
@property
def os_boot_watchdog_timer_timeout(self):
"""
Gets the os_boot_watchdog_timer_timeout of this BiosServerManagement.
BIOS Token for setting OS Watchdog Timer Timeout configuration
:return: The os_boot_watchdog_timer_timeout of this BiosServerManagement.
:rtype: str
"""
return self._os_boot_watchdog_timer_timeout
@os_boot_watchdog_timer_timeout.setter
def os_boot_watchdog_timer_timeout(self, os_boot_watchdog_timer_timeout):
"""
Sets the os_boot_watchdog_timer_timeout of this BiosServerManagement.
BIOS Token for setting OS Watchdog Timer Timeout configuration
:param os_boot_watchdog_timer_timeout: The os_boot_watchdog_timer_timeout of this BiosServerManagement.
:type: str
"""
allowed_values = ["platform-default", "10-minutes", "15-minutes", "20-minutes", "5-minutes"]
if os_boot_watchdog_timer_timeout not in allowed_values:
raise ValueError(
"Invalid value for `os_boot_watchdog_timer_timeout` ({0}), must be one of {1}"
.format(os_boot_watchdog_timer_timeout, allowed_values)
)
self._os_boot_watchdog_timer_timeout = os_boot_watchdog_timer_timeout
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, BiosServerManagement):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other | intersight/models/bios_server_management.py | from pprint import pformat
from six import iteritems
import re
class BiosServerManagement(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'boot_option_num_retry': 'str',
'boot_option_re_cool_down': 'str',
'boot_option_retry': 'str',
'cdn_enable': 'str',
'frb2enable': 'str',
'os_boot_watchdog_timer': 'str',
'os_boot_watchdog_timer_policy': 'str',
'os_boot_watchdog_timer_timeout': 'str'
}
attribute_map = {
'boot_option_num_retry': 'BootOptionNumRetry',
'boot_option_re_cool_down': 'BootOptionReCoolDown',
'boot_option_retry': 'BootOptionRetry',
'cdn_enable': 'CdnEnable',
'frb2enable': 'Frb2enable',
'os_boot_watchdog_timer': 'OsBootWatchdogTimer',
'os_boot_watchdog_timer_policy': 'OsBootWatchdogTimerPolicy',
'os_boot_watchdog_timer_timeout': 'OsBootWatchdogTimerTimeout'
}
def __init__(self, boot_option_num_retry='platform-default', boot_option_re_cool_down='platform-default', boot_option_retry='platform-default', cdn_enable='platform-default', frb2enable='platform-default', os_boot_watchdog_timer='platform-default', os_boot_watchdog_timer_policy='platform-default', os_boot_watchdog_timer_timeout='platform-default'):
"""
BiosServerManagement - a model defined in Swagger
"""
self._boot_option_num_retry = None
self._boot_option_re_cool_down = None
self._boot_option_retry = None
self._cdn_enable = None
self._frb2enable = None
self._os_boot_watchdog_timer = None
self._os_boot_watchdog_timer_policy = None
self._os_boot_watchdog_timer_timeout = None
if boot_option_num_retry is not None:
self.boot_option_num_retry = boot_option_num_retry
if boot_option_re_cool_down is not None:
self.boot_option_re_cool_down = boot_option_re_cool_down
if boot_option_retry is not None:
self.boot_option_retry = boot_option_retry
if cdn_enable is not None:
self.cdn_enable = cdn_enable
if frb2enable is not None:
self.frb2enable = frb2enable
if os_boot_watchdog_timer is not None:
self.os_boot_watchdog_timer = os_boot_watchdog_timer
if os_boot_watchdog_timer_policy is not None:
self.os_boot_watchdog_timer_policy = os_boot_watchdog_timer_policy
if os_boot_watchdog_timer_timeout is not None:
self.os_boot_watchdog_timer_timeout = os_boot_watchdog_timer_timeout
@property
def boot_option_num_retry(self):
"""
Gets the boot_option_num_retry of this BiosServerManagement.
BIOS Token for setting Number of Retries configuration
:return: The boot_option_num_retry of this BiosServerManagement.
:rtype: str
"""
return self._boot_option_num_retry
@boot_option_num_retry.setter
def boot_option_num_retry(self, boot_option_num_retry):
"""
Sets the boot_option_num_retry of this BiosServerManagement.
BIOS Token for setting Number of Retries configuration
:param boot_option_num_retry: The boot_option_num_retry of this BiosServerManagement.
:type: str
"""
allowed_values = ["platform-default", "13", "5", "Infinite"]
if boot_option_num_retry not in allowed_values:
raise ValueError(
"Invalid value for `boot_option_num_retry` ({0}), must be one of {1}"
.format(boot_option_num_retry, allowed_values)
)
self._boot_option_num_retry = boot_option_num_retry
@property
def boot_option_re_cool_down(self):
"""
Gets the boot_option_re_cool_down of this BiosServerManagement.
BIOS Token for setting Cool Down Time (sec) configuration
:return: The boot_option_re_cool_down of this BiosServerManagement.
:rtype: str
"""
return self._boot_option_re_cool_down
@boot_option_re_cool_down.setter
def boot_option_re_cool_down(self, boot_option_re_cool_down):
"""
Sets the boot_option_re_cool_down of this BiosServerManagement.
BIOS Token for setting Cool Down Time (sec) configuration
:param boot_option_re_cool_down: The boot_option_re_cool_down of this BiosServerManagement.
:type: str
"""
allowed_values = ["platform-default", "15", "45", "90"]
if boot_option_re_cool_down not in allowed_values:
raise ValueError(
"Invalid value for `boot_option_re_cool_down` ({0}), must be one of {1}"
.format(boot_option_re_cool_down, allowed_values)
)
self._boot_option_re_cool_down = boot_option_re_cool_down
@property
def boot_option_retry(self):
"""
Gets the boot_option_retry of this BiosServerManagement.
BIOS Token for setting Boot Option Retry configuration
:return: The boot_option_retry of this BiosServerManagement.
:rtype: str
"""
return self._boot_option_retry
@boot_option_retry.setter
def boot_option_retry(self, boot_option_retry):
"""
Sets the boot_option_retry of this BiosServerManagement.
BIOS Token for setting Boot Option Retry configuration
:param boot_option_retry: The boot_option_retry of this BiosServerManagement.
:type: str
"""
allowed_values = ["platform-default", "enabled", "disabled"]
if boot_option_retry not in allowed_values:
raise ValueError(
"Invalid value for `boot_option_retry` ({0}), must be one of {1}"
.format(boot_option_retry, allowed_values)
)
self._boot_option_retry = boot_option_retry
@property
def cdn_enable(self):
"""
Gets the cdn_enable of this BiosServerManagement.
BIOS Token for setting Consistent Device Naming configuration
:return: The cdn_enable of this BiosServerManagement.
:rtype: str
"""
return self._cdn_enable
@cdn_enable.setter
def cdn_enable(self, cdn_enable):
"""
Sets the cdn_enable of this BiosServerManagement.
BIOS Token for setting Consistent Device Naming configuration
:param cdn_enable: The cdn_enable of this BiosServerManagement.
:type: str
"""
allowed_values = ["platform-default", "enabled", "disabled"]
if cdn_enable not in allowed_values:
raise ValueError(
"Invalid value for `cdn_enable` ({0}), must be one of {1}"
.format(cdn_enable, allowed_values)
)
self._cdn_enable = cdn_enable
@property
def frb2enable(self):
"""
Gets the frb2enable of this BiosServerManagement.
BIOS Token for setting FRB-2 Timer configuration
:return: The frb2enable of this BiosServerManagement.
:rtype: str
"""
return self._frb2enable
@frb2enable.setter
def frb2enable(self, frb2enable):
"""
Sets the frb2enable of this BiosServerManagement.
BIOS Token for setting FRB-2 Timer configuration
:param frb2enable: The frb2enable of this BiosServerManagement.
:type: str
"""
allowed_values = ["platform-default", "enabled", "disabled"]
if frb2enable not in allowed_values:
raise ValueError(
"Invalid value for `frb2enable` ({0}), must be one of {1}"
.format(frb2enable, allowed_values)
)
self._frb2enable = frb2enable
@property
def os_boot_watchdog_timer(self):
"""
Gets the os_boot_watchdog_timer of this BiosServerManagement.
BIOS Token for setting OS Watchdog Timer configuration
:return: The os_boot_watchdog_timer of this BiosServerManagement.
:rtype: str
"""
return self._os_boot_watchdog_timer
@os_boot_watchdog_timer.setter
def os_boot_watchdog_timer(self, os_boot_watchdog_timer):
"""
Sets the os_boot_watchdog_timer of this BiosServerManagement.
BIOS Token for setting OS Watchdog Timer configuration
:param os_boot_watchdog_timer: The os_boot_watchdog_timer of this BiosServerManagement.
:type: str
"""
allowed_values = ["platform-default", "enabled", "disabled"]
if os_boot_watchdog_timer not in allowed_values:
raise ValueError(
"Invalid value for `os_boot_watchdog_timer` ({0}), must be one of {1}"
.format(os_boot_watchdog_timer, allowed_values)
)
self._os_boot_watchdog_timer = os_boot_watchdog_timer
@property
def os_boot_watchdog_timer_policy(self):
"""
Gets the os_boot_watchdog_timer_policy of this BiosServerManagement.
BIOS Token for setting OS Watchdog Timer Policy configuration
:return: The os_boot_watchdog_timer_policy of this BiosServerManagement.
:rtype: str
"""
return self._os_boot_watchdog_timer_policy
@os_boot_watchdog_timer_policy.setter
def os_boot_watchdog_timer_policy(self, os_boot_watchdog_timer_policy):
"""
Sets the os_boot_watchdog_timer_policy of this BiosServerManagement.
BIOS Token for setting OS Watchdog Timer Policy configuration
:param os_boot_watchdog_timer_policy: The os_boot_watchdog_timer_policy of this BiosServerManagement.
:type: str
"""
allowed_values = ["platform-default", "power-off", "reset"]
if os_boot_watchdog_timer_policy not in allowed_values:
raise ValueError(
"Invalid value for `os_boot_watchdog_timer_policy` ({0}), must be one of {1}"
.format(os_boot_watchdog_timer_policy, allowed_values)
)
self._os_boot_watchdog_timer_policy = os_boot_watchdog_timer_policy
@property
def os_boot_watchdog_timer_timeout(self):
"""
Gets the os_boot_watchdog_timer_timeout of this BiosServerManagement.
BIOS Token for setting OS Watchdog Timer Timeout configuration
:return: The os_boot_watchdog_timer_timeout of this BiosServerManagement.
:rtype: str
"""
return self._os_boot_watchdog_timer_timeout
@os_boot_watchdog_timer_timeout.setter
def os_boot_watchdog_timer_timeout(self, os_boot_watchdog_timer_timeout):
"""
Sets the os_boot_watchdog_timer_timeout of this BiosServerManagement.
BIOS Token for setting OS Watchdog Timer Timeout configuration
:param os_boot_watchdog_timer_timeout: The os_boot_watchdog_timer_timeout of this BiosServerManagement.
:type: str
"""
allowed_values = ["platform-default", "10-minutes", "15-minutes", "20-minutes", "5-minutes"]
if os_boot_watchdog_timer_timeout not in allowed_values:
raise ValueError(
"Invalid value for `os_boot_watchdog_timer_timeout` ({0}), must be one of {1}"
.format(os_boot_watchdog_timer_timeout, allowed_values)
)
self._os_boot_watchdog_timer_timeout = os_boot_watchdog_timer_timeout
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, BiosServerManagement):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other | 0.538741 | 0.056548 |
import numpy as np
from PyQt5.QtCore import Qt, QThread, QTimer
from PyQt5.QtGui import QPixmap, QFont, QImage, QCursor, QIntValidator
from PyQt5.QtWidgets import QMainWindow, QWidget, QPushButton, QHBoxLayout, QApplication, QLabel, \
QDialog, QDialogButtonBox, QVBoxLayout, QLineEdit
from pyqtgraph import ImageView, PlotWidget, GraphicsView, ImageItem
import cv2
class PictureView(GraphicsView):
def __init__(self, *args, **kwargs):
super(PictureView, self).__init__(*args, **kwargs)
def mousePressEvent(self, event):
print(event.pos())
def mouseReleaseEvent(self, event):
cursor = QCursor()
print(cursor.pos())
class ConfigDialog(QDialog):
def __init__(self, *args, **kwargs):
super(ConfigDialog, self).__init__(*args, **kwargs)
self.setWindowTitle("Region of Interest Configuration")
QBtn = QDialogButtonBox.Save | QDialogButtonBox.Cancel
self.buttonBox = QDialogButtonBox(QBtn)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
validator = QIntValidator(0, 1500)
self.layout = QVBoxLayout()
h_layout_1 = QHBoxLayout()
h_layout_2 = QHBoxLayout()
h_layout_1.addWidget(QLabel("Video Source:"))
self.video_source = QLineEdit()
h_layout_1.addWidget(self.video_source)
v_layout_1 = QVBoxLayout()
v_layout_1.addWidget(QLabel("Bottom Left - pixel (x, y):"))
v_layout_1.addWidget(QLabel("Bottom Right - pixel (x, y):"))
v_layout_1.addWidget(QLabel("Top Right - pixel (x, y):"))
v_layout_1.addWidget(QLabel("Top Left - pixel (x, y):"))
v_layout_1.addWidget(QLabel("Dimension - cm (width, depth):"))
h_layout_2.addLayout(v_layout_1)
v_layout_2 = QVBoxLayout()
self.bl_x = QLineEdit()
self.bl_x.setValidator(validator)
v_layout_2.addWidget(self.bl_x)
self.br_x = QLineEdit()
self.br_x.setValidator(validator)
v_layout_2.addWidget(self.br_x)
self.tr_x = QLineEdit()
self.tr_x.setValidator(validator)
v_layout_2.addWidget(self.tr_x)
self.tl_x = QLineEdit()
self.tl_x.setValidator(validator)
v_layout_2.addWidget(self.tl_x)
self.width = QLineEdit()
self.width.setValidator(validator)
v_layout_2.addWidget(self.width)
h_layout_2.addLayout(v_layout_2)
v_layout_3 = QVBoxLayout()
self.bl_y = QLineEdit()
self.bl_y.setValidator(validator)
v_layout_3.addWidget(self.bl_y)
self.br_y = QLineEdit()
self.br_y.setValidator(validator)
v_layout_3.addWidget(self.br_y)
self.tr_y = QLineEdit()
self.tr_y.setValidator(validator)
v_layout_3.addWidget(self.tr_y)
self.tl_y = QLineEdit()
self.tl_y.setValidator(validator)
v_layout_3.addWidget(self.tl_y)
self.depth = QLineEdit()
self.depth.setValidator(validator)
v_layout_3.addWidget(self.depth)
h_layout_2.addLayout(v_layout_3)
self.layout.addLayout(h_layout_1)
self.layout.addLayout(h_layout_2)
self.layout.addWidget(self.buttonBox)
self.setLayout(self.layout)
class StartWindow(QMainWindow):
def __init__(self, camera = None, net = None, config = None, image_width = 950):
super().__init__()
self.camera = camera
self.net = net
self.config = config
self.image_width = image_width
self.setFixedWidth(image_width + 78)
self.setFixedHeight(780)
self.central_widget = QWidget(self)
self.label_logo = QLabel(self.central_widget)
logo = QPixmap("logo.png")
self.label_logo.setPixmap(logo)
self.label_logo.setGeometry(20,20,181,81)
self.label_logo.setScaledContents(True)
self.label_logo_2 = QLabel(self.central_widget)
logo_2 = QPixmap("logo_2.png")
self.label_logo_2.setPixmap(logo_2)
self.label_logo_2.setGeometry(670,30,206,61)
self.label_logo_2.setScaledContents(True)
self.button_config = QPushButton('Configuration', self.central_widget)
self.button_config.setGeometry(240,30,191,61)
font = QFont()
font.setPointSize(24)
self.button_config.setFont(font)
self.button_config.clicked.connect(self.start_config)
self.button_detection = QPushButton('Start Detection', self.central_widget)
self.button_detection.setGeometry(450,30,191,61)
font = QFont()
font.setPointSize(24)
self.button_detection.setFont(font)
self.button_detection.clicked.connect(self.start_movie)
#self.label_image = QLabel(self.central_widget)
self.image_view = PictureView(self.central_widget)
self.image_view.setGeometry(39,110,image_width,630)
#self.image_view.hideAxis('left')
#self.image_view.hideAxis('bottom')
self.image_view.setStyleSheet("border :1px solid black;")
#self.label_image.setGeometry(40,110,1067,600)
#self.label_image.setScaledContents(True)
#self.label_image.setStyleSheet("border :1px solid black;")
self.setCentralWidget(self.central_widget)
self.update_timer = QTimer()
self.update_timer.timeout.connect(self.update_movie)
def start_config(self):
dlg = ConfigDialog(self)
dlg.bl_x.setText(str(self.config.bl_x))
dlg.bl_y.setText(str(self.config.bl_y))
dlg.br_x.setText(str(self.config.br_x))
dlg.br_y.setText(str(self.config.br_y))
dlg.tl_x.setText(str(self.config.tl_x))
dlg.tl_y.setText(str(self.config.tl_y))
dlg.tr_x.setText(str(self.config.tr_x))
dlg.tr_y.setText(str(self.config.tr_y))
dlg.width.setText(str(self.config.width))
dlg.depth.setText(str(self.config.depth))
dlg.video_source.setText(self.config.video_source)
if dlg.exec_():
self.config.bl_x = int(dlg.bl_x.text())
self.config.bl_y = int(dlg.bl_y.text())
self.config.br_x = int(dlg.br_x.text())
self.config.br_y = int(dlg.br_y.text())
self.config.tl_x = int(dlg.tl_x.text())
self.config.tl_y = int(dlg.tl_y.text())
self.config.tr_x = int(dlg.tr_x.text())
self.config.tr_y = int(dlg.tr_y.text())
self.config.width = int(dlg.width.text())
self.config.depth = int(dlg.depth.text())
self.config.video_source = dlg.video_source.text()
self.config.save()
def update_image(self):
frame = self.camera.get_frame()
frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
#self.image_view.setImage(frame.T)
image_item = ImageItem(frame)
self.image_view.addItem(image_item)
#height, width, channel = frame.shape
#bytesPerLine = 3 * width
#qimg = QImage(frame.data, width, height, bytesPerLine, QImage.Format_RGB888).rgbSwapped()
#self.label_image.setPixmap(QPixmap(qimg))
#self.update()
#print(height, width)
def update_movie(self):
#print(self.camera.last_frame.shape)
image_item = ImageItem(self.camera.last_frame)
self.image_view.addItem(image_item)
#self.image_view.setImage(self.camera.last_frame.T)
def update_brightness(self, value):
value /= 10
self.camera.set_brightness(value)
def start_movie(self):
self.movie_thread = MovieThread(self.camera, self.net, self.config)
self.movie_thread.start()
self.update_timer.start(30)
class MovieThread(QThread):
def __init__(self, camera, net, config):
super().__init__()
self.camera = camera
self.net = net
self.config = config
def run(self):
#self.camera.acquire_movie(500)
self.camera.detect_in_movie(500, self.net, self.config)
if __name__ == '__main__':
app = QApplication([])
window = StartWindow()
window.show()
app.exit(app.exec_()) | view.py | import numpy as np
from PyQt5.QtCore import Qt, QThread, QTimer
from PyQt5.QtGui import QPixmap, QFont, QImage, QCursor, QIntValidator
from PyQt5.QtWidgets import QMainWindow, QWidget, QPushButton, QHBoxLayout, QApplication, QLabel, \
QDialog, QDialogButtonBox, QVBoxLayout, QLineEdit
from pyqtgraph import ImageView, PlotWidget, GraphicsView, ImageItem
import cv2
class PictureView(GraphicsView):
def __init__(self, *args, **kwargs):
super(PictureView, self).__init__(*args, **kwargs)
def mousePressEvent(self, event):
print(event.pos())
def mouseReleaseEvent(self, event):
cursor = QCursor()
print(cursor.pos())
class ConfigDialog(QDialog):
def __init__(self, *args, **kwargs):
super(ConfigDialog, self).__init__(*args, **kwargs)
self.setWindowTitle("Region of Interest Configuration")
QBtn = QDialogButtonBox.Save | QDialogButtonBox.Cancel
self.buttonBox = QDialogButtonBox(QBtn)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
validator = QIntValidator(0, 1500)
self.layout = QVBoxLayout()
h_layout_1 = QHBoxLayout()
h_layout_2 = QHBoxLayout()
h_layout_1.addWidget(QLabel("Video Source:"))
self.video_source = QLineEdit()
h_layout_1.addWidget(self.video_source)
v_layout_1 = QVBoxLayout()
v_layout_1.addWidget(QLabel("Bottom Left - pixel (x, y):"))
v_layout_1.addWidget(QLabel("Bottom Right - pixel (x, y):"))
v_layout_1.addWidget(QLabel("Top Right - pixel (x, y):"))
v_layout_1.addWidget(QLabel("Top Left - pixel (x, y):"))
v_layout_1.addWidget(QLabel("Dimension - cm (width, depth):"))
h_layout_2.addLayout(v_layout_1)
v_layout_2 = QVBoxLayout()
self.bl_x = QLineEdit()
self.bl_x.setValidator(validator)
v_layout_2.addWidget(self.bl_x)
self.br_x = QLineEdit()
self.br_x.setValidator(validator)
v_layout_2.addWidget(self.br_x)
self.tr_x = QLineEdit()
self.tr_x.setValidator(validator)
v_layout_2.addWidget(self.tr_x)
self.tl_x = QLineEdit()
self.tl_x.setValidator(validator)
v_layout_2.addWidget(self.tl_x)
self.width = QLineEdit()
self.width.setValidator(validator)
v_layout_2.addWidget(self.width)
h_layout_2.addLayout(v_layout_2)
v_layout_3 = QVBoxLayout()
self.bl_y = QLineEdit()
self.bl_y.setValidator(validator)
v_layout_3.addWidget(self.bl_y)
self.br_y = QLineEdit()
self.br_y.setValidator(validator)
v_layout_3.addWidget(self.br_y)
self.tr_y = QLineEdit()
self.tr_y.setValidator(validator)
v_layout_3.addWidget(self.tr_y)
self.tl_y = QLineEdit()
self.tl_y.setValidator(validator)
v_layout_3.addWidget(self.tl_y)
self.depth = QLineEdit()
self.depth.setValidator(validator)
v_layout_3.addWidget(self.depth)
h_layout_2.addLayout(v_layout_3)
self.layout.addLayout(h_layout_1)
self.layout.addLayout(h_layout_2)
self.layout.addWidget(self.buttonBox)
self.setLayout(self.layout)
class StartWindow(QMainWindow):
def __init__(self, camera = None, net = None, config = None, image_width = 950):
super().__init__()
self.camera = camera
self.net = net
self.config = config
self.image_width = image_width
self.setFixedWidth(image_width + 78)
self.setFixedHeight(780)
self.central_widget = QWidget(self)
self.label_logo = QLabel(self.central_widget)
logo = QPixmap("logo.png")
self.label_logo.setPixmap(logo)
self.label_logo.setGeometry(20,20,181,81)
self.label_logo.setScaledContents(True)
self.label_logo_2 = QLabel(self.central_widget)
logo_2 = QPixmap("logo_2.png")
self.label_logo_2.setPixmap(logo_2)
self.label_logo_2.setGeometry(670,30,206,61)
self.label_logo_2.setScaledContents(True)
self.button_config = QPushButton('Configuration', self.central_widget)
self.button_config.setGeometry(240,30,191,61)
font = QFont()
font.setPointSize(24)
self.button_config.setFont(font)
self.button_config.clicked.connect(self.start_config)
self.button_detection = QPushButton('Start Detection', self.central_widget)
self.button_detection.setGeometry(450,30,191,61)
font = QFont()
font.setPointSize(24)
self.button_detection.setFont(font)
self.button_detection.clicked.connect(self.start_movie)
#self.label_image = QLabel(self.central_widget)
self.image_view = PictureView(self.central_widget)
self.image_view.setGeometry(39,110,image_width,630)
#self.image_view.hideAxis('left')
#self.image_view.hideAxis('bottom')
self.image_view.setStyleSheet("border :1px solid black;")
#self.label_image.setGeometry(40,110,1067,600)
#self.label_image.setScaledContents(True)
#self.label_image.setStyleSheet("border :1px solid black;")
self.setCentralWidget(self.central_widget)
self.update_timer = QTimer()
self.update_timer.timeout.connect(self.update_movie)
def start_config(self):
dlg = ConfigDialog(self)
dlg.bl_x.setText(str(self.config.bl_x))
dlg.bl_y.setText(str(self.config.bl_y))
dlg.br_x.setText(str(self.config.br_x))
dlg.br_y.setText(str(self.config.br_y))
dlg.tl_x.setText(str(self.config.tl_x))
dlg.tl_y.setText(str(self.config.tl_y))
dlg.tr_x.setText(str(self.config.tr_x))
dlg.tr_y.setText(str(self.config.tr_y))
dlg.width.setText(str(self.config.width))
dlg.depth.setText(str(self.config.depth))
dlg.video_source.setText(self.config.video_source)
if dlg.exec_():
self.config.bl_x = int(dlg.bl_x.text())
self.config.bl_y = int(dlg.bl_y.text())
self.config.br_x = int(dlg.br_x.text())
self.config.br_y = int(dlg.br_y.text())
self.config.tl_x = int(dlg.tl_x.text())
self.config.tl_y = int(dlg.tl_y.text())
self.config.tr_x = int(dlg.tr_x.text())
self.config.tr_y = int(dlg.tr_y.text())
self.config.width = int(dlg.width.text())
self.config.depth = int(dlg.depth.text())
self.config.video_source = dlg.video_source.text()
self.config.save()
def update_image(self):
frame = self.camera.get_frame()
frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
#self.image_view.setImage(frame.T)
image_item = ImageItem(frame)
self.image_view.addItem(image_item)
#height, width, channel = frame.shape
#bytesPerLine = 3 * width
#qimg = QImage(frame.data, width, height, bytesPerLine, QImage.Format_RGB888).rgbSwapped()
#self.label_image.setPixmap(QPixmap(qimg))
#self.update()
#print(height, width)
def update_movie(self):
#print(self.camera.last_frame.shape)
image_item = ImageItem(self.camera.last_frame)
self.image_view.addItem(image_item)
#self.image_view.setImage(self.camera.last_frame.T)
def update_brightness(self, value):
value /= 10
self.camera.set_brightness(value)
def start_movie(self):
self.movie_thread = MovieThread(self.camera, self.net, self.config)
self.movie_thread.start()
self.update_timer.start(30)
class MovieThread(QThread):
def __init__(self, camera, net, config):
super().__init__()
self.camera = camera
self.net = net
self.config = config
def run(self):
#self.camera.acquire_movie(500)
self.camera.detect_in_movie(500, self.net, self.config)
if __name__ == '__main__':
app = QApplication([])
window = StartWindow()
window.show()
app.exit(app.exec_()) | 0.511717 | 0.10581 |
import datetime
import os
import time
import pytest
from xxhash import xxh3_128
from ocdskingfisherarchive.crawl import Crawl
from ocdskingfisherarchive.exceptions import FutureDataVersionError, SourceMismatchError
from tests import crawl_fixture, create_crawl_directory, path
with open(path('data.json'), 'rb') as f:
checksum = xxh3_128(f.read()).hexdigest()
size = 239
current_time = time.time()
@pytest.mark.parametrize('mtime, expected', [
(current_time - 604800 + 60, 0),
(current_time - 604800 - 60, 1), # change if tests are slow
])
def test_all(mtime, expected, tmpdir, caplog):
create_crawl_directory(tmpdir, ['data.json'], 'log_error1.log')
os.utime(tmpdir.join('data', 'scotland', '20200902_052458'), (mtime, mtime))
crawls = list(Crawl.all(tmpdir.join('data'), tmpdir.join('logs', 'kingfisher')))
assert len(crawls) == expected
if expected:
assert crawls[0].source_id == 'scotland'
assert crawls[0].data_version == datetime.datetime(2020, 9, 2, 5, 24, 58)
assert crawls[0].data_directory == tmpdir.join('data')
assert crawls[0].scrapy_log_file.name == tmpdir.join('logs', 'kingfisher', 'scotland',
'307e8331edc801c691e21690db130256.log')
def test_all_not_existing(tmpdir):
assert list(Crawl.all(tmpdir, None)) == []
def test_all_spider_file(tmpdir):
file = tmpdir.join('source_id')
file.write('content')
assert list(Crawl.all(tmpdir, None)) == []
def test_all_spider_sample(tmpdir):
create_crawl_directory(tmpdir, ['data.json'], 'log_error1.log', source_id='scotland_sample')
assert list(Crawl.all(tmpdir.join('data'), tmpdir.join('logs', 'kingfisher'))) == []
def test_all_crawl_file(tmpdir):
file = tmpdir.mkdir('source_id').join('20200902_052458')
file.write('content')
assert list(Crawl.all(tmpdir, None)) == []
def test_all_crawl_format(tmpdir):
tmpdir.mkdir('source_id').mkdir('directory')
assert list(Crawl.all(tmpdir, None)) == []
@pytest.mark.parametrize('directory, expected', ([
('20200902_052458', datetime.datetime(2020, 9, 2, 5, 24, 58)),
('text', None),
]))
def test_parse_data_version(directory, expected):
assert Crawl.parse_data_version(directory) == expected
def test_str(tmpdir):
crawl = Crawl('scotland', '20200902_052458', tmpdir, None)
assert str(crawl) == os.path.join('scotland', '20200902_052458')
def test_directory(tmpdir):
crawl = Crawl('scotland', '20200902_052458', tmpdir, None)
assert crawl.local_directory == str(tmpdir.join('scotland', '20200902_052458'))
@pytest.mark.parametrize('data_files, log_file, expected', [
(None, 'log1.log', 'no_data_directory'),
([], 'log1.log', 'no_data_files'),
(['data.json'], None, 'no_log_file'),
(['data.json'], 'log_in_progress1.log', 'not_finished'),
(['data.json'], 'log_sample1.log', 'not_complete'),
(['data.json'], 'log_not_clean_enough.log', 'not_clean_enough'),
(['data.json'], 'log_error1.log', None),
])
def test_reject_reason(data_files, log_file, expected, tmpdir):
create_crawl_directory(tmpdir, data_files, log_file)
crawl = crawl_fixture(tmpdir)
assert crawl.reject_reason == expected
def test_reject_reason_cached(tmpdir):
create_crawl_directory(tmpdir, None, 'log1.log')
crawl = crawl_fixture(tmpdir)
assert crawl.reject_reason == 'no_data_directory'
assert crawl.reject_reason == 'no_data_directory'
@pytest.mark.parametrize('data_files, log_file, remote, expected', [
# Same month.
# Identical
(['data.json'], 'log_error1.log',
{'data_version': '20200901_000000', 'checksum': checksum, 'bytes': size, 'errors_count': 1, 'files_count': 2},
(False, 'same_period')),
# Same bytes
(['data.json'], 'log_error1.log',
{'data_version': '20200901_000000', 'bytes': size, 'errors_count': 2, 'files_count': 1},
(False, 'same_period')),
# More bytes, but not 50% more bytes
(['data.json'], 'log_error1.log',
{'data_version': '20200901_000000', 'bytes': size - 1, 'errors_count': 1, 'files_count': 2},
(False, 'same_period')),
# More bytes, but not 50% more files
(['data.json'], 'log_error1.log',
{'data_version': '20200901_000000', 'bytes': size - 1, 'errors_count': 1, 'files_count': 1.5},
(False, 'same_period')),
# More bytes, but less clean
(['data.json'], 'log_error1.log',
{'data_version': '20200901_000000', 'bytes': size - 1, 'errors_count': 0, 'files_count': 2},
(False, 'same_period')),
# More bytes, and 50% more bytes
(['data.json'], 'log_error1.log',
{'data_version': '20200901_000000', 'bytes': int(size // 1.5), 'errors_count': 1, 'files_count': 2},
(True, 'same_period_more_bytes')),
# More bytes, and 50% more files
(['data.json'], 'log_error1.log',
{'data_version': '20200901_000000', 'bytes': size - 1, 'errors_count': 1, 'files_count': 1},
(True, 'same_period_more_files')),
# More bytes, and more clean
(['data.json'], 'log_error1.log',
{'data_version': '20200901_000000', 'bytes': size - 1, 'errors_count': 2, 'files_count': 2},
(True, 'same_period_more_clean')),
# Earlier month.
# Identical
(['data.json'], 'log_error1.log',
{'data_version': '20200101_000000', 'checksum': checksum, 'bytes': size, 'errors_count': 1, 'files_count': 2},
(False, '2020_1_not_distinct')),
# Same errors
(['data.json'], 'log_error1.log',
{'data_version': '20200101_000000', 'bytes': size, 'errors_count': 1, 'files_count': 2},
(True, 'new_period')),
# More errors, fewer files, same bytes
(['data.json'], 'log_error1.log',
{'data_version': '20200101_000000', 'bytes': size, 'errors_count': 0, 'files_count': 3},
(False, '2020_1_not_distinct_maybe')),
# More errors, same files, fewer bytes
(['data.json'], 'log_error1.log',
{'data_version': '20200101_000000', 'bytes': size + 1, 'errors_count': 0, 'files_count': 2},
(False, '2020_1_not_distinct_maybe')),
])
def test_compare(data_files, log_file, remote, expected, archiver, tmpdir, caplog, monkeypatch):
create_crawl_directory(tmpdir, data_files, log_file)
remote['source_id'] = 'scotland'
remote.setdefault('checksum', 'other')
actual = crawl_fixture(tmpdir).compare(Crawl(**remote))
assert actual == expected
def test_compare_bad_source_id():
with pytest.raises(SourceMismatchError) as excinfo:
Crawl('scotland', '20200101_000000').compare(Crawl('united_kingdom', '20200101_000000'))
assert str(excinfo.value) == 'Crawl source mismatch: scotland != united_kingdom'
def test_compare_bad_data_version():
with pytest.raises(FutureDataVersionError) as excinfo:
Crawl('scotland', '20200101_000000').compare(Crawl('scotland', '20200201_000000'))
assert str(excinfo.value) == 'Future data version: 2020-02-01 00:00:00 > 2020-01-01 00:00:00'
def test_checksum(tmpdir):
file = tmpdir.join('test.json')
file.write('{"id": 1}')
spider_directory = tmpdir.mkdir('scotland')
file = spider_directory.join('test.json')
file.write('{"id": 1}')
crawl_directory = spider_directory.mkdir('20200902_052458')
file = crawl_directory.join('test.json')
file.write('{"id": 1}')
sub_directory = crawl_directory.mkdir('child')
file = sub_directory.join('test.json')
file.write('{"id": 100}')
crawl = Crawl('scotland', '20200902_052458', tmpdir, None)
assert crawl.checksum == '06bbee76269a3bd770704840395e8e10'
def test_checksum_empty(tmpdir):
crawl = Crawl('scotland', '20200902_052458', tmpdir, None)
assert crawl.checksum == '99aa06d3014798d86001c324468d497f'
def test_bytes(tmpdir):
file = tmpdir.join('test.json')
file.write('{"id": 1}')
spider_directory = tmpdir.mkdir('scotland')
file = spider_directory.join('test.json')
file.write('{"id": 1}')
crawl_directory = spider_directory.mkdir('20200902_052458')
file = crawl_directory.join('test.json')
file.write('{"id": 1}') # 9
sub_directory = crawl_directory.mkdir('child')
file = sub_directory.join('test.json')
file.write('{"id": 100}') # 11
crawl = Crawl('scotland', '20200902_052458', tmpdir, None)
assert crawl.bytes == 20
def test_bytes_empty(tmpdir):
crawl = Crawl('scotland', '20200902_052458', tmpdir, None)
assert crawl.bytes == 0
def test_asdict(tmpdir):
create_crawl_directory(tmpdir, ['data.json'], 'log_error1.log')
assert crawl_fixture(tmpdir).asdict() == {
'id': 'scotland/20200902_052458',
'source_id': 'scotland',
'data_version': '20200902_052458',
'bytes': None,
'checksum': None,
'files_count': None,
'errors_count': None,
'reject_reason': None,
'archived': None,
}
def test_asdict_cached(tmpdir):
create_crawl_directory(tmpdir, ['data.json'], 'log_error1.log')
assert crawl_fixture(tmpdir).asdict(cached=False) == {
'id': 'scotland/20200902_052458',
'source_id': 'scotland',
'data_version': '20200902_052458',
'bytes': 239,
'checksum': 'eba6c0bd00d10c54c3793ee13bcc114b',
'files_count': 2,
'errors_count': 1,
'reject_reason': None,
'archived': None,
} | tests/test_crawl.py | import datetime
import os
import time
import pytest
from xxhash import xxh3_128
from ocdskingfisherarchive.crawl import Crawl
from ocdskingfisherarchive.exceptions import FutureDataVersionError, SourceMismatchError
from tests import crawl_fixture, create_crawl_directory, path
with open(path('data.json'), 'rb') as f:
checksum = xxh3_128(f.read()).hexdigest()
size = 239
current_time = time.time()
@pytest.mark.parametrize('mtime, expected', [
(current_time - 604800 + 60, 0),
(current_time - 604800 - 60, 1), # change if tests are slow
])
def test_all(mtime, expected, tmpdir, caplog):
create_crawl_directory(tmpdir, ['data.json'], 'log_error1.log')
os.utime(tmpdir.join('data', 'scotland', '20200902_052458'), (mtime, mtime))
crawls = list(Crawl.all(tmpdir.join('data'), tmpdir.join('logs', 'kingfisher')))
assert len(crawls) == expected
if expected:
assert crawls[0].source_id == 'scotland'
assert crawls[0].data_version == datetime.datetime(2020, 9, 2, 5, 24, 58)
assert crawls[0].data_directory == tmpdir.join('data')
assert crawls[0].scrapy_log_file.name == tmpdir.join('logs', 'kingfisher', 'scotland',
'307e8331edc801c691e21690db130256.log')
def test_all_not_existing(tmpdir):
assert list(Crawl.all(tmpdir, None)) == []
def test_all_spider_file(tmpdir):
file = tmpdir.join('source_id')
file.write('content')
assert list(Crawl.all(tmpdir, None)) == []
def test_all_spider_sample(tmpdir):
create_crawl_directory(tmpdir, ['data.json'], 'log_error1.log', source_id='scotland_sample')
assert list(Crawl.all(tmpdir.join('data'), tmpdir.join('logs', 'kingfisher'))) == []
def test_all_crawl_file(tmpdir):
file = tmpdir.mkdir('source_id').join('20200902_052458')
file.write('content')
assert list(Crawl.all(tmpdir, None)) == []
def test_all_crawl_format(tmpdir):
tmpdir.mkdir('source_id').mkdir('directory')
assert list(Crawl.all(tmpdir, None)) == []
@pytest.mark.parametrize('directory, expected', ([
('20200902_052458', datetime.datetime(2020, 9, 2, 5, 24, 58)),
('text', None),
]))
def test_parse_data_version(directory, expected):
assert Crawl.parse_data_version(directory) == expected
def test_str(tmpdir):
crawl = Crawl('scotland', '20200902_052458', tmpdir, None)
assert str(crawl) == os.path.join('scotland', '20200902_052458')
def test_directory(tmpdir):
crawl = Crawl('scotland', '20200902_052458', tmpdir, None)
assert crawl.local_directory == str(tmpdir.join('scotland', '20200902_052458'))
@pytest.mark.parametrize('data_files, log_file, expected', [
(None, 'log1.log', 'no_data_directory'),
([], 'log1.log', 'no_data_files'),
(['data.json'], None, 'no_log_file'),
(['data.json'], 'log_in_progress1.log', 'not_finished'),
(['data.json'], 'log_sample1.log', 'not_complete'),
(['data.json'], 'log_not_clean_enough.log', 'not_clean_enough'),
(['data.json'], 'log_error1.log', None),
])
def test_reject_reason(data_files, log_file, expected, tmpdir):
create_crawl_directory(tmpdir, data_files, log_file)
crawl = crawl_fixture(tmpdir)
assert crawl.reject_reason == expected
def test_reject_reason_cached(tmpdir):
create_crawl_directory(tmpdir, None, 'log1.log')
crawl = crawl_fixture(tmpdir)
assert crawl.reject_reason == 'no_data_directory'
assert crawl.reject_reason == 'no_data_directory'
@pytest.mark.parametrize('data_files, log_file, remote, expected', [
# Same month.
# Identical
(['data.json'], 'log_error1.log',
{'data_version': '20200901_000000', 'checksum': checksum, 'bytes': size, 'errors_count': 1, 'files_count': 2},
(False, 'same_period')),
# Same bytes
(['data.json'], 'log_error1.log',
{'data_version': '20200901_000000', 'bytes': size, 'errors_count': 2, 'files_count': 1},
(False, 'same_period')),
# More bytes, but not 50% more bytes
(['data.json'], 'log_error1.log',
{'data_version': '20200901_000000', 'bytes': size - 1, 'errors_count': 1, 'files_count': 2},
(False, 'same_period')),
# More bytes, but not 50% more files
(['data.json'], 'log_error1.log',
{'data_version': '20200901_000000', 'bytes': size - 1, 'errors_count': 1, 'files_count': 1.5},
(False, 'same_period')),
# More bytes, but less clean
(['data.json'], 'log_error1.log',
{'data_version': '20200901_000000', 'bytes': size - 1, 'errors_count': 0, 'files_count': 2},
(False, 'same_period')),
# More bytes, and 50% more bytes
(['data.json'], 'log_error1.log',
{'data_version': '20200901_000000', 'bytes': int(size // 1.5), 'errors_count': 1, 'files_count': 2},
(True, 'same_period_more_bytes')),
# More bytes, and 50% more files
(['data.json'], 'log_error1.log',
{'data_version': '20200901_000000', 'bytes': size - 1, 'errors_count': 1, 'files_count': 1},
(True, 'same_period_more_files')),
# More bytes, and more clean
(['data.json'], 'log_error1.log',
{'data_version': '20200901_000000', 'bytes': size - 1, 'errors_count': 2, 'files_count': 2},
(True, 'same_period_more_clean')),
# Earlier month.
# Identical
(['data.json'], 'log_error1.log',
{'data_version': '20200101_000000', 'checksum': checksum, 'bytes': size, 'errors_count': 1, 'files_count': 2},
(False, '2020_1_not_distinct')),
# Same errors
(['data.json'], 'log_error1.log',
{'data_version': '20200101_000000', 'bytes': size, 'errors_count': 1, 'files_count': 2},
(True, 'new_period')),
# More errors, fewer files, same bytes
(['data.json'], 'log_error1.log',
{'data_version': '20200101_000000', 'bytes': size, 'errors_count': 0, 'files_count': 3},
(False, '2020_1_not_distinct_maybe')),
# More errors, same files, fewer bytes
(['data.json'], 'log_error1.log',
{'data_version': '20200101_000000', 'bytes': size + 1, 'errors_count': 0, 'files_count': 2},
(False, '2020_1_not_distinct_maybe')),
])
def test_compare(data_files, log_file, remote, expected, archiver, tmpdir, caplog, monkeypatch):
    # Parametrized: build a local crawl from ``data_files``/``log_file`` and
    # compare it to a remote crawl described by the ``remote`` dict.
    # ``expected`` is the (should_archive, reason) pair from Crawl.compare().
    create_crawl_directory(tmpdir, data_files, log_file)
    remote['source_id'] = 'scotland'
    # Most cases omit the checksum; default to a value that never matches.
    remote.setdefault('checksum', 'other')
    actual = crawl_fixture(tmpdir).compare(Crawl(**remote))
    assert actual == expected
def test_compare_bad_source_id():
    """Comparing crawls from different sources must raise SourceMismatchError."""
    local = Crawl('scotland', '20200101_000000')
    other = Crawl('united_kingdom', '20200101_000000')
    with pytest.raises(SourceMismatchError) as excinfo:
        local.compare(other)
    assert str(excinfo.value) == 'Crawl source mismatch: scotland != united_kingdom'
def test_compare_bad_data_version():
    """A remote crawl with a newer data version must raise FutureDataVersionError."""
    local = Crawl('scotland', '20200101_000000')
    newer = Crawl('scotland', '20200201_000000')
    with pytest.raises(FutureDataVersionError) as excinfo:
        local.compare(newer)
    assert str(excinfo.value) == 'Future data version: 2020-02-01 00:00:00 > 2020-01-01 00:00:00'
def test_checksum(tmpdir):
    """The checksum covers the crawl directory recursively and nothing else."""
    # Identical files outside the crawl directory must not affect the result.
    tmpdir.join('test.json').write('{"id": 1}')
    spider_directory = tmpdir.mkdir('scotland')
    spider_directory.join('test.json').write('{"id": 1}')
    crawl_directory = spider_directory.mkdir('20200902_052458')
    crawl_directory.join('test.json').write('{"id": 1}')
    # A file in a sub-directory of the crawl is included.
    crawl_directory.mkdir('child').join('test.json').write('{"id": 100}')
    crawl = Crawl('scotland', '20200902_052458', tmpdir, None)
    assert crawl.checksum == '06bbee76269a3bd770704840395e8e10'
def test_checksum_empty(tmpdir):
    """An empty crawl directory still yields a stable, well-defined checksum."""
    empty_crawl = Crawl('scotland', '20200902_052458', tmpdir, None)
    assert empty_crawl.checksum == '99aa06d3014798d86001c324468d497f'
def test_bytes(tmpdir):
    """``bytes`` sums file sizes inside the crawl directory only (9 + 11 = 20)."""
    # Files outside the crawl directory must not be counted.
    tmpdir.join('test.json').write('{"id": 1}')
    spider_directory = tmpdir.mkdir('scotland')
    spider_directory.join('test.json').write('{"id": 1}')
    crawl_directory = spider_directory.mkdir('20200902_052458')
    crawl_directory.join('test.json').write('{"id": 1}')  # 9 bytes
    crawl_directory.mkdir('child').join('test.json').write('{"id": 100}')  # 11 bytes
    crawl = Crawl('scotland', '20200902_052458', tmpdir, None)
    assert crawl.bytes == 20
def test_bytes_empty(tmpdir):
    """An empty crawl directory contains zero bytes."""
    empty_crawl = Crawl('scotland', '20200902_052458', tmpdir, None)
    assert empty_crawl.bytes == 0
def test_asdict(tmpdir):
    """By default asdict() leaves lazily-computed fields as None."""
    create_crawl_directory(tmpdir, ['data.json'], 'log_error1.log')
    expected = dict.fromkeys(
        ['bytes', 'checksum', 'files_count', 'errors_count', 'reject_reason', 'archived']
    )
    expected.update(
        id='scotland/20200902_052458',
        source_id='scotland',
        data_version='20200902_052458',
    )
    assert crawl_fixture(tmpdir).asdict() == expected
def test_asdict_cached(tmpdir):
    # With cached=False, the size/checksum/count fields are computed eagerly
    # from the crawl directory instead of being reported as None.
    create_crawl_directory(tmpdir, ['data.json'], 'log_error1.log')
    assert crawl_fixture(tmpdir).asdict(cached=False) == {
        'id': 'scotland/20200902_052458',
        'source_id': 'scotland',
        'data_version': '20200902_052458',
        'bytes': 239,
        'checksum': 'eba6c0bd00d10c54c3793ee13bcc114b',
        'files_count': 2,
        'errors_count': 1,
        'reject_reason': None,
        'archived': None,
} | 0.379493 | 0.405037 |
from typing import Any, Callable, Dict, List, Optional
import os
import torch
from torch.utils.data import Dataset
from catalyst.contrib.datasets.functional import (
download_and_extract_archive,
read_image_file,
read_label_file,
)
from catalyst.data.dataset.metric_learning import (
MetricLearningTrainDataset,
QueryGalleryDataset,
)
class MNIST(Dataset):
    """`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset."""
    # Indentation width used by __repr__ when formatting the summary body.
    _repr_indent = 4
    # (url, md5) pairs for the four raw IDX archives to download.
    resources = [
        (
            "http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz",
            "f68b3c2dcbeaaa9fbdd348bbdeb94873",
        ),
        (
            "http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz",
            "d53e105ee54ea40749a09fcbcd1e9432",
        ),
        (
            "http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz",
            "9fb629c4189551a2d022fa330f9573f3",
        ),
        (
            "http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz",
            "ec29112dd5afa0611ce80d1b7f02629c",
        ),
    ]
    # Filenames of the preprocessed (images, labels) tensor pairs, stored
    # under ``processed_folder`` by download().
    training_file = "training.pt"
    test_file = "test.pt"
    classes = [
        "0 - zero",
        "1 - one",
        "2 - two",
        "3 - three",
        "4 - four",
        "5 - five",
        "6 - six",
        "7 - seven",
        "8 - eight",
        "9 - nine",
    ]
    def __init__(
        self,
        root,
        train=True,
        transform=None,
        target_transform=None,
        download=False,
    ):
        """
        Args:
            root: Root directory of dataset where
                ``MNIST/processed/training.pt``
                and ``MNIST/processed/test.pt`` exist.
            train (bool, optional): If True, creates dataset from
                ``training.pt``, otherwise from ``test.pt``.
            download (bool, optional): If true, downloads the dataset from
                the internet and puts it in root directory. If dataset
                is already downloaded, it is not downloaded again.
            transform (callable, optional): A function/transform that
                takes in an image and returns a transformed version.
            target_transform (callable, optional): A function/transform
                that takes in the target and transforms it.

        Raises:
            RuntimeError: if the processed files are missing and
                ``download`` is False.
        """
        # NOTE(review): torch._six was removed in recent PyTorch releases;
        # isinstance(root, str) is the modern equivalent — confirm the
        # pinned torch version before upgrading.
        if isinstance(root, torch._six.string_classes):  # noqa: WPS437
            root = os.path.expanduser(root)
        self.root = root
        self.train = train  # training set or test set
        self.transform = transform
        self.target_transform = target_transform
        if download:
            self.download()
        if not self._check_exists():
            raise RuntimeError(
                "Dataset not found. You can use download=True to download it"
            )
        if self.train:
            data_file = self.training_file
        else:
            data_file = self.test_file
        # Image tensor and label tensor, as saved by download().
        self.data, self.targets = torch.load(
            os.path.join(self.processed_folder, data_file)
        )
    def __getitem__(self, index):
        """
        Args:
            index: Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        # Image is handed to the transform as a numpy array; target as int.
        img, target = self.data[index].numpy(), int(self.targets[index])
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
    def __len__(self):
        """Return the number of samples in the selected split."""
        return len(self.data)
    def __repr__(self):
        """Return a multi-line summary: class name, size, root and split."""
        head = "Dataset " + self.__class__.__name__
        body = ["Number of datapoints: {}".format(self.__len__())]
        if self.root is not None:
            body.append("Root location: {}".format(self.root))
        body += self.extra_repr().splitlines()
        if hasattr(self, "transforms") and self.transforms is not None:
            body += [repr(self.transforms)]
        lines = [head] + [" " * self._repr_indent + line for line in body]
        return "\n".join(lines)
    @property
    def raw_folder(self):
        """Directory for downloaded archives: ``<root>/<class name>/raw``."""
        return os.path.join(self.root, self.__class__.__name__, "raw")
    @property
    def processed_folder(self):
        """Directory for processed tensors: ``<root>/<class name>/processed``."""
        return os.path.join(self.root, self.__class__.__name__, "processed")
    @property
    def class_to_idx(self):
        """Map each class description in ``classes`` to its integer label."""
        return {_class: i for i, _class in enumerate(self.classes)}
    def _check_exists(self):
        # Both split files must be present for the dataset to be usable.
        return os.path.exists(
            os.path.join(self.processed_folder, self.training_file)
        ) and os.path.exists(
            os.path.join(self.processed_folder, self.test_file)
        )
    def download(self):
        """Download the MNIST data if it doesn't exist in processed_folder."""
        if self._check_exists():
            return
        os.makedirs(self.raw_folder, exist_ok=True)
        os.makedirs(self.processed_folder, exist_ok=True)
        # download files
        for url, md5 in self.resources:
            filename = url.rpartition("/")[2]
            download_and_extract_archive(
                url, download_root=self.raw_folder, filename=filename, md5=md5
            )
        # process and save as torch files
        print("Processing...")
        training_set = (
            read_image_file(
                os.path.join(self.raw_folder, "train-images-idx3-ubyte")
            ),
            read_label_file(
                os.path.join(self.raw_folder, "train-labels-idx1-ubyte")
            ),
        )
        test_set = (
            read_image_file(
                os.path.join(self.raw_folder, "t10k-images-idx3-ubyte")
            ),
            read_label_file(
                os.path.join(self.raw_folder, "t10k-labels-idx1-ubyte")
            ),
        )
        with open(
            os.path.join(self.processed_folder, self.training_file), "wb"
        ) as f:
            torch.save(training_set, f)
        with open(
            os.path.join(self.processed_folder, self.test_file), "wb"
        ) as f:
            torch.save(test_set, f)
        print("Done!")
    def extra_repr(self):
        """Return the split description used by ``__repr__``."""
        return "Split: {}".format("Train" if self.train is True else "Test")
class MnistMLDataset(MetricLearningTrainDataset, MNIST):
    """
    MNIST wrapper for the metric-learning training stage.

    Train-only: for the test stage use MnistQGDataset. Only the training
    split is used, restricted to images labelled 0, 1, 2, 3, 4.
    """
    # Labels >= _split are discarded by _filter().
    _split = 5
    classes = [
        "0 - zero",
        "1 - one",
        "2 - two",
        "3 - three",
        "4 - four",
    ]
    def __init__(self, **kwargs):
        """
        Raises:
            ValueError: if ``train=False`` is passed (this dataset is
                usable only for training)
        """
        if kwargs.get("train", True) is False:
            raise ValueError(
                "MnistMLDataset can be used only for training stage."
            )
        kwargs.setdefault("train", True)
        super(MnistMLDataset, self).__init__(**kwargs)
        self._filter()
    def get_labels(self) -> List[int]:
        """Return the digit label of every remaining sample as a list."""
        return self.targets.tolist()
    def _filter(self) -> None:
        """Keep only samples whose label is below ``_split`` (digits 0-4)."""
        keep = self.targets < self._split
        self.data, self.targets = self.data[keep], self.targets[keep]
class MnistQGDataset(QueryGalleryDataset):
    """
    MNIST for metric learning with query and gallery split.
    MnistQGDataset should be used for test stage.
    For this dataset we used only test part of the MNIST and only
    those images that are labeled as 5, 6, 7, 8, 9.
    """
    # Labels below _split are discarded by _filter().
    _split = 5
    classes = [
        "5 - five",
        "6 - six",
        "7 - seven",
        "8 - eight",
        "9 - nine",
    ]
    def __init__(
        self,
        root: str,
        transform: Optional[Callable] = None,
        gallery_fraq: Optional[float] = 0.2,
    ) -> None:
        """
        Args:
            root: root directory for storing dataset
            transform: transform applied to each image
            gallery_fraq: fraction of the (filtered) dataset used as
                the gallery; the rest are queries
        """
        self._mnist = MNIST(
            root, train=False, download=True, transform=transform
        )
        self._filter()
        # The first _query_size samples are queries; the rest form the gallery.
        self._gallery_size = int(gallery_fraq * len(self._mnist))
        self._query_size = len(self._mnist) - self._gallery_size
        self._is_query = torch.zeros(len(self._mnist)).type(torch.bool)
        self._is_query[: self._query_size] = True
    def _filter(self) -> None:
        """Filter MNIST dataset: select images of 5, 6, 7, 8, 9 classes."""
        mask = self._mnist.targets >= self._split
        self._mnist.data = self._mnist.data[mask]
        self._mnist.targets = self._mnist.targets[mask]
    def __getitem__(self, idx: int) -> Dict[str, Any]:
        """
        Get item method for dataset
        Args:
            idx: index of the object
        Returns:
            Dict with features, targets and is_query flag
        """
        image, label = self._mnist[idx]
        return {
            "features": image,
            "targets": label,
            "is_query": self._is_query[idx],
        }
    def __len__(self) -> int:
        """Length"""
        return len(self._mnist)
    def __repr__(self) -> str:
        """Print info about the dataset"""
        # Fixed: this returns the underlying dataset's repr string, so the
        # annotation is ``str`` (it was incorrectly annotated ``-> None``).
        return self._mnist.__repr__()
    @property
    def gallery_size(self) -> int:
        """Query Gallery dataset should have gallery_size property"""
        return self._gallery_size
    @property
    def query_size(self) -> int:
        """Query Gallery dataset should have query_size property"""
        return self._query_size
    @property
    def data(self) -> torch.Tensor:
        """Images from MNIST"""
        return self._mnist.data
    @property
    def targets(self) -> torch.Tensor:
        """Labels of digits"""
        return self._mnist.targets
__all__ = ["MNIST", "MnistMLDataset", "MnistQGDataset"] | catalyst/contrib/datasets/mnist.py | from typing import Any, Callable, Dict, List, Optional
import os
import torch
from torch.utils.data import Dataset
from catalyst.contrib.datasets.functional import (
download_and_extract_archive,
read_image_file,
read_label_file,
)
from catalyst.data.dataset.metric_learning import (
MetricLearningTrainDataset,
QueryGalleryDataset,
)
class MNIST(Dataset):
"""`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset."""
_repr_indent = 4
resources = [
(
"http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz",
"f68b3c2dcbeaaa9fbdd348bbdeb94873",
),
(
"http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz",
"d53e105ee54ea40749a09fcbcd1e9432",
),
(
"http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz",
"9fb629c4189551a2d022fa330f9573f3",
),
(
"http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz",
"ec29112dd5afa0611ce80d1b7f02629c",
),
]
training_file = "training.pt"
test_file = "test.pt"
classes = [
"0 - zero",
"1 - one",
"2 - two",
"3 - three",
"4 - four",
"5 - five",
"6 - six",
"7 - seven",
"8 - eight",
"9 - nine",
]
def __init__(
self,
root,
train=True,
transform=None,
target_transform=None,
download=False,
):
"""
Args:
root: Root directory of dataset where
``MNIST/processed/training.pt``
and ``MNIST/processed/test.pt`` exist.
train (bool, optional): If True, creates dataset from
``training.pt``, otherwise from ``test.pt``.
download (bool, optional): If true, downloads the dataset from
the internet and puts it in root directory. If dataset
is already downloaded, it is not downloaded again.
transform (callable, optional): A function/transform that
takes in an image and returns a transformed version.
target_transform (callable, optional): A function/transform
that takes in the target and transforms it.
"""
if isinstance(root, torch._six.string_classes): # noqa: WPS437
root = os.path.expanduser(root)
self.root = root
self.train = train # training set or test set
self.transform = transform
self.target_transform = target_transform
if download:
self.download()
if not self._check_exists():
raise RuntimeError(
"Dataset not found. You can use download=True to download it"
)
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
self.data, self.targets = torch.load(
os.path.join(self.processed_folder, data_file)
)
def __getitem__(self, index):
"""
Args:
index: Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index].numpy(), int(self.targets[index])
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
"""@TODO: Docs. Contribution is welcome."""
return len(self.data)
def __repr__(self):
"""@TODO: Docs. Contribution is welcome."""
head = "Dataset " + self.__class__.__name__
body = ["Number of datapoints: {}".format(self.__len__())]
if self.root is not None:
body.append("Root location: {}".format(self.root))
body += self.extra_repr().splitlines()
if hasattr(self, "transforms") and self.transforms is not None:
body += [repr(self.transforms)]
lines = [head] + [" " * self._repr_indent + line for line in body]
return "\n".join(lines)
@property
def raw_folder(self):
"""@TODO: Docs. Contribution is welcome."""
return os.path.join(self.root, self.__class__.__name__, "raw")
@property
def processed_folder(self):
"""@TODO: Docs. Contribution is welcome."""
return os.path.join(self.root, self.__class__.__name__, "processed")
@property
def class_to_idx(self):
"""@TODO: Docs. Contribution is welcome."""
return {_class: i for i, _class in enumerate(self.classes)}
def _check_exists(self):
return os.path.exists(
os.path.join(self.processed_folder, self.training_file)
) and os.path.exists(
os.path.join(self.processed_folder, self.test_file)
)
def download(self):
"""Download the MNIST data if it doesn't exist in processed_folder."""
if self._check_exists():
return
os.makedirs(self.raw_folder, exist_ok=True)
os.makedirs(self.processed_folder, exist_ok=True)
# download files
for url, md5 in self.resources:
filename = url.rpartition("/")[2]
download_and_extract_archive(
url, download_root=self.raw_folder, filename=filename, md5=md5
)
# process and save as torch files
print("Processing...")
training_set = (
read_image_file(
os.path.join(self.raw_folder, "train-images-idx3-ubyte")
),
read_label_file(
os.path.join(self.raw_folder, "train-labels-idx1-ubyte")
),
)
test_set = (
read_image_file(
os.path.join(self.raw_folder, "t10k-images-idx3-ubyte")
),
read_label_file(
os.path.join(self.raw_folder, "t10k-labels-idx1-ubyte")
),
)
with open(
os.path.join(self.processed_folder, self.training_file), "wb"
) as f:
torch.save(training_set, f)
with open(
os.path.join(self.processed_folder, self.test_file), "wb"
) as f:
torch.save(test_set, f)
print("Done!")
def extra_repr(self):
"""@TODO: Docs. Contribution is welcome."""
return "Split: {}".format("Train" if self.train is True else "Test")
class MnistMLDataset(MetricLearningTrainDataset, MNIST):
"""
Simple wrapper for MNIST dataset for metric learning train stage.
This dataset can be used only for training. For test stage
use MnistQGDataset.
For this dataset we use only training part of the MNIST and only
those images that are labeled as 0, 1, 2, 3, 4.
"""
_split = 5
classes = [
"0 - zero",
"1 - one",
"2 - two",
"3 - three",
"4 - four",
]
def __init__(self, **kwargs):
"""
Raises:
ValueError: if train argument is False (MnistMLDataset
should be used only for training)
"""
if "train" in kwargs:
if kwargs["train"] is False:
raise ValueError(
"MnistMLDataset can be used only for training stage."
)
else:
kwargs["train"] = True
super(MnistMLDataset, self).__init__(**kwargs)
self._filter()
def get_labels(self) -> List[int]:
"""
Returns:
labels of digits
"""
return self.targets.tolist()
def _filter(self) -> None:
"""Filter MNIST dataset: select images of 0, 1, 2, 3, 4 classes."""
mask = self.targets < self._split
self.data = self.data[mask]
self.targets = self.targets[mask]
class MnistQGDataset(QueryGalleryDataset):
"""
MNIST for metric learning with query and gallery split.
MnistQGDataset should be used for test stage.
For this dataset we used only test part of the MNIST and only
those images that are labeled as 5, 6, 7, 8, 9.
"""
_split = 5
classes = [
"5 - five",
"6 - six",
"7 - seven",
"8 - eight",
"9 - nine",
]
def __init__(
self,
root: str,
transform: Optional[Callable] = None,
gallery_fraq: Optional[float] = 0.2,
) -> None:
"""
Args:
root: root directory for storing dataset
transform: transform
gallery_fraq: gallery size
"""
self._mnist = MNIST(
root, train=False, download=True, transform=transform
)
self._filter()
self._gallery_size = int(gallery_fraq * len(self._mnist))
self._query_size = len(self._mnist) - self._gallery_size
self._is_query = torch.zeros(len(self._mnist)).type(torch.bool)
self._is_query[: self._query_size] = True
def _filter(self) -> None:
"""Filter MNIST dataset: select images of 5, 6, 7, 8, 9 classes."""
mask = self._mnist.targets >= self._split
self._mnist.data = self._mnist.data[mask]
self._mnist.targets = self._mnist.targets[mask]
def __getitem__(self, idx: int) -> Dict[str, Any]:
"""
Get item method for dataset
Args:
idx: index of the object
Returns:
Dict with features, targets and is_query flag
"""
image, label = self._mnist[idx]
return {
"features": image,
"targets": label,
"is_query": self._is_query[idx],
}
def __len__(self) -> int:
"""Length"""
return len(self._mnist)
def __repr__(self) -> None:
"""Print info about the dataset"""
return self._mnist.__repr__()
@property
def gallery_size(self) -> int:
"""Query Gallery dataset should have gallery_size property"""
return self._gallery_size
@property
def query_size(self) -> int:
"""Query Gallery dataset should have query_size property"""
return self._query_size
@property
def data(self) -> torch.Tensor:
"""Images from MNIST"""
return self._mnist.data
@property
def targets(self) -> torch.Tensor:
"""Labels of digits"""
return self._mnist.targets
__all__ = ["MNIST", "MnistMLDataset", "MnistQGDataset"] | 0.840062 | 0.394463 |
from __future__ import division
import math
import torch
from torch import nn
from torch.nn import functional as F
# Factorised NoisyLinear layer with bias
class NoisyLinear(nn.Module):
    """Linear layer with factorised Gaussian parameter noise (NoisyNet).

    In training mode the effective weight/bias are ``mu + sigma * eps``,
    where the noise ``eps`` is factorised over inputs and outputs; in eval
    mode only the deterministic ``mu`` parameters are used.
    """
    def __init__(self, in_features, out_features, std_init=0.5):
        super(NoisyLinear, self).__init__()
        self.module_name = 'noisy_linear'
        self.in_features = in_features
        self.out_features = out_features
        self.std_init = std_init
        # Learnable means/stddevs; epsilon buffers hold the sampled noise
        # (buffers travel with state_dict/device moves but are not trained).
        self.weight_mu = nn.Parameter(torch.empty(out_features, in_features))
        self.weight_sigma = nn.Parameter(torch.empty(out_features, in_features))
        self.register_buffer('weight_epsilon', torch.empty(out_features, in_features))
        self.bias_mu = nn.Parameter(torch.empty(out_features))
        self.bias_sigma = nn.Parameter(torch.empty(out_features))
        self.register_buffer('bias_epsilon', torch.empty(out_features))
        self.reset_parameters()
        self.reset_noise()
    def reset_parameters(self):
        """Initialise mu uniformly within ±1/sqrt(fan_in), sigma to a constant."""
        bound = 1 / math.sqrt(self.in_features)
        self.weight_mu.data.uniform_(-bound, bound)
        self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features))
        self.bias_mu.data.uniform_(-bound, bound)
        self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features))
    def _scale_noise(self, size):
        # f(x) = sign(x) * sqrt(|x|), applied to standard-normal samples.
        noise = torch.randn(size)
        return noise.sign().mul_(noise.abs().sqrt_())
    def reset_noise(self):
        """Resample factorised noise: eps_w = f(eps_out) outer f(eps_in)."""
        eps_in = self._scale_noise(self.in_features)
        eps_out = self._scale_noise(self.out_features)
        self.weight_epsilon.copy_(eps_out.ger(eps_in))
        self.bias_epsilon.copy_(eps_out)
    def forward(self, input):
        if not self.training:
            # Deterministic pass: learned means only.
            return F.linear(input, self.weight_mu, self.bias_mu)
        weight = self.weight_mu + self.weight_sigma * self.weight_epsilon
        bias = self.bias_mu + self.bias_sigma * self.bias_epsilon
        return F.linear(input, weight, bias)
class DQN(nn.Module):
  """Rainbow value network: distributional dueling DQN with noisy layers.

  ``forward`` returns per-action probability distributions over ``atoms``
  support points, plus an optional CURL contrastive embedding.
  """
  def __init__(self, args, action_space,curl=False):
    super(DQN, self).__init__()
    self.atoms = args.atoms
    self.action_space = action_space
    self.curl=curl
    # Two encoder variants; conv_output_size is the flattened feature width.
    # NOTE(review): the hard-coded sizes (3136 / 576) assume a fixed input
    # resolution — confirm against the environment's observation shape.
    if args.architecture == 'canonical':
      self.convs = nn.Sequential(nn.Conv2d(args.history_length, 32, 8, stride=4, padding=0), nn.ReLU(),
                                 nn.Conv2d(32, 64, 4, stride=2, padding=0), nn.ReLU(),
                                 nn.Conv2d(64, 64, 3, stride=1, padding=0), nn.ReLU())
      self.conv_output_size = 3136
    elif args.architecture == 'data-efficient':
      self.convs = nn.Sequential(nn.Conv2d(args.history_length, 32, 5, stride=5, padding=0), nn.ReLU(),
                                 nn.Conv2d(32, 64, 5, stride=5, padding=0), nn.ReLU())
      self.conv_output_size = 576
    # Dueling heads built from noisy layers: value and advantage streams.
    self.fc_h_v = NoisyLinear(self.conv_output_size, args.hidden_size, std_init=args.noisy_std)
    self.fc_h_a = NoisyLinear(self.conv_output_size, args.hidden_size, std_init=args.noisy_std)
    self.fc_z_v = NoisyLinear(args.hidden_size, self.atoms, std_init=args.noisy_std)
    self.fc_z_a = NoisyLinear(args.hidden_size, action_space * self.atoms, std_init=args.noisy_std)
    # Parameters of the contrastive projection head (used when curl=True).
    self.W_h = nn.Parameter(torch.rand(self.conv_output_size, args.hidden_size))
    self.W_c = nn.Parameter(torch.rand(args.hidden_size, 128))
    self.b_h = nn.Parameter(torch.zeros(args.hidden_size))
    self.b_c = nn.Parameter(torch.zeros(128))
    # W appears unused in this block — presumably the CURL bilinear score
    # matrix used by the training loop; confirm against the caller.
    self.W = nn.Parameter(torch.rand(128, 128))
    self.ln1 = nn.LayerNorm(args.hidden_size)
    self.ln2 = nn.LayerNorm(128)
  def forward(self, x, log=False):
    """Return (q, h): per-action atom (log-)probabilities, and the
    contrastive embedding (None unless ``curl`` is enabled)."""
    x = self.convs(x)
    x = x.view(-1, self.conv_output_size)
    v = self.fc_z_v(F.relu(self.fc_h_v(x)))  # Value stream
    a = self.fc_z_a(F.relu(self.fc_h_a(x)))  # Advantage stream
    h=None
    if self.curl:
      h = torch.matmul(x, self.W_h) + self.b_h  # Contrastive head
      h = self.ln1(h)
      h = F.relu(h)
      h = torch.matmul(h, self.W_c) + self.b_c  # Contrastive head
      h = self.ln2(h)
    v, a = v.view(-1, 1, self.atoms), a.view(-1, self.action_space, self.atoms)
    q = v + a - a.mean(1, keepdim=True)  # Combine streams
    if log:  # Use log softmax for numerical stability
      q = F.log_softmax(q, dim=2)  # Log probabilities with action over second dimension
    else:
      q = F.softmax(q, dim=2)  # Probabilities with action over second dimension
    return q, h
  def reset_noise(self):
    """Resample the noise in every noisy fully-connected layer."""
    for name, module in self.named_children():
      if 'fc' in name:
module.reset_noise() | model.py | from __future__ import division
import math
import torch
from torch import nn
from torch.nn import functional as F
# Factorised NoisyLinear layer with bias
class NoisyLinear(nn.Module):
def __init__(self, in_features, out_features, std_init=0.5):
super(NoisyLinear, self).__init__()
self.module_name = 'noisy_linear'
self.in_features = in_features
self.out_features = out_features
self.std_init = std_init
self.weight_mu = nn.Parameter(torch.empty(out_features, in_features))
self.weight_sigma = nn.Parameter(torch.empty(out_features, in_features))
self.register_buffer('weight_epsilon', torch.empty(out_features, in_features))
self.bias_mu = nn.Parameter(torch.empty(out_features))
self.bias_sigma = nn.Parameter(torch.empty(out_features))
self.register_buffer('bias_epsilon', torch.empty(out_features))
self.reset_parameters()
self.reset_noise()
def reset_parameters(self):
mu_range = 1 / math.sqrt(self.in_features)
self.weight_mu.data.uniform_(-mu_range, mu_range)
self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features))
self.bias_mu.data.uniform_(-mu_range, mu_range)
self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features))
def _scale_noise(self, size):
x = torch.randn(size)
return x.sign().mul_(x.abs().sqrt_())
def reset_noise(self):
epsilon_in = self._scale_noise(self.in_features)
epsilon_out = self._scale_noise(self.out_features)
self.weight_epsilon.copy_(epsilon_out.ger(epsilon_in))
self.bias_epsilon.copy_(epsilon_out)
def forward(self, input):
if self.training:
return F.linear(input, self.weight_mu + self.weight_sigma * self.weight_epsilon, self.bias_mu + self.bias_sigma * self.bias_epsilon)
else:
return F.linear(input, self.weight_mu, self.bias_mu)
class DQN(nn.Module):
def __init__(self, args, action_space,curl=False):
super(DQN, self).__init__()
self.atoms = args.atoms
self.action_space = action_space
self.curl=curl
if args.architecture == 'canonical':
self.convs = nn.Sequential(nn.Conv2d(args.history_length, 32, 8, stride=4, padding=0), nn.ReLU(),
nn.Conv2d(32, 64, 4, stride=2, padding=0), nn.ReLU(),
nn.Conv2d(64, 64, 3, stride=1, padding=0), nn.ReLU())
self.conv_output_size = 3136
elif args.architecture == 'data-efficient':
self.convs = nn.Sequential(nn.Conv2d(args.history_length, 32, 5, stride=5, padding=0), nn.ReLU(),
nn.Conv2d(32, 64, 5, stride=5, padding=0), nn.ReLU())
self.conv_output_size = 576
self.fc_h_v = NoisyLinear(self.conv_output_size, args.hidden_size, std_init=args.noisy_std)
self.fc_h_a = NoisyLinear(self.conv_output_size, args.hidden_size, std_init=args.noisy_std)
self.fc_z_v = NoisyLinear(args.hidden_size, self.atoms, std_init=args.noisy_std)
self.fc_z_a = NoisyLinear(args.hidden_size, action_space * self.atoms, std_init=args.noisy_std)
self.W_h = nn.Parameter(torch.rand(self.conv_output_size, args.hidden_size))
self.W_c = nn.Parameter(torch.rand(args.hidden_size, 128))
self.b_h = nn.Parameter(torch.zeros(args.hidden_size))
self.b_c = nn.Parameter(torch.zeros(128))
self.W = nn.Parameter(torch.rand(128, 128))
self.ln1 = nn.LayerNorm(args.hidden_size)
self.ln2 = nn.LayerNorm(128)
def forward(self, x, log=False):
x = self.convs(x)
x = x.view(-1, self.conv_output_size)
v = self.fc_z_v(F.relu(self.fc_h_v(x))) # Value stream
a = self.fc_z_a(F.relu(self.fc_h_a(x))) # Advantage stream
h=None
if self.curl:
h = torch.matmul(x, self.W_h) + self.b_h # Contrastive head
h = self.ln1(h)
h = F.relu(h)
h = torch.matmul(h, self.W_c) + self.b_c # Contrastive head
h = self.ln2(h)
v, a = v.view(-1, 1, self.atoms), a.view(-1, self.action_space, self.atoms)
q = v + a - a.mean(1, keepdim=True) # Combine streams
if log: # Use log softmax for numerical stability
q = F.log_softmax(q, dim=2) # Log probabilities with action over second dimension
else:
q = F.softmax(q, dim=2) # Probabilities with action over second dimension
return q, h
def reset_noise(self):
for name, module in self.named_children():
if 'fc' in name:
module.reset_noise() | 0.942599 | 0.579995 |
import logging
import os
import difflib
from urllib import quote
from django.utils import simplejson
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
import access
import model
import settings
import util
DEFAULT_LABEL_BODY = u"""name: %(title)s
---
# %(title)s
Pages in this category:
[[List:%(label)s]]
_This is an automatically generated page._
"""
def render(template_name, data):
    """Render ``template_name`` with ``data``, filling in common context.

    Adds the current wiki user, a log-in/out URL, the admin flag, sidebar,
    footer, settings and base URL — unless the caller already supplied them.

    Raises:
        Exception: if the template file does not exist.
    """
    filename = os.path.join(os.path.dirname(__file__), 'templates', template_name)
    if not os.path.exists(filename):
        raise Exception('Template %s not found.' % template_name)
    if 'user' not in data:
        data['user'] = model.WikiUser.get_or_create(users.get_current_user())
    # Logged-in users get a log-out link pointing back to the current page;
    # anonymous visitors get a log-in link instead.
    if data['user']:
        data['log_out_url'] = users.create_logout_url(quote(os.environ['PATH_INFO']))
    else:
        data['log_in_url'] = users.create_login_url(quote(os.environ['PATH_INFO']))
    if 'is_admin' not in data:
        data['is_admin'] = users.is_current_user_admin()
    if 'sidebar' not in data:
        data['sidebar'] = get_sidebar()
    if 'footer' not in data:
        data['footer'] = get_footer()
    if 'settings' not in data:
        data['settings'] = settings.get_all()
    if 'base' not in data:
        data['base'] = util.get_base_url()
    return template.render(filename, data)
def get_sidebar():
    """Return the sidebar markup, falling back to a default stub."""
    page_name = settings.get('sidebar', 'gaewiki:sidebar')
    page = model.WikiContent.get_by_title(page_name)
    if not page.is_saved():
        # No sidebar page exists yet: show the default logo and hint text.
        return u'<a href="/"><img src="/gae-wiki-static/logo-186.png" width="186" alt="logo" height="167"/></a>\n\nThis is a good place for a brief introduction to your wiki, a logo and such things.\n\n[Edit this text](/w/edit?page=%s)' % page_name
    return page.body
def get_footer():
    """Return the footer markup, falling back to a default credit line."""
    page_name = settings.get('footer', 'gaewiki:footer')
    page = model.WikiContent.get_by_title(page_name)
    if not page.is_saved():
        return u'This wiki is built with [GAEWiki](http://gaewiki.googlecode.com/).'
    return page.body
def view_page(page, user=None, is_admin=False, revision=None):
    """Render a wiki page, or one of its revisions.

    Follows redirects, synthesizes a body for empty "Label:" pages, and adds
    edit-permission and map-link context for the template.
    """
    page = page.get_redirected()
    # Empty category pages get an auto-generated listing body.
    if page.title.startswith("Label:") and not page.body:
        page.body = DEFAULT_LABEL_BODY % {"title": page.title, "label": page.title[6:]}
    data = {
        'page': page,
        'is_admin': is_admin,
        'is_plain': page.get_property('format') == 'plain',
        'can_edit': access.can_edit_page(page.title, user, is_admin),
        'page_labels': page.get_property('labels', []),
        'revision': revision,
    }
    # logging.debug(data)
    if settings.get('enable-map'):
        # Pages with a map_label link to the multi-page map; otherwise
        # editors (and pages that are already geotagged) get a single-page map.
        if page.get_property('map_label'):
            data['map_url'] = '/w/pages/map?label=' + util.uurlencode(page.get_property('map_label'))
        elif data['can_edit'] or page.geopt:
            data['map_url'] = '/w/map?page=' + util.uurlencode(page.title)
    if revision:
        logging.debug(u'Viewing page "%s" (revision %s)' % (data['page'].title, revision))
    else:
        logging.debug(u'Viewing page "%s"' % data['page'].title)
    return render('view_page.html', data)
def edit_page(page, comment=None):
    """Render the page-editing form for ``page``."""
    logging.debug(u'Editing page "%s"' % page.title)
    context = {
        'page': page,
        'comment': comment,
    }
    return render('edit_page.html', context)
def list_pages(pages):
    """Render the site index as a nested bullet list of page titles.

    Titles containing '/' are treated as paths; missing intermediate parent
    entries are inserted so the hierarchy is complete. Assumes ``pages`` is
    sorted by title — TODO confirm with the caller.
    """
    logging.debug(u'Listing %u pages.' % len(pages))
    def link_line(title):
        # Indent by path depth; use a plain markdown link when the title
        # contains ':' and a [[wiki link]] otherwise.
        return (' ' * title.count('/')) + '- ' + ('[' + title + '](/' + title + ')' if ':' in title else '[[' + title + ']]')
    # Each entry is (title, rendered line); the title is kept so the next
    # iteration can check which parents are already present.
    lines = []
    for page in pages:
        title = page.title
        if '/' in title:
            parent = title[:title.rfind('/')]
            if not lines or lines[-1][0] != parent:
                # Insert entries for every ancestor that is not already there.
                parent_path = parent.split('/')
                for depth in xrange(1, len(parent_path)+1):
                    target = '/'.join(parent_path[:depth])
                    if not lines or not lines[-1][0].startswith(target):
                        lines.append((target, link_line(target)))
        lines.append((title, link_line(title)))
    return render('index.html', {
        'pages': pages,
        'html': util.wikify(util.parse_markdown('\n'.join(line[1] for line in lines))),
    })
def list_pages_feed(pages):
    """Render the page index as an RSS feed."""
    logging.debug(u'Listing %u pages.' % len(pages))
    return render('index.rss', {
        'pages': pages,
    })
def show_page_history(page, user=None, is_admin=False):
    """Render the revision history of a single page."""
    return render('history.html', {
        'page_title': page.title,
        'page': page,
        'revisions': page.get_history(),
        'can_edit': access.can_edit_page(page.title, user, is_admin),
    })
def get_sitemap(pages):
    """Render the sitemap.xml document for the given pages."""
    return render('sitemap.xml', {
        'pages': pages,
    })
def get_change_list(pages):
    """Render the list of recently changed pages as HTML."""
    return render('changes.html', {
        'pages': pages,
    })
def get_change_feed(pages):
    """Render the list of recently changed pages as an RSS feed."""
    return render('changes.rss', {
        'pages': pages,
    })
def get_backlinks(page, links):
    """Render the list of pages that link to ``page``."""
    return render('backlinks.html', {
        'page_title': page.title,
        'page_links': links,
    })
def get_users(users):
    """Render the wiki user list.

    NOTE(review): the parameter shadows the module-level ``users`` import
    from google.appengine.api — harmless here, but easy to trip over.
    """
    return render('users.html', {
        'users': users,
    })
def get_import_form():
    """Render the page import form."""
    return render('import.html', {})
def show_interwikis(iw):
    """Render the interwiki configuration listing."""
    return render('interwiki.html', {
        'interwiki': iw,
    })
def show_profile(wiki_user):
    """Render a user's profile page."""
    return render('profile.html', {
        'user': wiki_user,
    })
def show_page_map(label):
    """Renders the base page map code."""
    return render('page_map.html', {
        'map_label': label.replace('_', ' '),
    })
def show_single_page_map(page):
    """Renders a page that displays a page on the map."""
    # Fallback coordinates used when the page has no 'geo' property.
    pt = page.get_property('geo', default='61.72160269540121, 94.21821875')
    return render('single_page_map.html', {
        'page': page,
        'page_ll': pt.split(',')
    })
def show_pages_map_data(pages):
"""Returns the JavaScript with markers."""
data = {
'bounds': {
'minlat': 999,
'minlng': 999,
'maxlat': 0,
'maxlng': 0,
},
'markers': [],
'length': len(pages),
}
for page in pages:
lat = page.geopt.lat
lng = page.geopt.lon
if lat < data['bounds']['minlat']:
data['bounds']['minlat'] = lat
if lng < data['bounds']['minlng']:
data['bounds']['minlng'] = lng
if lat > data['bounds']['maxlat']:
data['bounds']['maxlat'] = lat
if lng > data['bounds']['maxlng']:
data['bounds']['maxlng'] = lng
data['markers'].append({
'lat': lat,
'lng': lng,
'title': page.title,
'html': render('map_info_window.html', {'page': page}).decode('utf-8'),
})
return 'var map_data = ' + simplejson.dumps(data) + ';'
def view_image_upload_page(user, is_admin, submit_url):
data = {
"user": user,
"is_admin": is_admin,
"submit_url": submit_url,
}
return render("upload_image.html", data)
def view_image(img, user, is_admin):
data = {
"image": img,
"user": user,
"is_admin": is_admin,
}
return render("view_image.html", data)
def view_image_list(lst, user, is_admin):
data = {
"images": lst,
"user": user,
"is_admin": is_admin,
}
return render("image_list.html", data)
def view_diff(r1, r2, user, is_admin):
a = r1.revision_body
if hasattr(r2, 'revision_body'):
b = r2.revision_body
else:
b = r2.body
seqm = difflib.SequenceMatcher(None, a, b)
output = []
for opcode, a0, a1, b0, b1 in seqm.get_opcodes():
if opcode == 'equal':
output.append(seqm.a[a0:a1])
elif opcode == 'insert':
output.append("<ins>" + seqm.b[b0:b1] + "</ins>")
elif opcode == 'delete':
output.append("<del>" + seqm.a[a0:a1] + "</del>")
elif opcode == 'replace':
output.append("<del>" + seqm.a[a0:a1] + "</del>")
output.append("<ins>" + seqm.b[b0:b1] + "</ins>")
else:
raise RuntimeError, "unexpected opcode"
data = {
"r1": r1,
"r2": r2,
"r1updated": r1.updated if hasattr(r1, 'updated') else r1.created,
"r2updated": r2.updated if hasattr(r2, 'updated') else r2.created,
"page_title": r2.title,
"diff_html": ''.join(output),
"user": user,
"is_admin": is_admin,
'can_edit': access.can_edit_page(r2.title, user, is_admin),
}
return render("diff.html", data) | gaewiki/view.py |
import logging
import os
import difflib
from urllib import quote
from django.utils import simplejson
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
import access
import model
import settings
import util
DEFAULT_LABEL_BODY = u"""name: %(title)s
---
# %(title)s
Pages in this category:
[[List:%(label)s]]
_This is an automatically generated page._
"""
def render(template_name, data):
filename = os.path.join(os.path.dirname(__file__), 'templates', template_name)
if not os.path.exists(filename):
raise Exception('Template %s not found.' % template_name)
if 'user' not in data:
data['user'] = model.WikiUser.get_or_create(users.get_current_user())
if data['user']:
data['log_out_url'] = users.create_logout_url(quote(os.environ['PATH_INFO']))
else:
data['log_in_url'] = users.create_login_url(quote(os.environ['PATH_INFO']))
if 'is_admin' not in data:
data['is_admin'] = users.is_current_user_admin()
if 'sidebar' not in data:
data['sidebar'] = get_sidebar()
if 'footer' not in data:
data['footer'] = get_footer()
if 'settings' not in data:
data['settings'] = settings.get_all()
if 'base' not in data:
data['base'] = util.get_base_url()
return template.render(filename, data)
def get_sidebar():
page_name = settings.get('sidebar', 'gaewiki:sidebar')
page = model.WikiContent.get_by_title(page_name)
if page.is_saved():
body = page.body
else:
body = u'<a href="/"><img src="/gae-wiki-static/logo-186.png" width="186" alt="logo" height="167"/></a>\n\nThis is a good place for a brief introduction to your wiki, a logo and such things.\n\n[Edit this text](/w/edit?page=%s)' % page_name
return body
def get_footer():
page_name = settings.get('footer', 'gaewiki:footer')
page = model.WikiContent.get_by_title(page_name)
if page.is_saved():
body = page.body
else:
body = u'This wiki is built with [GAEWiki](http://gaewiki.googlecode.com/).'
return body
def view_page(page, user=None, is_admin=False, revision=None):
page = page.get_redirected()
if page.title.startswith("Label:") and not page.body:
page.body = DEFAULT_LABEL_BODY % {"title": page.title, "label": page.title[6:]}
data = {
'page': page,
'is_admin': is_admin,
'is_plain': page.get_property('format') == 'plain',
'can_edit': access.can_edit_page(page.title, user, is_admin),
'page_labels': page.get_property('labels', []),
'revision': revision,
}
# logging.debug(data)
if settings.get('enable-map'):
if page.get_property('map_label'):
data['map_url'] = '/w/pages/map?label=' + util.uurlencode(page.get_property('map_label'))
elif data['can_edit'] or page.geopt:
data['map_url'] = '/w/map?page=' + util.uurlencode(page.title)
if revision:
logging.debug(u'Viewing page "%s" (revision %s)' % (data['page'].title, revision))
else:
logging.debug(u'Viewing page "%s"' % data['page'].title)
return render('view_page.html', data)
def edit_page(page, comment=None):
logging.debug(u'Editing page "%s"' % page.title)
return render('edit_page.html', {
'page': page,
'comment': comment,
})
def list_pages(pages):
logging.debug(u'Listing %u pages.' % len(pages))
def link_line(title):
return (' ' * title.count('/')) + '- ' + ('[' + title + '](/' + title + ')' if ':' in title else '[[' + title + ']]')
lines = []
for page in pages:
title = page.title
if '/' in title:
parent = title[:title.rfind('/')]
if not lines or lines[-1][0] != parent:
parent_path = parent.split('/')
for depth in xrange(1, len(parent_path)+1):
target = '/'.join(parent_path[:depth])
if not lines or not lines[-1][0].startswith(target):
lines.append((target, link_line(target)))
lines.append((title, link_line(title)))
return render('index.html', {
'pages': pages,
'html': util.wikify(util.parse_markdown('\n'.join(line[1] for line in lines))),
})
def list_pages_feed(pages):
logging.debug(u'Listing %u pages.' % len(pages))
return render('index.rss', {
'pages': pages,
})
def show_page_history(page, user=None, is_admin=False):
return render('history.html', {
'page_title': page.title,
'page': page,
'revisions': page.get_history(),
'can_edit': access.can_edit_page(page.title, user, is_admin),
})
def get_sitemap(pages):
return render('sitemap.xml', {
'pages': pages,
})
def get_change_list(pages):
return render('changes.html', {
'pages': pages,
})
def get_change_feed(pages):
return render('changes.rss', {
'pages': pages,
})
def get_backlinks(page, links):
return render('backlinks.html', {
'page_title': page.title,
'page_links': links,
})
def get_users(users):
return render('users.html', {
'users': users,
})
def get_import_form():
return render('import.html', {})
def show_interwikis(iw):
return render('interwiki.html', {
'interwiki': iw,
})
def show_profile(wiki_user):
return render('profile.html', {
'user': wiki_user,
})
def show_page_map(label):
"""Renders the base page map code."""
return render('page_map.html', {
'map_label': label.replace('_', ' '),
})
def show_single_page_map(page):
"""Renders a page that displays a page on the map."""
pt = page.get_property('geo', default='61.72160269540121, 94.21821875')
return render('single_page_map.html', {
'page': page,
'page_ll': pt.split(',')
})
def show_pages_map_data(pages):
"""Returns the JavaScript with markers."""
data = {
'bounds': {
'minlat': 999,
'minlng': 999,
'maxlat': 0,
'maxlng': 0,
},
'markers': [],
'length': len(pages),
}
for page in pages:
lat = page.geopt.lat
lng = page.geopt.lon
if lat < data['bounds']['minlat']:
data['bounds']['minlat'] = lat
if lng < data['bounds']['minlng']:
data['bounds']['minlng'] = lng
if lat > data['bounds']['maxlat']:
data['bounds']['maxlat'] = lat
if lng > data['bounds']['maxlng']:
data['bounds']['maxlng'] = lng
data['markers'].append({
'lat': lat,
'lng': lng,
'title': page.title,
'html': render('map_info_window.html', {'page': page}).decode('utf-8'),
})
return 'var map_data = ' + simplejson.dumps(data) + ';'
def view_image_upload_page(user, is_admin, submit_url):
data = {
"user": user,
"is_admin": is_admin,
"submit_url": submit_url,
}
return render("upload_image.html", data)
def view_image(img, user, is_admin):
data = {
"image": img,
"user": user,
"is_admin": is_admin,
}
return render("view_image.html", data)
def view_image_list(lst, user, is_admin):
data = {
"images": lst,
"user": user,
"is_admin": is_admin,
}
return render("image_list.html", data)
def view_diff(r1, r2, user, is_admin):
a = r1.revision_body
if hasattr(r2, 'revision_body'):
b = r2.revision_body
else:
b = r2.body
seqm = difflib.SequenceMatcher(None, a, b)
output = []
for opcode, a0, a1, b0, b1 in seqm.get_opcodes():
if opcode == 'equal':
output.append(seqm.a[a0:a1])
elif opcode == 'insert':
output.append("<ins>" + seqm.b[b0:b1] + "</ins>")
elif opcode == 'delete':
output.append("<del>" + seqm.a[a0:a1] + "</del>")
elif opcode == 'replace':
output.append("<del>" + seqm.a[a0:a1] + "</del>")
output.append("<ins>" + seqm.b[b0:b1] + "</ins>")
else:
raise RuntimeError, "unexpected opcode"
data = {
"r1": r1,
"r2": r2,
"r1updated": r1.updated if hasattr(r1, 'updated') else r1.created,
"r2updated": r2.updated if hasattr(r2, 'updated') else r2.created,
"page_title": r2.title,
"diff_html": ''.join(output),
"user": user,
"is_admin": is_admin,
'can_edit': access.can_edit_page(r2.title, user, is_admin),
}
return render("diff.html", data) | 0.386185 | 0.090133 |
import sys
import os
import torch
from torch import nn
from torch.nn import functional as F
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from my_ai import utility
from my_ai import vision
def softmax_regression():
# step1.load dataset
batch_size = 256
vision.LoadDataset.transforms_before_load((30,30))
train_iter, test_iter = vision.LoadDataset.load_fashion_mnist(batch_size)
# step2.build model
net = nn.Sequential(nn.Flatten(), nn.Linear(900, 10))
# step3.train model
num_epochs = 5
utility.train_1(net, train_iter, test_iter, num_epochs, lr=0.1)
def original_leNet():
# step1.load dataset
batch_size = 128
vision.LoadDataset.transforms_before_load((28, 28))
train_iter, test_iter = vision.LoadDataset.load_fashion_mnist(batch_size)
# step2.build model
net = nn.Sequential(nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(),
nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),
nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),
nn.Linear(120, 84), nn.Sigmoid(), nn.Linear(84, 10))
# summary.summary_of_network(net,(20, 1, 28, 28))
# step3.train model
num_epochs = 5
utility.train_1(net, train_iter, test_iter, num_epochs, lr=0.9)
def improved_leNet():
# step1.load dataset
batch_size = 128
vision.LoadDataset.transforms_before_load((28, 28))
train_iter, test_iter = vision.LoadDataset.load_fashion_mnist(batch_size)
# step2.build model
net = nn.Sequential(nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(6, 16, kernel_size=5), nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2), nn.Flatten(),
nn.Linear(16 * 5 * 5, 120), nn.ReLU(),
nn.Linear(120, 84), nn.ReLU(), nn.Linear(84, 10))
# summary.summary_of_network(net,(1, 1, 28, 28))
# step3.train model
num_epochs = 5
utility.train_1(net, train_iter, test_iter, num_epochs, lr=0.1)
def alexNet():
# step1.load dataset
batch_size = 128
vision.LoadDataset.transforms_before_load((224, 224))
train_iter, test_iter = vision.LoadDataset.load_fashion_mnist(batch_size)
# step2.build model
net = nn.Sequential(
# Here, we use a larger 11 x 11 window to capture objects. At the same
# time, we use a stride of 4 to greatly reduce the height and width of the
# output. Here, the number of output channels is much larger than that in
# LeNet
nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2),
# Make the convolution window smaller, set padding to 2 for consistent
# height and width across the input and output, and increase the number of
# output channels
nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2),
# Use three successive convolutional layers and a smaller convolution
# window. Except for the final convolutional layer, the number of output
# channels is further increased. Pooling layers are not used to reduce the
# height and width of input after the first two convolutional layers
nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(),
nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(),
nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2), nn.Flatten(),
# Here, the number of outputs of the fully-connected layer is several
# times larger than that in LeNet. Use the dropout layer to mitigate
# overfitting
nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5),
nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5),
# Output layer. Since we are using Fashion-MNIST, the number of classes is
# 10, instead of 1000 as in the paper
nn.Linear(4096, 10))
# summary.summary_of_network(net,(1, 1, 224, 224))
# step3.train model
num_epochs = 10
utility.train_1(net, train_iter, test_iter, num_epochs, lr=0.01)
class Vgg11:
@classmethod
def vgg_block(cls, num_convs, in_channels, out_channels):
layers = []
for _ in range(num_convs):
layers.append(
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1))
layers.append(nn.ReLU())
in_channels = out_channels
layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
return nn.Sequential(*layers)
@classmethod
def vgg(cls, conv_arch):
conv_blks = []
in_channels = 1
# The convolutional part
for (num_convs, out_channels) in conv_arch:
conv_blks.append(cls.vgg_block(num_convs, in_channels, out_channels))
in_channels = out_channels
return nn.Sequential(
*conv_blks, nn.Flatten(),
# The fully-connected part
nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(), nn.Dropout(0.5),
nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5),
nn.Linear(4096, 10))
@classmethod
def implementation(cls):
# step1.load dataset
batch_size = 128
vision.LoadDataset.transforms_before_load((224, 224))
train_iter, test_iter = vision.LoadDataset.load_fashion_mnist(batch_size)
# step2.build model
# Since VGG-11 is more computationally-heavy than AlexNet we construct a network with a
# smaller number of channels. This is more than sufficient for training on Fashion-MNIST.
large_conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))
ratio = 4
small_conv_arch = [(pair[0], pair[1] // ratio) for pair in large_conv_arch]
net = cls.vgg(small_conv_arch)
# summary.summary_of_network(net,(1, 1, 224, 224))
# step3.train model
num_epochs = 10
utility.train_1(net, train_iter, test_iter, num_epochs, lr=0.05)
class NiN:
@classmethod
def nin_block(cls, in_channels, out_channels, kernel_size, strides, padding):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, strides, padding),
nn.ReLU(), nn.Conv2d(out_channels, out_channels, kernel_size=1),
nn.ReLU(), nn.Conv2d(out_channels, out_channels, kernel_size=1),
nn.ReLU())
@classmethod
def implementation(cls):
# step1.load dataset
batch_size = 128
vision.LoadDataset.transforms_before_load((224, 224))
train_iter, test_iter = vision.LoadDataset.load_fashion_mnist(batch_size)
# step2.build model
net = nn.Sequential(
cls.nin_block(1, 96, kernel_size=11, strides=4, padding=0),
nn.MaxPool2d(3, stride=2),
cls.nin_block(96, 256, kernel_size=5, strides=1, padding=2),
nn.MaxPool2d(3, stride=2),
cls.nin_block(256, 384, kernel_size=3, strides=1, padding=1),
nn.MaxPool2d(3, stride=2), nn.Dropout(0.5),
# There are 10 label classes
cls.nin_block(384, 10, kernel_size=3, strides=1, padding=1),
nn.AdaptiveAvgPool2d((1, 1)),
# Transform the four-dimensional output into two-dimensional output with a
# shape of (batch size, 10)
nn.Flatten())
# summary.summary_of_network(net, (1, 1, 224, 224))
# step3.train model
num_epochs = 10
utility.train_1(net, train_iter, test_iter, num_epochs, lr=0.1)
def googLeNet():
class Inceptionblock(nn.Module):
# `c1`--`c4` are the number of output channels for each path
def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):
super(Inceptionblock, self).__init__(**kwargs)
# Path 1 is a single 1 x 1 convolutional layer
self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1)
# Path 2 is a 1 x 1 convolutional layer followed by a 3 x 3
# convolutional layer
self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1)
self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)
# Path 3 is a 1 x 1 convolutional layer followed by a 5 x 5
# convolutional layer
self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1)
self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)
# Path 4 is a 3 x 3 maximum pooling layer followed by a 1 x 1
# convolutional layer
self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1)
def forward(self, x):
p1 = F.relu(self.p1_1(x))
p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))
p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))
p4 = F.relu(self.p4_2(self.p4_1(x)))
# Concatenate the outputs on the channel dimension
return torch.cat((p1, p2, p3, p4), dim=1)
# step1.load dataset
batch_size = 128
vision.LoadDataset.transforms_before_load((96, 96))
train_iter, test_iter = vision.LoadDataset.load_fashion_mnist(batch_size)
# step2.build model
b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2,
padding=1))
b2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1), nn.ReLU(),
nn.Conv2d(64, 192, kernel_size=3, padding=1), nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
b3 = nn.Sequential(Inceptionblock(192, 64, (96, 128), (16, 32), 32),
Inceptionblock(256, 128, (128, 192), (32, 96), 64),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
b4 = nn.Sequential(Inceptionblock(480, 192, (96, 208), (16, 48), 64),
Inceptionblock(512, 160, (112, 224), (24, 64), 64),
Inceptionblock(512, 128, (128, 256), (24, 64), 64),
Inceptionblock(512, 112, (144, 288), (32, 64), 64),
Inceptionblock(528, 256, (160, 320), (32, 128), 128),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
b5 = nn.Sequential(Inceptionblock(832, 256, (160, 320), (32, 128), 128),
Inceptionblock(832, 384, (192, 384), (48, 128), 128),
nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten())
net = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))
# summary.summary_of_network(net, (1, 1, 96, 96))
# step3.train model
num_epochs = 10
utility.train_1(net, train_iter, test_iter, num_epochs, lr=0.1)
def run():
# launch visdom before launch this example
softmax_regression()
if __name__ == '__main__':
run() | example/cnn.py | import sys
import os
import torch
from torch import nn
from torch.nn import functional as F
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from my_ai import utility
from my_ai import vision
def softmax_regression():
# step1.load dataset
batch_size = 256
vision.LoadDataset.transforms_before_load((30,30))
train_iter, test_iter = vision.LoadDataset.load_fashion_mnist(batch_size)
# step2.build model
net = nn.Sequential(nn.Flatten(), nn.Linear(900, 10))
# step3.train model
num_epochs = 5
utility.train_1(net, train_iter, test_iter, num_epochs, lr=0.1)
def original_leNet():
# step1.load dataset
batch_size = 128
vision.LoadDataset.transforms_before_load((28, 28))
train_iter, test_iter = vision.LoadDataset.load_fashion_mnist(batch_size)
# step2.build model
net = nn.Sequential(nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(),
nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),
nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),
nn.Linear(120, 84), nn.Sigmoid(), nn.Linear(84, 10))
# summary.summary_of_network(net,(20, 1, 28, 28))
# step3.train model
num_epochs = 5
utility.train_1(net, train_iter, test_iter, num_epochs, lr=0.9)
def improved_leNet():
# step1.load dataset
batch_size = 128
vision.LoadDataset.transforms_before_load((28, 28))
train_iter, test_iter = vision.LoadDataset.load_fashion_mnist(batch_size)
# step2.build model
net = nn.Sequential(nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(6, 16, kernel_size=5), nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2), nn.Flatten(),
nn.Linear(16 * 5 * 5, 120), nn.ReLU(),
nn.Linear(120, 84), nn.ReLU(), nn.Linear(84, 10))
# summary.summary_of_network(net,(1, 1, 28, 28))
# step3.train model
num_epochs = 5
utility.train_1(net, train_iter, test_iter, num_epochs, lr=0.1)
def alexNet():
# step1.load dataset
batch_size = 128
vision.LoadDataset.transforms_before_load((224, 224))
train_iter, test_iter = vision.LoadDataset.load_fashion_mnist(batch_size)
# step2.build model
net = nn.Sequential(
# Here, we use a larger 11 x 11 window to capture objects. At the same
# time, we use a stride of 4 to greatly reduce the height and width of the
# output. Here, the number of output channels is much larger than that in
# LeNet
nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2),
# Make the convolution window smaller, set padding to 2 for consistent
# height and width across the input and output, and increase the number of
# output channels
nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2),
# Use three successive convolutional layers and a smaller convolution
# window. Except for the final convolutional layer, the number of output
# channels is further increased. Pooling layers are not used to reduce the
# height and width of input after the first two convolutional layers
nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(),
nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(),
nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2), nn.Flatten(),
# Here, the number of outputs of the fully-connected layer is several
# times larger than that in LeNet. Use the dropout layer to mitigate
# overfitting
nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5),
nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5),
# Output layer. Since we are using Fashion-MNIST, the number of classes is
# 10, instead of 1000 as in the paper
nn.Linear(4096, 10))
# summary.summary_of_network(net,(1, 1, 224, 224))
# step3.train model
num_epochs = 10
utility.train_1(net, train_iter, test_iter, num_epochs, lr=0.01)
class Vgg11:
@classmethod
def vgg_block(cls, num_convs, in_channels, out_channels):
layers = []
for _ in range(num_convs):
layers.append(
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1))
layers.append(nn.ReLU())
in_channels = out_channels
layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
return nn.Sequential(*layers)
@classmethod
def vgg(cls, conv_arch):
conv_blks = []
in_channels = 1
# The convolutional part
for (num_convs, out_channels) in conv_arch:
conv_blks.append(cls.vgg_block(num_convs, in_channels, out_channels))
in_channels = out_channels
return nn.Sequential(
*conv_blks, nn.Flatten(),
# The fully-connected part
nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(), nn.Dropout(0.5),
nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5),
nn.Linear(4096, 10))
@classmethod
def implementation(cls):
# step1.load dataset
batch_size = 128
vision.LoadDataset.transforms_before_load((224, 224))
train_iter, test_iter = vision.LoadDataset.load_fashion_mnist(batch_size)
# step2.build model
# Since VGG-11 is more computationally-heavy than AlexNet we construct a network with a
# smaller number of channels. This is more than sufficient for training on Fashion-MNIST.
large_conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))
ratio = 4
small_conv_arch = [(pair[0], pair[1] // ratio) for pair in large_conv_arch]
net = cls.vgg(small_conv_arch)
# summary.summary_of_network(net,(1, 1, 224, 224))
# step3.train model
num_epochs = 10
utility.train_1(net, train_iter, test_iter, num_epochs, lr=0.05)
class NiN:
@classmethod
def nin_block(cls, in_channels, out_channels, kernel_size, strides, padding):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, strides, padding),
nn.ReLU(), nn.Conv2d(out_channels, out_channels, kernel_size=1),
nn.ReLU(), nn.Conv2d(out_channels, out_channels, kernel_size=1),
nn.ReLU())
@classmethod
def implementation(cls):
# step1.load dataset
batch_size = 128
vision.LoadDataset.transforms_before_load((224, 224))
train_iter, test_iter = vision.LoadDataset.load_fashion_mnist(batch_size)
# step2.build model
net = nn.Sequential(
cls.nin_block(1, 96, kernel_size=11, strides=4, padding=0),
nn.MaxPool2d(3, stride=2),
cls.nin_block(96, 256, kernel_size=5, strides=1, padding=2),
nn.MaxPool2d(3, stride=2),
cls.nin_block(256, 384, kernel_size=3, strides=1, padding=1),
nn.MaxPool2d(3, stride=2), nn.Dropout(0.5),
# There are 10 label classes
cls.nin_block(384, 10, kernel_size=3, strides=1, padding=1),
nn.AdaptiveAvgPool2d((1, 1)),
# Transform the four-dimensional output into two-dimensional output with a
# shape of (batch size, 10)
nn.Flatten())
# summary.summary_of_network(net, (1, 1, 224, 224))
# step3.train model
num_epochs = 10
utility.train_1(net, train_iter, test_iter, num_epochs, lr=0.1)
def googLeNet():
class Inceptionblock(nn.Module):
# `c1`--`c4` are the number of output channels for each path
def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):
super(Inceptionblock, self).__init__(**kwargs)
# Path 1 is a single 1 x 1 convolutional layer
self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1)
# Path 2 is a 1 x 1 convolutional layer followed by a 3 x 3
# convolutional layer
self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1)
self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)
# Path 3 is a 1 x 1 convolutional layer followed by a 5 x 5
# convolutional layer
self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1)
self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)
# Path 4 is a 3 x 3 maximum pooling layer followed by a 1 x 1
# convolutional layer
self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1)
def forward(self, x):
p1 = F.relu(self.p1_1(x))
p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))
p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))
p4 = F.relu(self.p4_2(self.p4_1(x)))
# Concatenate the outputs on the channel dimension
return torch.cat((p1, p2, p3, p4), dim=1)
# step1.load dataset
batch_size = 128
vision.LoadDataset.transforms_before_load((96, 96))
train_iter, test_iter = vision.LoadDataset.load_fashion_mnist(batch_size)
# step2.build model
b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2,
padding=1))
b2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1), nn.ReLU(),
nn.Conv2d(64, 192, kernel_size=3, padding=1), nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
b3 = nn.Sequential(Inceptionblock(192, 64, (96, 128), (16, 32), 32),
Inceptionblock(256, 128, (128, 192), (32, 96), 64),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
b4 = nn.Sequential(Inceptionblock(480, 192, (96, 208), (16, 48), 64),
Inceptionblock(512, 160, (112, 224), (24, 64), 64),
Inceptionblock(512, 128, (128, 256), (24, 64), 64),
Inceptionblock(512, 112, (144, 288), (32, 64), 64),
Inceptionblock(528, 256, (160, 320), (32, 128), 128),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
b5 = nn.Sequential(Inceptionblock(832, 256, (160, 320), (32, 128), 128),
Inceptionblock(832, 384, (192, 384), (48, 128), 128),
nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten())
net = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))
# summary.summary_of_network(net, (1, 1, 96, 96))
# step3.train model
num_epochs = 10
utility.train_1(net, train_iter, test_iter, num_epochs, lr=0.1)
def run():
# launch visdom before launch this example
softmax_regression()
if __name__ == '__main__':
run() | 0.780997 | 0.425307 |
from Ui_MainWindow import Ui_MainWindow
from PyQt4.QtGui import QKeySequence, QIcon, QPixmap
from PyQt4.QtCore import Qt, SIGNAL, QTime, QString
from Serial import Serial
import util
import config
class Ui_Proxy(Ui_MainWindow):
    def __init__(self, parent=None):
        # NOTE(review): `parent` is accepted for Qt-style construction but is
        # never forwarded -- Ui_MainWindow.__init__ is called without it.
        Ui_MainWindow.__init__(self)
def setupWidget(self, wobj):
wobj.setWindowIcon(QIcon(QPixmap(":/app/icons/app/logo.png")))
self.updatePortComBox(isOpening=True)
self.send_pushButton.setShortcut(QKeySequence(Qt.Key_Return + Qt.CTRL))
self.initByConfig()
self.onSendTypeChanged()
self.send_pushButton.setEnabled(False)
self.comSettingsWidgets = ( \
self.port_comboBox, self.baud_comboBox,
self.parity_comboBox, self.bytesize_comboBox,
self.stopbits_comboBox )
def updatePortComBox(self, isOpening=False):
""" 更新串口列表, 暂不删除已不存在的端口
若没有已打开的串口则将最新发现的串口设置为当前串口
"""
newCount = 0
for port in Serial.getActivePorts():
if self.port_comboBox.findText(port) > -1:
continue
self.port_comboBox.addItem(port)
newCount += 1
if newCount == 0:
return 0
if not isOpening:
lastIndex = self.port_comboBox.count() - 1
if lastIndex > -1:
self.port_comboBox.setCurrentIndex(lastIndex)
return newCount
def initByConfig(self):
configDict = config.getConfigDict()
try:
self.setCOMSettings(configDict.get(config.COMSETTINGS_KEY))
self.setRecvSettings(configDict.get(config.RECVSETTINGS_KEY))
self.setSendSettings(configDict.get(config.SENDSETTINGS_KEY))
except Exception as e:
print e
def setCOMSettings(self, comSettings):
if not isinstance(comSettings, dict):
return
port = comSettings.get(config.PORT_KEY, "").upper()
if port != "":
self.setIndexIfFound(self.port_comboBox, str(port))
baud = comSettings.get(config.BAUD_KEY, config.DEFAULT_BAUD)
self.setIndexIfFound(self.baud_comboBox, str(baud))
bytesize = comSettings.get(config.BYTESIZE_KEY, config.DEFAULT_BYTESIZE)
self.setIndexIfFound(self.baud_comboBox, str(bytesize))
parity = comSettings.get(config.PARITY_KEY, config.DEFAULT_PARTITY)
self.setIndexIfFound(self.parity_comboBox, parity)
stopbits = comSettings.get(config.STOPBITS_KEY, config.DEFAULT_STOPBITS)
self.setIndexIfFound(self.stopbits_comboBox, str(stopbits))
def setRecvSettings(self, recvSettings):
if not isinstance(recvSettings, dict):
return
recvtype = recvSettings.get(config.RECVTYPE_KEY, config.ASCII_TYPE)
self.setIndexIfFound(self.recvType_comboBox, recvtype)
autolinefeed = recvSettings.get(config.AUTOLINEFEED_KEY, config.DEFAULT_AUTOLINEFEED).upper()
self.autoLF_checkBox.setChecked(autolinefeed == config.YES)
hidersflag = recvSettings.get(config.HIDERSFLAG_KEY, config.DEFAULT_HIDERSFLAG).upper()
self.hideSRFlag_checkBox.setChecked(hidersflag == config.YES)
def setSendSettings(self, sendSettings):
if not isinstance(sendSettings, dict):
return
sendtype = sendSettings.get(config.SENDTYPE_KEY, config.ASCII_TYPE)
self.setIndexIfFound(self.sendType_comboBox, sendtype)
clearsenttext = sendSettings.get(config.CLEARSENTTEXT_KEY, config.DEFAULT_CLEARSENTTEXT).upper()
self.clearSentText_checkBox.setChecked(clearsenttext == config.YES)
showsent = sendSettings.get(config.SHOWSENT_KEY, config.DEFAULT_SHOWSENT).upper()
self.showSent_checkBox.setChecked(showsent == config.YES)
sendinterval = sendSettings.get(config.SENDINTERVAL_KEY, config.DEFAULT_SHOWSENT)
self.sendInterval_spinBox.setValue(int(sendinterval))
def getCurrentConfigDict(self):
configDict = {config.COMSETTINGS_KEY : {}, config.RECVSETTINGS_KEY: {}, config.SENDSETTINGS_KEY: {}}
configDict[config.COMSETTINGS_KEY][config.PORT_KEY] = util.QStringToStr(self.port_comboBox.currentText())
configDict[config.COMSETTINGS_KEY][config.BAUD_KEY] = util.QStringToStr(self.baud_comboBox.currentText())
configDict[config.COMSETTINGS_KEY][config.BYTESIZE_KEY] = util.QStringToStr(self.bytesize_comboBox.currentText())
configDict[config.COMSETTINGS_KEY][config.PARITY_KEY] = util.QStringToStr(self.parity_comboBox.currentText())
configDict[config.COMSETTINGS_KEY][config.STOPBITS_KEY] = util.QStringToStr(self.stopbits_comboBox.currentText())
configDict[config.RECVSETTINGS_KEY][config.RECVTYPE_KEY] = util.QStringToStr(self.recvType_comboBox.currentText())
configDict[config.RECVSETTINGS_KEY][config.AUTOLINEFEED_KEY] = self.autoLF_checkBox.isChecked() and config.YES or config.NO
configDict[config.RECVSETTINGS_KEY][config.HIDERSFLAG_KEY] = self.hideSRFlag_checkBox.isChecked() and config.YES or config.NO
configDict[config.RECVSETTINGS_KEY][config.MERGE_INTERVAL_KEY] = config.mergeInterval
configDict[config.SENDSETTINGS_KEY][config.SENDTYPE_KEY] = util.QStringToStr(self.sendType_comboBox.currentText())
configDict[config.SENDSETTINGS_KEY][config.CLEARSENTTEXT_KEY] = self.clearSentText_checkBox.isChecked() and config.YES or config.NO
configDict[config.SENDSETTINGS_KEY][config.SHOWSENT_KEY] = self.showSent_checkBox.isChecked() and config.YES or config.NO
configDict[config.SENDSETTINGS_KEY][config.SENDINTERVAL_KEY] = str(self.sendInterval_spinBox.value())
return configDict
def setIndexIfFound(self, combox, text):
index = combox.findText(text)
if index > -1:
combox.setCurrentIndex(index)
def addRXBytesNumber(self, num=0):
self.rx_lcdNumber.display(num + self.rx_lcdNumber.intValue())
def addTXBytesNumber(self, num=0):
self.tx_lcdNumber.display(num + self.tx_lcdNumber.intValue())
def clearLcdNumber(self):
self.rx_lcdNumber.display(0)
self.tx_lcdNumber.display(0)
    def getPortSettings(self):
        """Read the serial-port parameters currently selected in the UI.

        Returns a kwargs-style dict for the Serial wrapper.  The
        index-based lookups assume each combo box lists its entries in
        the same order as the corresponding Serial.* constant tuple.
        """
        comPort = util.QStringToStr(self.port_comboBox.currentText())
        baud = int(util.QStringToStr(self.baud_comboBox.currentText()))
        parity = Serial.PARITIES[self.parity_comboBox.currentIndex()]
        bytesize = Serial.BYTESIZES[self.bytesize_comboBox.currentIndex()]
        stopbits = Serial.STOPBITSES[self.stopbits_comboBox.currentIndex()]
        # NOTE(review): key is spelled "baund", not "baud" -- presumably the
        # Serial wrapper expects this exact spelling; confirm before renaming.
        return {
            "port": comPort, "baund": baud, "bytesize": bytesize,
            "parity": parity, "stopbits": stopbits, "timeout": 1
        }
    def getDataAndType(self):
        """Return (payload bytes, send type) taken from the send text box.

        The send type is looked up by combo-box index into
        config.SEND_DATA_TYPES (index order must match the UI entries).
        """
        return self.send_TextEdit.toPlainText().toUtf8().data(), \
            config.SEND_DATA_TYPES[self.sendType_comboBox.currentIndex()]
def onPortOpened(self):
self.open_pushButton.setText(u"关闭")
self.open_pushButton.setStyleSheet("background-color: rgb(85, 255, 0);")
self.setComSettingsEnabled(False)
self.send_pushButton.setEnabled(True)
def setComSettingsEnabled(self, enable):
map(lambda widget: widget.setEnabled(enable), self.comSettingsWidgets)
    def onPortOpening(self):
        """Hook called while the port is being opened; intentionally a no-op."""
        pass
def onPortClosed(self):
self.open_pushButton.setText(u"打开")
self.open_pushButton.setStyleSheet("background-color: rgb(238, 238, 238);")
self.setComSettingsEnabled(True)
self.send_pushButton.setEnabled(False)
def onSendData(self, data, _type=config.ASCII_TYPE):
if self.clearSentText_checkBox.isChecked() and not self.autoSend_checkBox.isChecked():
self.send_TextEdit.clear()
if not self.showSent_checkBox.isChecked():
return
text = data
if not self.hideSRFlag_checkBox.isChecked():
text = 'SEND (%s)\n%s\n' % (util.QStringToStr(QTime.currentTime().toString()), data)
self.recv_TextBrowser.setPlainTextWithNoNL(text)
def onRecvData(self, data):
text = data
if not self.hideSRFlag_checkBox.isChecked():
text = 'RECV (%s)\n%s' % (util.QStringToStr(QTime.currentTime().toString()), data)
self.recv_TextBrowser.setPlainTextWithNoNL(text)
if self.autoLF_checkBox.isChecked():
self.recv_TextBrowser.setPlainTextWithNoNL("\n")
    def clearHistory(self):
        """Clear all text from the receive/history view."""
        self.recv_TextBrowser.clear()
    def getRecvType(self):
        """Return the receive data type selected in the UI (by combo index)."""
        return config.RECV_DATA_TYPES[self.recvType_comboBox.currentIndex()]
    def getSendType(self):
        """Return the send data type selected in the UI (by combo index)."""
        return config.SEND_DATA_TYPES[self.sendType_comboBox.currentIndex()]
    def getAsciiTail(self):
        """Return the ASCII tail (line terminator) selected in the UI."""
        return config.ASCII_TAIL[self.asciiTail_comboBox.currentIndex()]
    def onAutoSend(self, status):
        """React to the auto-send checkbox state change.

        *status* is the Qt check-state int (0 = Unchecked, 2 = Checked);
        the partially-checked state (1) is deliberately ignored.
        """
        if status == 0: #Unchecked
            self.send_pushButton.setText(u"发送")
            self.send_pushButton.setEnabled(True)
            self.send_TextEdit.setEnabled(True)
        elif status == 2: #Checked
            self.send_pushButton.setText(u"开始自动发送")
def onAutoSendStarted(self):
self.send_pushButton.setEnabled(False)
self.send_TextEdit.setEnabled(False)
    def getAutoSendInterval(self):
        """Return the auto-send interval currently set in the spin box."""
        return self.sendInterval_spinBox.value()
    def updateAutoSendTimes(self, times):
        """Show the number of completed automatic sends on the send button."""
        self.send_pushButton.setText(u"已自动发送 %03d 次" % times)
    def onProtoTemplSelected(self, templJson):
        """Load the selected protocol template JSON into the send box."""
        self.send_TextEdit.setText(templJson)
def onSendTypeChanged(self):
sendType = self.getSendType()
self.asciiTail_comboBox.setEnabled(sendType == config.ASCII_TYPE)
self.inc_checkBox.setEnabled(sendType == config.ASCII_TYPE)
self.resetStartVal_pushButton.setEnabled(sendType == config.ASCII_TYPE)
def getRecvWidgetContent(self):
return self.recv_TextBrowser.toPlainText().toUtf8().data() | Ui_Proxy.py | from Ui_MainWindow import Ui_MainWindow
from PyQt4.QtGui import QKeySequence, QIcon, QPixmap
from PyQt4.QtCore import Qt, SIGNAL, QTime, QString
from Serial import Serial
import util
import config
class Ui_Proxy(Ui_MainWindow):
def __init__(self, parent=None):
Ui_MainWindow.__init__(self)
def setupWidget(self, wobj):
wobj.setWindowIcon(QIcon(QPixmap(":/app/icons/app/logo.png")))
self.updatePortComBox(isOpening=True)
self.send_pushButton.setShortcut(QKeySequence(Qt.Key_Return + Qt.CTRL))
self.initByConfig()
self.onSendTypeChanged()
self.send_pushButton.setEnabled(False)
self.comSettingsWidgets = ( \
self.port_comboBox, self.baud_comboBox,
self.parity_comboBox, self.bytesize_comboBox,
self.stopbits_comboBox )
def updatePortComBox(self, isOpening=False):
""" 更新串口列表, 暂不删除已不存在的端口
若没有已打开的串口则将最新发现的串口设置为当前串口
"""
newCount = 0
for port in Serial.getActivePorts():
if self.port_comboBox.findText(port) > -1:
continue
self.port_comboBox.addItem(port)
newCount += 1
if newCount == 0:
return 0
if not isOpening:
lastIndex = self.port_comboBox.count() - 1
if lastIndex > -1:
self.port_comboBox.setCurrentIndex(lastIndex)
return newCount
def initByConfig(self):
configDict = config.getConfigDict()
try:
self.setCOMSettings(configDict.get(config.COMSETTINGS_KEY))
self.setRecvSettings(configDict.get(config.RECVSETTINGS_KEY))
self.setSendSettings(configDict.get(config.SENDSETTINGS_KEY))
except Exception as e:
print e
def setCOMSettings(self, comSettings):
if not isinstance(comSettings, dict):
return
port = comSettings.get(config.PORT_KEY, "").upper()
if port != "":
self.setIndexIfFound(self.port_comboBox, str(port))
baud = comSettings.get(config.BAUD_KEY, config.DEFAULT_BAUD)
self.setIndexIfFound(self.baud_comboBox, str(baud))
bytesize = comSettings.get(config.BYTESIZE_KEY, config.DEFAULT_BYTESIZE)
self.setIndexIfFound(self.baud_comboBox, str(bytesize))
parity = comSettings.get(config.PARITY_KEY, config.DEFAULT_PARTITY)
self.setIndexIfFound(self.parity_comboBox, parity)
stopbits = comSettings.get(config.STOPBITS_KEY, config.DEFAULT_STOPBITS)
self.setIndexIfFound(self.stopbits_comboBox, str(stopbits))
def setRecvSettings(self, recvSettings):
if not isinstance(recvSettings, dict):
return
recvtype = recvSettings.get(config.RECVTYPE_KEY, config.ASCII_TYPE)
self.setIndexIfFound(self.recvType_comboBox, recvtype)
autolinefeed = recvSettings.get(config.AUTOLINEFEED_KEY, config.DEFAULT_AUTOLINEFEED).upper()
self.autoLF_checkBox.setChecked(autolinefeed == config.YES)
hidersflag = recvSettings.get(config.HIDERSFLAG_KEY, config.DEFAULT_HIDERSFLAG).upper()
self.hideSRFlag_checkBox.setChecked(hidersflag == config.YES)
def setSendSettings(self, sendSettings):
if not isinstance(sendSettings, dict):
return
sendtype = sendSettings.get(config.SENDTYPE_KEY, config.ASCII_TYPE)
self.setIndexIfFound(self.sendType_comboBox, sendtype)
clearsenttext = sendSettings.get(config.CLEARSENTTEXT_KEY, config.DEFAULT_CLEARSENTTEXT).upper()
self.clearSentText_checkBox.setChecked(clearsenttext == config.YES)
showsent = sendSettings.get(config.SHOWSENT_KEY, config.DEFAULT_SHOWSENT).upper()
self.showSent_checkBox.setChecked(showsent == config.YES)
sendinterval = sendSettings.get(config.SENDINTERVAL_KEY, config.DEFAULT_SHOWSENT)
self.sendInterval_spinBox.setValue(int(sendinterval))
def getCurrentConfigDict(self):
configDict = {config.COMSETTINGS_KEY : {}, config.RECVSETTINGS_KEY: {}, config.SENDSETTINGS_KEY: {}}
configDict[config.COMSETTINGS_KEY][config.PORT_KEY] = util.QStringToStr(self.port_comboBox.currentText())
configDict[config.COMSETTINGS_KEY][config.BAUD_KEY] = util.QStringToStr(self.baud_comboBox.currentText())
configDict[config.COMSETTINGS_KEY][config.BYTESIZE_KEY] = util.QStringToStr(self.bytesize_comboBox.currentText())
configDict[config.COMSETTINGS_KEY][config.PARITY_KEY] = util.QStringToStr(self.parity_comboBox.currentText())
configDict[config.COMSETTINGS_KEY][config.STOPBITS_KEY] = util.QStringToStr(self.stopbits_comboBox.currentText())
configDict[config.RECVSETTINGS_KEY][config.RECVTYPE_KEY] = util.QStringToStr(self.recvType_comboBox.currentText())
configDict[config.RECVSETTINGS_KEY][config.AUTOLINEFEED_KEY] = self.autoLF_checkBox.isChecked() and config.YES or config.NO
configDict[config.RECVSETTINGS_KEY][config.HIDERSFLAG_KEY] = self.hideSRFlag_checkBox.isChecked() and config.YES or config.NO
configDict[config.RECVSETTINGS_KEY][config.MERGE_INTERVAL_KEY] = config.mergeInterval
configDict[config.SENDSETTINGS_KEY][config.SENDTYPE_KEY] = util.QStringToStr(self.sendType_comboBox.currentText())
configDict[config.SENDSETTINGS_KEY][config.CLEARSENTTEXT_KEY] = self.clearSentText_checkBox.isChecked() and config.YES or config.NO
configDict[config.SENDSETTINGS_KEY][config.SHOWSENT_KEY] = self.showSent_checkBox.isChecked() and config.YES or config.NO
configDict[config.SENDSETTINGS_KEY][config.SENDINTERVAL_KEY] = str(self.sendInterval_spinBox.value())
return configDict
def setIndexIfFound(self, combox, text):
index = combox.findText(text)
if index > -1:
combox.setCurrentIndex(index)
def addRXBytesNumber(self, num=0):
self.rx_lcdNumber.display(num + self.rx_lcdNumber.intValue())
def addTXBytesNumber(self, num=0):
self.tx_lcdNumber.display(num + self.tx_lcdNumber.intValue())
def clearLcdNumber(self):
self.rx_lcdNumber.display(0)
self.tx_lcdNumber.display(0)
def getPortSettings(self):
comPort = util.QStringToStr(self.port_comboBox.currentText())
baud = int(util.QStringToStr(self.baud_comboBox.currentText()))
parity = Serial.PARITIES[self.parity_comboBox.currentIndex()]
bytesize = Serial.BYTESIZES[self.bytesize_comboBox.currentIndex()]
stopbits = Serial.STOPBITSES[self.stopbits_comboBox.currentIndex()]
return {
"port": comPort, "baund": baud, "bytesize": bytesize,
"parity": parity, "stopbits": stopbits, "timeout": 1
}
def getDataAndType(self):
return self.send_TextEdit.toPlainText().toUtf8().data(), \
config.SEND_DATA_TYPES[self.sendType_comboBox.currentIndex()]
def onPortOpened(self):
self.open_pushButton.setText(u"关闭")
self.open_pushButton.setStyleSheet("background-color: rgb(85, 255, 0);")
self.setComSettingsEnabled(False)
self.send_pushButton.setEnabled(True)
def setComSettingsEnabled(self, enable):
map(lambda widget: widget.setEnabled(enable), self.comSettingsWidgets)
def onPortOpening(self):
pass
def onPortClosed(self):
self.open_pushButton.setText(u"打开")
self.open_pushButton.setStyleSheet("background-color: rgb(238, 238, 238);")
self.setComSettingsEnabled(True)
self.send_pushButton.setEnabled(False)
def onSendData(self, data, _type=config.ASCII_TYPE):
if self.clearSentText_checkBox.isChecked() and not self.autoSend_checkBox.isChecked():
self.send_TextEdit.clear()
if not self.showSent_checkBox.isChecked():
return
text = data
if not self.hideSRFlag_checkBox.isChecked():
text = 'SEND (%s)\n%s\n' % (util.QStringToStr(QTime.currentTime().toString()), data)
self.recv_TextBrowser.setPlainTextWithNoNL(text)
def onRecvData(self, data):
text = data
if not self.hideSRFlag_checkBox.isChecked():
text = 'RECV (%s)\n%s' % (util.QStringToStr(QTime.currentTime().toString()), data)
self.recv_TextBrowser.setPlainTextWithNoNL(text)
if self.autoLF_checkBox.isChecked():
self.recv_TextBrowser.setPlainTextWithNoNL("\n")
def clearHistory(self):
self.recv_TextBrowser.clear()
def getRecvType(self):
return config.RECV_DATA_TYPES[self.recvType_comboBox.currentIndex()]
def getSendType(self):
return config.SEND_DATA_TYPES[self.sendType_comboBox.currentIndex()]
def getAsciiTail(self):
return config.ASCII_TAIL[self.asciiTail_comboBox.currentIndex()]
def onAutoSend(self, status):
if status == 0: #Unchecked
self.send_pushButton.setText(u"发送")
self.send_pushButton.setEnabled(True)
self.send_TextEdit.setEnabled(True)
elif status == 2: #Checked
self.send_pushButton.setText(u"开始自动发送")
def onAutoSendStarted(self):
self.send_pushButton.setEnabled(False)
self.send_TextEdit.setEnabled(False)
def getAutoSendInterval(self):
return self.sendInterval_spinBox.value()
def updateAutoSendTimes(self, times):
self.send_pushButton.setText(u"已自动发送 %03d 次" % times)
def onProtoTemplSelected(self, templJson):
self.send_TextEdit.setText(templJson)
def onSendTypeChanged(self):
sendType = self.getSendType()
self.asciiTail_comboBox.setEnabled(sendType == config.ASCII_TYPE)
self.inc_checkBox.setEnabled(sendType == config.ASCII_TYPE)
self.resetStartVal_pushButton.setEnabled(sendType == config.ASCII_TYPE)
def getRecvWidgetContent(self):
return self.recv_TextBrowser.toPlainText().toUtf8().data() | 0.29696 | 0.066995 |
import numpy as np
import scipy.io
import os
import gzip
import pickle
path = os.environ['ML_DATA_PATH']+'/norb/'
#export PYLEARN2_DATA_PATH=~/ws/python/pylearn2data
def load_resized(size=24, binarize_y=False):
    """Load the resized NORB train/valid split from norb_<size>.pkl.gz.

    With binarize_y=True the integer labels are converted to 1-of-5
    one-hot matrices (NORB has 5 classes).
    """
    with gzip.open(path+'norb_'+str(size)+'.pkl.gz', 'rb') as f:
        (train_x, train_y), (valid_x, valid_y) = pickle.load(f)
    if binarize_y:
        train_y = binarize_labels(train_y, n_classes=5)
        valid_y = binarize_labels(valid_y, n_classes=5)
    return train_x, train_y, valid_x, valid_y
# Loads data where data is split into class labels
# Loads data where data is split into class labels
def load_resized_split(size=24, binarize_y=False):
    """Like load_resized, but with the training set split per class.

    Returns (train_x, train_y, test_x, test_y) where train_x/train_y
    are lists with one entry per class (NORB has 5 classes).
    """
    n_classes = 5
    train_x, train_y, test_x, test_y = load_resized(size, False)
    def split_by_class(x, y, num_classes):
        # x is (features, samples); select the columns whose label is i.
        result_x = [0]*num_classes
        result_y = [0]*num_classes
        for i in range(num_classes):
            idx_i = np.where(y == i)[0]
            result_x[i] = x[:,idx_i]
            result_y[i] = y[idx_i]
        return result_x, result_y
    train_x, train_y = split_by_class(train_x, train_y, n_classes)
    if binarize_y:
        # Was `range(10)` with the default 10-class binarizer: IndexError
        # past index 4 and a wrong one-hot height for 5-class NORB labels.
        test_y = binarize_labels(test_y, n_classes=n_classes)
        for i in range(n_classes):
            train_y[i] = binarize_labels(train_y[i], n_classes=n_classes)
    return train_x, train_y, test_x, test_y
def load_numpy(toFloat=True, binarize_y=False):
    """Load the raw NORB train/test arrays from norb_{train,test}.npz.

    Images are transposed to (features, samples) and truncated to the
    first 9216 rows (96*96 pixels).  toFloat rescales byte pixels into
    [0, 1) float16; binarize_y one-hot encodes the 5-class labels.
    """
    train = np.load(path+'norb_train.npz')
    train_x = train['arr_0'].T[:9216,:]
    train_y = train['arr_1']
    test = np.load(path+'norb_test.npz')
    test_x = test['arr_0'].T[:9216,:]
    test_y = test['arr_1']
    if toFloat:
        train_x = train_x.astype('float16')/256.
        test_x = test_x.astype('float16')/256.
    if binarize_y:
        # Use the 5-class NORB label space (matching load_resized); the
        # stray `raise Exception()` that made this branch unusable and the
        # wrong default n_classes=10 have been fixed.
        train_y = binarize_labels(train_y, n_classes=5)
        test_y = binarize_labels(test_y, n_classes=5)
    return train_x, train_y, test_x, test_y
# Converts integer labels to binarized labels (1-of-K coding)
# Converts integer labels to binarized labels (1-of-K coding)
def binarize_labels(y, n_classes=10):
    """Return an (n_classes, len(y)) one-hot matrix: column i encodes y[i]."""
    new_y = np.zeros((n_classes, y.shape[0]))
    new_y[y, np.arange(y.shape[0])] = 1
    return new_y
# Save rescaled images
# Save rescaled images
def save_reshaped(shape):
    """Downscale the 96x96 NORB images to *shape* and pickle the result.

    Writes (train, test) tuples to norb_<shape[0]>.pkl.gz, where each
    element is (reshaped_x, y).  Assumes shape divides 96 evenly.
    """
    orig_shape = 96, 96
    def reshape_digits(x, shape):
        # x arrives as (features, samples); work row-wise on (samples, features).
        x = x.T
        def rebin(_a, shape):
            # Block-average a 2-D image down to `shape`.
            sh = shape[0], _a.shape[0]//shape[0], shape[1], _a.shape[1]//shape[1]
            return _a.reshape(sh).mean(-1).mean(1)
        nrows = x.shape[0]
        ncols = shape[0]*shape[1]
        result = np.zeros((nrows, ncols))
        for i in range(nrows):
            result[i,:] = rebin(x[i,:].reshape(orig_shape), shape).reshape((1, ncols))
        return result.T
    # NORB dataset (the original comment said "MNIST" by mistake).
    train_x, train_y, test_x, test_y = load_numpy()
    train = reshape_digits(train_x, shape), train_y
    test = reshape_digits(test_x, shape), test_y
    # `pickle` is already imported at module level; the redundant local
    # import was removed.
    f = gzip.open(path+'norb_'+str(shape[0])+'.pkl.gz', 'wb')
    pickle.dump((train, test), f)
    f.close()
import scipy.io
import os
import gzip
import pickle
path = os.environ['ML_DATA_PATH']+'/norb/'
#export PYLEARN2_DATA_PATH=~/ws/python/pylearn2data
def load_resized(size=24, binarize_y=False):
# resized NORB dataset
f = gzip.open(path+'norb_'+str(size)+'.pkl.gz', 'rb')
train, valid = pickle.load(f)
f.close()
train_x, train_y = train
valid_x, valid_y = valid
if binarize_y:
train_y = binarize_labels(train_y, n_classes=5)
valid_y = binarize_labels(valid_y, n_classes=5)
return train_x, train_y, valid_x, valid_y
# Loads data where data is split into class labels
def load_resized_split(size=24, binarize_y=False):
train_x, train_y, test_x, test_y = load_resized(size,False)
def split_by_class(x, y, num_classes):
result_x = [0]*num_classes
result_y = [0]*num_classes
for i in range(num_classes):
idx_i = np.where(y == i)[0]
result_x[i] = x[:,idx_i]
result_y[i] = y[idx_i]
return result_x, result_y
train_x, train_y = split_by_class(train_x, train_y, 5)
if binarize_y:
test_y = binarize_labels(test_y)
for i in range(10):
train_y[i] = binarize_labels(train_y[i])
return train_x, train_y, test_x, test_y
def load_numpy(toFloat=True, binarize_y=False):
train = np.load(path+'norb_train.npz')
train_x = train['arr_0'].T[:9216,:]
train_y = train['arr_1']
test = np.load(path+'norb_test.npz')
test_x = test['arr_0'].T[:9216,:]
test_y = test['arr_1']
if toFloat:
train_x = train_x.astype('float16')/256.
test_x = test_x.astype('float16')/256.
if binarize_y:
train_y = binarize_labels(train_y)
test_y = binarize_labels(test_y)
raise Exception()
return train_x, train_y, test_x, test_y
# Converts integer labels to binarized labels (1-of-K coding)
def binarize_labels(y, n_classes=10):
new_y = np.zeros((n_classes, y.shape[0]))
for i in range(y.shape[0]):
new_y[y[i], i] = 1
return new_y
# Save rescaled images
def save_reshaped(shape):
orig_shape = 96,96
def reshape_digits(x, shape):
x = x.T
def rebin(_a, shape):
sh = shape[0],_a.shape[0]//shape[0],shape[1],_a.shape[1]//shape[1]
return _a.reshape(sh).mean(-1).mean(1)
nrows = x.shape[0]
ncols = shape[0]*shape[1]
result = np.zeros((nrows, ncols))
for i in range(nrows):
result[i,:] = rebin(x[i,:].reshape(orig_shape), shape).reshape((1, ncols))
return result.T
# MNIST dataset
train_x, train_y, test_x, test_y = load_numpy()
train = reshape_digits(train_x, shape), train_y
test = reshape_digits(test_x, shape), test_y
f = gzip.open(path+'norb_'+str(shape[0])+'.pkl.gz','wb')
import pickle
pickle.dump((train, test), f)
f.close() | 0.411466 | 0.355635 |
from socket import AF_INET, SOCK_STREAM, socket
from flask import Flask, request
import os
import json
import random
import string
from enum import IntEnum
from flask.json import jsonify
import base64
from werkzeug.utils import secure_filename
NAME = 'fpm_sandbox_proxy'
# The environment value is a string while the fallback was an int;
# normalise to int so app.run() always receives a numeric port.
LISTEN_PORT = int(os.environ.get('PROXY_LISTEN_PORT', 9001))
# Directory where uploaded scripts are staged per request.
SCRIPT_PATH = '/tmp/scripts'
class status(IntEnum):
    """Detection state of a sandboxed script execution."""
    PENDING = 0   # execution started, no callback received yet
    DETECTED = 1  # the /<request_id> callback fired for this execution
app = Flask(NAME)
results = {}
info = {}
@app.route('/', methods=['post'])
def new_execution():
    '''Start a new testing execution: save the uploaded script, run it
    through PHP-FPM and report whether the sandbox flagged it.'''
    # TODO(review): random.sample of 6 letters can collide across requests;
    # consider uuid4 for the request id.
    request_id = ''.join(random.sample(string.ascii_letters, 6))
    params = json.loads(request.form['params'])
    script = request.files['script']
    stdin = request.form['stdin'].encode()
    save_path = os.path.join(SCRIPT_PATH, request_id)
    # Mode must be octal 0o755, not decimal 755 (which is mode 01363).
    os.makedirs(save_path, 0o755)
    script_path = os.path.join(save_path, secure_filename(script.filename))
    script.save(script_path)
    params['SCRIPT_FILENAME'] = script_path
    params['DOCUMENT_ROOT'] = save_path
    params['REQUEST_ID'] = request_id
    results[request_id] = status.PENDING
    info[request_id] = ''
    execute(params, stdin)
    # pop() instead of plain reads: the per-request entries previously
    # accumulated forever in the module-level dicts.
    detected = results.pop(request_id) == status.DETECTED
    # b64encode returns bytes; decode so jsonify can serialise the value.
    encoded_info = base64.b64encode(info.pop(request_id).encode()).decode('ascii')
    return jsonify({ 'detected': detected, 'info': encoded_info })
@app.route('/<request_id>', methods=['get'])
def detected(request_id):
    '''Inform detected by php_sandbox extension.

    Callback hit by the sandbox extension while the script runs; marks
    the pending execution as detected and stores the reported details.
    '''
    results[request_id] = status.DETECTED
    info[request_id] = request.args.get('info')
    return jsonify({ 'success': True })
class _fcgi_request_type(IntEnum):
    """FastCGI record types (FastCGI 1.0 specification)."""
    FCGI_BEGIN_REQUEST = 1
    FCGI_ABORT_REQUEST = 2
    FCGI_END_REQUEST = 3
    FCGI_PARAMS = 4
    FCGI_STDIN = 5
    FCGI_STDOUT = 6
    FCGI_STDERR = 7
    FCGI_DATA = 8
    FCGI_GET_VALUES = 9
    FCGI_GET_VALUES_RESULT = 10
def generate_fpm_packet(request_id, type, params = None, version = 1):
    '''Frame *params* into one or more FastCGI records of the given type.

    For FCGI_BEGIN_REQUEST the payload is fixed (role/flags); for
    FCGI_PARAMS *params* is a dict of name/value strings; for FCGI_STDIN
    *params* is the raw request-body bytes.  Content longer than one
    record's 65535-byte limit is split across consecutive records.
    '''
    content = b''
    if type == _fcgi_request_type.FCGI_BEGIN_REQUEST:
        # role = 1 (FCGI_RESPONDER), flags = 0, five reserved bytes.
        content = b'\x00\x01\x00\x00\x00\x00\x00\x00'
    elif type == _fcgi_request_type.FCGI_PARAMS:
        # Name-value pair encoding: lengths < 0x80 take one byte,
        # otherwise four bytes with the high bit set.
        for key, value in params.items():
            key = key.encode()
            value = value.encode()
            key_length = len(key)
            value_length = len(value)
            if key_length < 0x80:
                content += key_length.to_bytes(1, 'big')
            else:
                content += (key_length | 0x80000000).to_bytes(4, 'big')
            if value_length < 0x80:
                content += value_length.to_bytes(1, 'big')
            else:
                content += (value_length | 0x80000000).to_bytes(4, 'big')
            content += key + value
    elif type == _fcgi_request_type.FCGI_STDIN:
        content = params
    packet = b''
    while True:
        # 8-byte record header: version, type, requestId (2), then below
        # contentLength (2) and paddingLength/reserved (2, always zero here).
        packet += version.to_bytes(1, 'big') + type.to_bytes(1, 'big') + request_id.to_bytes(2, 'big')
        if len(content) > 65535:
            current_content = content[:65535]
            content = content[65535:]
        else:
            current_content = content
            content = b''
        packet += len(current_content).to_bytes(2, 'big') + b'\x00\x00'
        packet += current_content
        if len(content) == 0:
            break
    return packet
def parse_header(raw):
    """Split an 8-byte FastCGI record header into
    (type, content_length, padding_length)."""
    record_type = raw[1]
    content_length = (raw[4] << 8) | raw[5]
    padding_length = raw[6]
    return record_type, content_length, padding_length
def get_response(sock):
    '''Read one FastCGI record from PHP-FPM.

    Returns (type, content), or (None, None) once the peer closes the
    connection.
    '''
    def read_exact(n):
        # recv() may return fewer bytes than requested; b'' means the
        # peer closed.  The old check was `header_raw == None`, which
        # recv() never returns, so a closed socket previously crashed
        # inside parse_header.
        buf = b''
        while len(buf) < n:
            chunk = sock.recv(n - len(buf))
            if not chunk:
                return None
            buf += chunk
        return buf
    header_raw = read_exact(8)
    if header_raw is None:
        return None, None
    rec_type, content_length, padding_length = parse_header(header_raw)
    content = b''
    if content_length != 0:
        content = read_exact(content_length)
        if content is None:
            return None, None
    if padding_length != 0:
        read_exact(padding_length)
    return rec_type, content
def execute(params, stdin):
    '''Build and send FastCGI protocol packets to the PHP-FPM service.

    Returns (success, stdout, stderr) accumulated from the response
    records; success is True once FCGI_END_REQUEST is seen.
    '''
    sock = socket(AF_INET, SOCK_STREAM)
    try:
        sock.connect(('127.0.0.1', 9000))
        sock.send(generate_fpm_packet(1, _fcgi_request_type.FCGI_BEGIN_REQUEST))
        sock.send(generate_fpm_packet(1, _fcgi_request_type.FCGI_PARAMS, params))
        sock.send(generate_fpm_packet(1, _fcgi_request_type.FCGI_STDIN, stdin))
        stdout = b''
        stderr = b''
        success = False
        while True:
            rec_type, content = get_response(sock)
            if rec_type is None:
                break
            if rec_type == _fcgi_request_type.FCGI_STDOUT:
                stdout += content
            elif rec_type == _fcgi_request_type.FCGI_STDERR:
                stderr += content
            elif rec_type == _fcgi_request_type.FCGI_END_REQUEST:
                success = True
                break
        return success, stdout, stderr
    finally:
        # The socket previously leaked on every call (never closed).
        sock.close()
if __name__ == '__main__':
    if not os.path.exists(SCRIPT_PATH):
        # Mode must be octal 0o755, not decimal 755; the third argument
        # is exist_ok, named here for clarity.
        os.makedirs(SCRIPT_PATH, 0o755, exist_ok=True)
    app.run('0.0.0.0', LISTEN_PORT)
from socket import AF_INET, SOCK_STREAM, socket
from flask import Flask, request
import os
import json
import random
import string
from enum import IntEnum
from flask.json import jsonify
import base64
from werkzeug.utils import secure_filename
NAME = 'fpm_sandbox_proxy'
LISTEN_PORT = os.environ.get('PROXY_LISTEN_PORT') if 'PROXY_LISTEN_PORT' in os.environ else 9001
SCRIPT_PATH = '/tmp/scripts'
class status(IntEnum):
PENDING = 0
DETECTED = 1
app = Flask(NAME)
results = {}
info = {}
@app.route('/', methods=['post'])
def new_execution():
'''Start a new testing execution'''
request_id = ''.join(random.sample(string.ascii_letters, 6))
params = json.loads(request.form['params'])
script = request.files['script']
stdin = request.form['stdin'].encode()
save_path = os.path.join(SCRIPT_PATH, request_id)
os.makedirs(save_path, 755)
script_path = os.path.join(save_path, secure_filename(script.filename))
script.save(script_path)
params['SCRIPT_FILENAME'] = script_path
params['DOCUMENT_ROOT'] = save_path
params['REQUEST_ID'] = request_id
results[request_id] = status.PENDING
info[request_id] = ''
execute(params, stdin)
detected = False
if results[request_id] == status.DETECTED:
detected = True
return jsonify({ 'detected': detected, 'info': base64.b64encode(info[request_id].encode()) })
@app.route('/<request_id>', methods=['get'])
def detected(request_id):
'''Inform detected by php_sandbox extension'''
results[request_id] = status.DETECTED
info[request_id] = request.args.get('info')
return jsonify({ 'success': True })
class _fcgi_request_type(IntEnum):
FCGI_BEGIN_REQUEST = 1
FCGI_ABORT_REQUEST = 2
FCGI_END_REQUEST = 3
FCGI_PARAMS = 4
FCGI_STDIN = 5
FCGI_STDOUT = 6
FCGI_STDERR = 7
FCGI_DATA = 8
FCGI_GET_VALUES = 9
FCGI_GET_VALUES_RESULT = 10
def generate_fpm_packet(request_id, type, params = None, version = 1):
'''Generate FastCGI packet'''
content = b''
if type == _fcgi_request_type.FCGI_BEGIN_REQUEST:
content = b'\x00\x01\x00\x00\x00\x00\x00\x00'
elif type == _fcgi_request_type.FCGI_PARAMS:
for key, value in params.items():
key = key.encode()
value = value.encode()
key_length = len(key)
value_length = len(value)
if key_length < 0x80:
content += key_length.to_bytes(1, 'big')
else:
content += (key_length | 0x80000000).to_bytes(4, 'big')
if value_length < 0x80:
content += value_length.to_bytes(1, 'big')
else:
content += (value_length | 0x80000000).to_bytes(4, 'big')
content += key + value
elif type == _fcgi_request_type.FCGI_STDIN:
content = params
packet = b''
while True:
packet += version.to_bytes(1, 'big') + type.to_bytes(1, 'big') + request_id.to_bytes(2, 'big')
if len(content) > 65535:
current_content = content[:65535]
content = content[65535:]
else:
current_content = content
content = b''
packet += len(current_content).to_bytes(2, 'big') + b'\x00\x00'
packet += current_content
if len(content) == 0:
break
return packet
def parse_header(raw):
type = raw[1]
contentLength = (raw[4] << 8) + raw[5]
paddingLength = raw[6]
return type, contentLength, paddingLength
def get_response(sock):
'''Wait for PHP-FPM response'''
header_raw = sock.recv(8)
if header_raw == None:
return None, None
type, contentLength, paddingLength = parse_header(header_raw)
content = b''
if contentLength != 0:
content = sock.recv(contentLength)
if paddingLength != 0:
sock.recv(paddingLength)
return type, content
def execute(params, stdin):
'''Build and send FastCGI protocol packet to PHP-FPM service'''
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', 9000))
sock.send(generate_fpm_packet(1, _fcgi_request_type.FCGI_BEGIN_REQUEST))
sock.send(generate_fpm_packet(1, _fcgi_request_type.FCGI_PARAMS, params))
sock.send(generate_fpm_packet(1, _fcgi_request_type.FCGI_STDIN, stdin))
stdout = b''
stderr = b''
success = False
while True:
type, content = get_response(sock)
if type == None:
break
if type == _fcgi_request_type.FCGI_STDOUT:
stdout += content
elif type == _fcgi_request_type.FCGI_STDERR:
stderr += content
elif type == _fcgi_request_type.FCGI_END_REQUEST:
success = True
break
return success, stdout, stderr
if __name__ == '__main__':
if not os.path.exists(SCRIPT_PATH):
os.makedirs(SCRIPT_PATH, 755, True)
app.run('0.0.0.0', LISTEN_PORT) | 0.218003 | 0.065575 |
import htmlmth.mods.http
from . import TransformFunction, http_payload_to_tfarg_function
def _generate_encode_chunked_equisize(*args, **kwargs):
    """Build the equal-chunk-size chunked-encoding transform.

    kwargs:
        chunksize: positive chunk size in bytes (default 256).
    """
    chunksize = kwargs.get("chunksize", 256)
    assert(chunksize > 0)
    return TransformFunction("",
                             "chunked encoding (equisize)",
                             http_payload_to_tfarg_function(lambda x: htmlmth.mods.http.encode_chunked_equisize(x, chunksize=chunksize))
                             )
# Default instance; .parameterize rebuilds the transform with custom kwargs.
encode_chunked_equisize = _generate_encode_chunked_equisize()
encode_chunked_equisize.parameterize = _generate_encode_chunked_equisize
def _generate_encode_chunked_equisize_leadingzeros(*args, **kwargs):
    """Build the equal-chunk-size transform with zero-padded size fields.

    kwargs:
        chunksize: positive chunk size in bytes (default 256).
        leadingzeros: width of the zero-padded size field (default 10, > 1).
    """
    chunksize = kwargs.get("chunksize", 256)
    leadingzeros = kwargs.get("leadingzeros", 10)
    assert(chunksize > 0)
    assert(leadingzeros > 1)
    return TransformFunction("",
                             "chunked encoding (equisize, chunk sizes with leading zeros)",
                             http_payload_to_tfarg_function(lambda x: htmlmth.mods.http.encode_chunked_equisize(x, chunksize=chunksize, leadingzeros=leadingzeros))
                             )
# Default instance; .parameterize rebuilds the transform with custom kwargs.
encode_chunked_equisize_leadingzeros = _generate_encode_chunked_equisize_leadingzeros()
encode_chunked_equisize_leadingzeros.parameterize = _generate_encode_chunked_equisize_leadingzeros
def _generate_encode_chunked_varysize(*args, **kwargs):
    """Build the variable-chunk-size chunked-encoding transform.

    kwargs:
        min_chunksize / max_chunksize: positive bounds on the random
            chunk size (defaults 128 / 256, min must be < max).
    """
    min_chunksize = kwargs.get("min_chunksize", 128)
    max_chunksize = kwargs.get("max_chunksize", 256)
    assert(min_chunksize > 0)
    assert(max_chunksize > 0)
    assert(min_chunksize < max_chunksize)
    return TransformFunction("",
                             "chunked encoding (various sizes)",
                             http_payload_to_tfarg_function(lambda x: htmlmth.mods.http.encode_chunked_varysize(x, min_chunksize=min_chunksize, max_chunksize=max_chunksize))
                             )
# Default instance; .parameterize rebuilds the transform with custom kwargs.
encode_chunked_varysize = _generate_encode_chunked_varysize()
encode_chunked_varysize.parameterize = _generate_encode_chunked_varysize
def _generate_encode_chunked_varysize_leadingzeros(*args, **kwargs):
    """Build the variable-chunk-size transform with zero-padded size fields.

    Renamed: this factory was previously (re)defined as
    _generate_encode_chunked_equisize_leadingzeros, silently shadowing the
    equisize variant's module-level factory name.

    kwargs:
        min_chunksize / max_chunksize: positive bounds on the random
            chunk size (defaults 128 / 256, min must be < max).
        leadingzeros: width of the zero-padded size field (default 10, > 1).
    """
    min_chunksize = kwargs.get("min_chunksize", 128)
    max_chunksize = kwargs.get("max_chunksize", 256)
    leadingzeros = kwargs.get("leadingzeros", 10)
    assert(min_chunksize > 0)
    assert(max_chunksize > 0)
    assert(min_chunksize < max_chunksize)
    assert(leadingzeros > 1)
    return TransformFunction("",
                             "chunked encoding (various sizes, chunk sizes with leading zeros)",
                             http_payload_to_tfarg_function(
                                 lambda x: htmlmth.mods.http.encode_chunked_varysize(x, min_chunksize=min_chunksize,
                                                                                     max_chunksize=max_chunksize, leadingzeros=leadingzeros))
                             )
# Default instance; .parameterize rebuilds the transform with custom kwargs.
encode_chunked_varysize_leadingzeros = _generate_encode_chunked_varysize_leadingzeros()
encode_chunked_varysize_leadingzeros.parameterize = _generate_encode_chunked_varysize_leadingzeros
from . import TransformFunction, http_payload_to_tfarg_function
def _generate_encode_chunked_equisize(*args, **kwargs):
chunksize = kwargs.get("chunksize", 256)
assert(chunksize > 0)
return TransformFunction("",
"chunked encoding (equisize)",
http_payload_to_tfarg_function(lambda x: htmlmth.mods.http.encode_chunked_equisize(x, chunksize=chunksize))
)
encode_chunked_equisize = _generate_encode_chunked_equisize()
encode_chunked_equisize.parameterize = _generate_encode_chunked_equisize
def _generate_encode_chunked_equisize_leadingzeros(*args, **kwargs):
chunksize = kwargs.get("chunksize", 256)
leadingzeros = kwargs.get("leadingzeros", 10)
assert(chunksize > 0)
assert(leadingzeros > 1)
return TransformFunction("",
"chunked encoding (equisize, chunk sizes with leading zeros)",
http_payload_to_tfarg_function(lambda x: htmlmth.mods.http.encode_chunked_equisize(x, chunksize=chunksize, leadingzeros=leadingzeros))
)
encode_chunked_equisize_leadingzeros = _generate_encode_chunked_equisize_leadingzeros()
encode_chunked_equisize_leadingzeros.parameterize = _generate_encode_chunked_equisize_leadingzeros
def _generate_encode_chunked_varysize(*args, **kwargs):
min_chunksize = kwargs.get("min_chunksize", 128)
max_chunksize = kwargs.get("max_chunksize", 256)
assert(min_chunksize > 0)
assert(max_chunksize > 0)
assert(min_chunksize < max_chunksize)
return TransformFunction("",
"chunked encoding (various sizes)",
http_payload_to_tfarg_function(lambda x: htmlmth.mods.http.encode_chunked_varysize(x, min_chunksize=min_chunksize, max_chunksize=max_chunksize))
)
encode_chunked_varysize = _generate_encode_chunked_varysize()
encode_chunked_varysize.parameterize = _generate_encode_chunked_varysize
def _generate_encode_chunked_equisize_leadingzeros(*args, **kwargs):
min_chunksize = kwargs.get("min_chunksize", 128)
max_chunksize = kwargs.get("max_chunksize", 256)
leadingzeros = kwargs.get("leadingzeros", 10)
assert (min_chunksize > 0)
assert (max_chunksize > 0)
assert (min_chunksize < max_chunksize)
assert(leadingzeros > 1)
return TransformFunction("",
"chunked encoding (various sizes, chunk sizes with leading zeros)",
http_payload_to_tfarg_function(
lambda x: htmlmth.mods.http.encode_chunked_varysize(x, min_chunksize=min_chunksize,
max_chunksize=max_chunksize, leadingzeros=leadingzeros))
)
encode_chunked_varysize_leadingzeros = _generate_encode_chunked_equisize_leadingzeros()
encode_chunked_varysize_leadingzeros.parameterize = _generate_encode_chunked_equisize_leadingzeros | 0.577376 | 0.494995 |
import datetime
import uuid
import mock
from oslo_config import fixture as fixture_config
from oslotest import base
import requests
from ceilometer.dispatcher import http
from ceilometer.event.storage import models as event_models
from ceilometer.publisher import utils
class TestDispatcherHttp(base.BaseTestCase):
    """Unit tests for the HTTP dispatcher's metering-data path."""
    def setUp(self):
        super(TestDispatcherHttp, self).setUp()
        self.CONF = self.useFixture(fixture_config.Config()).conf
        # Minimal metering sample; the signature must be computed with the
        # configured telemetry secret for the dispatcher to accept it.
        self.msg = {'counter_name': 'test',
                    'resource_id': self.id(),
                    'counter_volume': 1,
                    }
        self.msg['message_signature'] = utils.compute_signature(
            self.msg, self.CONF.publisher.telemetry_secret,
        )
    def test_http_dispatcher_config_options(self):
        """Target and timeout options are read from the dispatcher_http group."""
        self.CONF.dispatcher_http.target = 'fake'
        self.CONF.dispatcher_http.timeout = 2
        dispatcher = http.HttpDispatcher(self.CONF)
        self.assertEqual('fake', dispatcher.target)
        self.assertEqual(2, dispatcher.timeout)
    def test_http_dispatcher_with_no_target(self):
        self.CONF.dispatcher_http.target = ''
        dispatcher = http.HttpDispatcher(self.CONF)
        # The target keeps the configured empty string.
        self.assertEqual('', dispatcher.target)
        with mock.patch.object(requests, 'post') as post:
            dispatcher.record_metering_data(self.msg)
        # Since the target is not set, no http post should occur, thus the
        # call_count should be zero.
        self.assertEqual(0, post.call_count)
    def test_http_dispatcher_with_no_metadata(self):
        """A configured target causes exactly one POST per sample."""
        self.CONF.dispatcher_http.target = 'fake'
        dispatcher = http.HttpDispatcher(self.CONF)
        with mock.patch.object(requests, 'post') as post:
            dispatcher.record_metering_data(self.msg)
        self.assertEqual(1, post.call_count)
class TestEventDispatcherHttp(base.BaseTestCase):
def setUp(self):
super(TestEventDispatcherHttp, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
def test_http_dispatcher(self):
self.CONF.dispatcher_http.event_target = 'fake'
dispatcher = http.HttpDispatcher(self.CONF)
event = event_models.Event(uuid.uuid4(), 'test',
datetime.datetime(2012, 7, 2, 13, 53, 40),
[], {})
event = utils.message_from_event(event,
self.CONF.publisher.telemetry_secret)
with mock.patch.object(requests, 'post') as post:
dispatcher.record_events(event)
self.assertEqual(1, post.call_count)
def test_http_dispatcher_bad(self):
self.CONF.dispatcher_http.event_target = ''
dispatcher = http.HttpDispatcher(self.CONF)
event = event_models.Event(uuid.uuid4(), 'test',
datetime.datetime(2012, 7, 2, 13, 53, 40),
[], {})
event = utils.message_from_event(event,
self.CONF.publisher.telemetry_secret)
with mock.patch('ceilometer.dispatcher.http.LOG',
mock.MagicMock()) as LOG:
dispatcher.record_events(event)
self.assertTrue(LOG.exception.called)
def test_http_dispatcher_share_target(self):
self.CONF.dispatcher_http.target = 'fake'
dispatcher = http.HttpDispatcher(self.CONF)
event = event_models.Event(uuid.uuid4(), 'test',
datetime.datetime(2012, 7, 2, 13, 53, 40),
[], {})
event = utils.message_from_event(event,
self.CONF.publisher.telemetry_secret)
with mock.patch.object(requests, 'post') as post:
dispatcher.record_events(event)
self.assertEqual('fake', post.call_args[0][0]) | ceilometer/tests/unit/dispatcher/test_http.py |
import datetime
import uuid
import mock
from oslo_config import fixture as fixture_config
from oslotest import base
import requests
from ceilometer.dispatcher import http
from ceilometer.event.storage import models as event_models
from ceilometer.publisher import utils
class TestDispatcherHttp(base.BaseTestCase):
def setUp(self):
super(TestDispatcherHttp, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
self.msg = {'counter_name': 'test',
'resource_id': self.id(),
'counter_volume': 1,
}
self.msg['message_signature'] = utils.compute_signature(
self.msg, self.CONF.publisher.telemetry_secret,
)
def test_http_dispatcher_config_options(self):
self.CONF.dispatcher_http.target = 'fake'
self.CONF.dispatcher_http.timeout = 2
dispatcher = http.HttpDispatcher(self.CONF)
self.assertEqual('fake', dispatcher.target)
self.assertEqual(2, dispatcher.timeout)
def test_http_dispatcher_with_no_target(self):
self.CONF.dispatcher_http.target = ''
dispatcher = http.HttpDispatcher(self.CONF)
# The target should be None
self.assertEqual('', dispatcher.target)
with mock.patch.object(requests, 'post') as post:
dispatcher.record_metering_data(self.msg)
# Since the target is not set, no http post should occur, thus the
# call_count should be zero.
self.assertEqual(0, post.call_count)
def test_http_dispatcher_with_no_metadata(self):
self.CONF.dispatcher_http.target = 'fake'
dispatcher = http.HttpDispatcher(self.CONF)
with mock.patch.object(requests, 'post') as post:
dispatcher.record_metering_data(self.msg)
self.assertEqual(1, post.call_count)
class TestEventDispatcherHttp(base.BaseTestCase):
def setUp(self):
super(TestEventDispatcherHttp, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
def test_http_dispatcher(self):
self.CONF.dispatcher_http.event_target = 'fake'
dispatcher = http.HttpDispatcher(self.CONF)
event = event_models.Event(uuid.uuid4(), 'test',
datetime.datetime(2012, 7, 2, 13, 53, 40),
[], {})
event = utils.message_from_event(event,
self.CONF.publisher.telemetry_secret)
with mock.patch.object(requests, 'post') as post:
dispatcher.record_events(event)
self.assertEqual(1, post.call_count)
def test_http_dispatcher_bad(self):
self.CONF.dispatcher_http.event_target = ''
dispatcher = http.HttpDispatcher(self.CONF)
event = event_models.Event(uuid.uuid4(), 'test',
datetime.datetime(2012, 7, 2, 13, 53, 40),
[], {})
event = utils.message_from_event(event,
self.CONF.publisher.telemetry_secret)
with mock.patch('ceilometer.dispatcher.http.LOG',
mock.MagicMock()) as LOG:
dispatcher.record_events(event)
self.assertTrue(LOG.exception.called)
def test_http_dispatcher_share_target(self):
self.CONF.dispatcher_http.target = 'fake'
dispatcher = http.HttpDispatcher(self.CONF)
event = event_models.Event(uuid.uuid4(), 'test',
datetime.datetime(2012, 7, 2, 13, 53, 40),
[], {})
event = utils.message_from_event(event,
self.CONF.publisher.telemetry_secret)
with mock.patch.object(requests, 'post') as post:
dispatcher.record_events(event)
self.assertEqual('fake', post.call_args[0][0]) | 0.69035 | 0.127245 |
from src.main import *
from src.header import *
from core.protocols import *
from core.xmpp import *
from core.web import *
class Bruteforce(object):
def __init__(self, service, username, wordlist, address, port, delay, proxy):
self.service = service
self.username = username
self.wordlist = wordlist
self.address = address
self.port = port
self.delay = delay
self.proxy = proxy
self.get_args(self.service, self.username, self.wordlist, self.address, self.port, self.delay, self.proxy)
print choice(headers)
print (G + "[*] Username: %s " % self.username) + W
sleep(0.5)
print (G + "[*] Wordlist: %s " % self.wordlist) + W
sleep(0.5)
if os.path.exists(self.wordlist) == False:
print R + "[!] Wordlist not found! [!]" + W
exit()
print (C + "[*] Service: %s " % self.service) + W
if self.service is None:
print R + "[!] No service provided! [!]" + W
if self.proxy is not None:
print (C + "[*] Proxy file: %s " % self.proxy) + W
print O + "Checking if proxies are active...\n" + W
self.proxyServer(self.proxy)
sleep(0.5)
def get_args(self, service, username, wordlist, address, port, delay, proxy):
parser = argparse.ArgumentParser(description='Bruteforce framework written in Python')
required = parser.add_argument_group('required arguments')
required.add_argument('-s', '--service', dest='service', help="Provide a service being attacked. Several protocols and services are supported")
required.add_argument('-u', '--username', dest='username', help='Provide a valid username for service/protocol being executed')
required.add_argument('-w', '--wordlist', dest='password', help='Provide a wordlist or directory to a wordlist')
parser.add_argument('-a', '--address', dest='address', help='Provide host address for specified service. Required for certain protocols')
parser.add_argument('-p', '--port', type=int, dest='port', help='Provide port for host address for specified service. If not specified, will be automatically set')
parser.add_argument('-d', '--delay', type=int, dest='delay', help='Provide the number of seconds the program delays as each password is tried')
parser.add_argument('--proxy', dest='proxy', help="Providing a proxy for anonymization and avoiding time-outs")
args = parser.parse_args()
man_options = ['username', 'password']
for m in man_options:
if not args.__dict__[m]:
print R + "[!] You have to specify a username AND a wordlist! [!]" + W
exit()
self.service = args.service
self.username = args.username
self.wordlist = args.password
self.address = args.address
self.port = args.port
self.delay = args.delay
self.proxy = args.proxy
if self.delay is None:
self.delay = 1
def proxyServer(proxy):
proxy = open(proxy, 'r')
for i in proxy.readlines():
proxyaddr = i.strip("\n")
try:
proxies = {"http" : "http://" + str(proxyaddr) }
r = requests.get("http://google.com", proxies=proxies)
print G + "[v]" + W + (" Proxy %s is found! " % proxyaddr)
except requests.exceptions.ProxyError:
print R + "[X]" + W + (" Proxy %s is NOT found!" % proxyaddr)
proxy.close() | brut3k1t/src/brut3k1t.py | from src.main import *
from src.header import *
from core.protocols import *
from core.xmpp import *
from core.web import *
class Bruteforce(object):
def __init__(self, service, username, wordlist, address, port, delay, proxy):
self.service = service
self.username = username
self.wordlist = wordlist
self.address = address
self.port = port
self.delay = delay
self.proxy = proxy
self.get_args(self.service, self.username, self.wordlist, self.address, self.port, self.delay, self.proxy)
print choice(headers)
print (G + "[*] Username: %s " % self.username) + W
sleep(0.5)
print (G + "[*] Wordlist: %s " % self.wordlist) + W
sleep(0.5)
if os.path.exists(self.wordlist) == False:
print R + "[!] Wordlist not found! [!]" + W
exit()
print (C + "[*] Service: %s " % self.service) + W
if self.service is None:
print R + "[!] No service provided! [!]" + W
if self.proxy is not None:
print (C + "[*] Proxy file: %s " % self.proxy) + W
print O + "Checking if proxies are active...\n" + W
self.proxyServer(self.proxy)
sleep(0.5)
def get_args(self, service, username, wordlist, address, port, delay, proxy):
parser = argparse.ArgumentParser(description='Bruteforce framework written in Python')
required = parser.add_argument_group('required arguments')
required.add_argument('-s', '--service', dest='service', help="Provide a service being attacked. Several protocols and services are supported")
required.add_argument('-u', '--username', dest='username', help='Provide a valid username for service/protocol being executed')
required.add_argument('-w', '--wordlist', dest='password', help='Provide a wordlist or directory to a wordlist')
parser.add_argument('-a', '--address', dest='address', help='Provide host address for specified service. Required for certain protocols')
parser.add_argument('-p', '--port', type=int, dest='port', help='Provide port for host address for specified service. If not specified, will be automatically set')
parser.add_argument('-d', '--delay', type=int, dest='delay', help='Provide the number of seconds the program delays as each password is tried')
parser.add_argument('--proxy', dest='proxy', help="Providing a proxy for anonymization and avoiding time-outs")
args = parser.parse_args()
man_options = ['username', 'password']
for m in man_options:
if not args.__dict__[m]:
print R + "[!] You have to specify a username AND a wordlist! [!]" + W
exit()
self.service = args.service
self.username = args.username
self.wordlist = args.password
self.address = args.address
self.port = args.port
self.delay = args.delay
self.proxy = args.proxy
if self.delay is None:
self.delay = 1
def proxyServer(proxy):
proxy = open(proxy, 'r')
for i in proxy.readlines():
proxyaddr = i.strip("\n")
try:
proxies = {"http" : "http://" + str(proxyaddr) }
r = requests.get("http://google.com", proxies=proxies)
print G + "[v]" + W + (" Proxy %s is found! " % proxyaddr)
except requests.exceptions.ProxyError:
print R + "[X]" + W + (" Proxy %s is NOT found!" % proxyaddr)
proxy.close() | 0.408631 | 0.065875 |
import numpy as np
import random
import sys
import math
import ot
from datetime import datetime, timedelta
import pickle
import numpy as np
import pandas as pd
import torch
from .drift import DriftSampling
from scipy.stats import anderson_ksamp
from utils import timer_func
class pvalueSampling(DriftSampling):
""" Anderson-Darling test is used to measure the concept drift.
Reference: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.anderson_ksamp.html """
def __init__(self, args):
super(pvalueSampling, self).__init__(args)
assert len(self.subsamps) == 2
def small_shift(self, xs, xt):
xs = xs.data.cpu().numpy()
xt = xt.data.cpu().numpy()
result = []
for i in range(16):
lol = anderson_ksamp([list(xs[:, i]), list(xt[:, i])])[2]
result.append(lol)
return result
def concept_drift(self):
# Measure concept drift between validation data and test data.
valid_embeddings, test_embeddings = self.generate_DATE_embeddings()
stack = []
for j in range(60): # 60
num_sample_valid = min(len(valid_embeddings), 500)
num_sample_test = min(len(test_embeddings), 500)
ind_valid = torch.tensor(random.sample(range(len(valid_embeddings)), num_sample_valid)).cuda()
ind_test = torch.tensor(random.sample(range(len(test_embeddings)), num_sample_test)).cuda()
xv = torch.index_select(valid_embeddings, 0, ind_valid)
xt = torch.index_select(test_embeddings, 0, ind_test)
stack.append(self.small_shift(xv, xt))
xd = np.mean(stack, axis = 0) # smaller value means greater shift :|
xd = (xd < 0.05).sum()/ 16 # 16 is the dimension
# xd = 1 - min(1, xd.mean()/0.1)
return xd.item()
@timer_func
def query(self, k):
# Drift sampler should measure the concept drift and update subsampler weights before the query selection is made.
self.update_subsampler_weights()
super(pvalueSampling, self).query(k)
return self.chosen | query_strategies/p_value.py | import numpy as np
import random
import sys
import math
import ot
from datetime import datetime, timedelta
import pickle
import numpy as np
import pandas as pd
import torch
from .drift import DriftSampling
from scipy.stats import anderson_ksamp
from utils import timer_func
class pvalueSampling(DriftSampling):
""" Anderson-Darling test is used to measure the concept drift.
Reference: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.anderson_ksamp.html """
def __init__(self, args):
super(pvalueSampling, self).__init__(args)
assert len(self.subsamps) == 2
def small_shift(self, xs, xt):
xs = xs.data.cpu().numpy()
xt = xt.data.cpu().numpy()
result = []
for i in range(16):
lol = anderson_ksamp([list(xs[:, i]), list(xt[:, i])])[2]
result.append(lol)
return result
def concept_drift(self):
# Measure concept drift between validation data and test data.
valid_embeddings, test_embeddings = self.generate_DATE_embeddings()
stack = []
for j in range(60): # 60
num_sample_valid = min(len(valid_embeddings), 500)
num_sample_test = min(len(test_embeddings), 500)
ind_valid = torch.tensor(random.sample(range(len(valid_embeddings)), num_sample_valid)).cuda()
ind_test = torch.tensor(random.sample(range(len(test_embeddings)), num_sample_test)).cuda()
xv = torch.index_select(valid_embeddings, 0, ind_valid)
xt = torch.index_select(test_embeddings, 0, ind_test)
stack.append(self.small_shift(xv, xt))
xd = np.mean(stack, axis = 0) # smaller value means greater shift :|
xd = (xd < 0.05).sum()/ 16 # 16 is the dimension
# xd = 1 - min(1, xd.mean()/0.1)
return xd.item()
@timer_func
def query(self, k):
# Drift sampler should measure the concept drift and update subsampler weights before the query selection is made.
self.update_subsampler_weights()
super(pvalueSampling, self).query(k)
return self.chosen | 0.521959 | 0.406567 |
from discord.ext import commands
import discord
import json
from loguru import logger
from discord.ext import tasks
import base64
import aiohttp
import asyncio
with open('data/database.json') as d:
database = json.load(d)
class Admin(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_ready(self):
print('Jerry is ready.')
link = database["shortlink"]
activity = discord.Game(name=f"j.help | {link}")
# activity = discord.Game(name="Bot testing in progress.")
await self.client.change_presence(status=discord.Status.online, activity=activity)
await asyncio.sleep(43200)
self.push.start()
@commands.command()
async def hi(self, ctx):
if ctx.author != self.client.get_user(430079880353546242):
return
await ctx.send('Jerry.')
@commands.command()
async def invite(self, ctx):
await ctx.send('Invite Jerry to your server using this link:\n' + database["shortlink"])
@commands.command()
async def vote(self, ctx):
await ctx.send('Vote for Jerry using this link:\n' + database["votelink"])
@commands.command()
async def support(self, ctx):
await ctx.send('Join the community/support server here:\n' + database["supportlink"])
@commands.command()
async def help(self, ctx):
embed = discord.Embed(
title="Jerry's Commands",
color=0xf00000
)
embed.set_footer(
text='More features in development!'
)
embed.set_thumbnail(url="https://i.imgur.com/6lOTQhe.png")
embed.add_field(
name="j.help",
value="Responds with this message",
inline=False
)
embed.add_field(
name="j.invite",
value="Responds with the invite link for Jerry!",
inline=False
)
embed.add_field(
name="j.vote",
value="Responds with the vote link for Jerry!",
inline=False
)
embed.add_field(
name="j.support",
value="Responds with the invite to the support server.",
inline=False
)
embed.add_field(
name="j.skills (username) (profile)",
value="Shows the user's Skyblock skill levels.",
inline=False
)
embed.add_field(
name="j.accessories (username) (profile)",
value="Shows the user's missing accessories.",
inline=False
)
embed.add_field(
name="j.armor (username) (profile)",
value="Shows the user's equipped armor.",
inline=False
)
embed.add_field(
name="j.inventory (username) (profile)",
value="Shows the user's inventory."
"\n Aliases: `j.inv`",
inline=False
)
embed.add_field(
name="j.dungeons (username) (profile)",
value="Shows the user's dungeon stats"
"\n Aliases: `j.dungeon`",
inline=False
)
embed.add_field(
name="j.auctions (username)",
value="Shows the user's active auctions"
"\n Aliases: `j.ah`",
inline=False
)
embed.add_field(
name="j.lowestbin (item name)",
value="Returns the lowest BIN price for the given item."
"\n Aliases: `j.bin`",
inline=False
)
embed.add_field(
name="j.bazaar (item name)",
value="Returns the bazaar prices for the given item."
"\n Aliases: `j.bz`",
inline=False
)
embed.add_field(
name="j.setupreminders/j.disabledreminders",
value="Allows you to set up Skyblock event reminders for the channel.",
inline=False
)
await ctx.send(embed=embed)
@tasks.loop(minutes=120)
async def push(self):
logger.info('Pushing files to Github...')
await self.pushdata()
async def pushdata(self):
filenames = ["data/eventchannels.json"]
for filename in filenames:
try:
token = database["github_oath"]
repo = "amymainyc/jerrybot"
branch = "master"
url = "https://api.github.com/repos/" + repo + "/contents/" + filename
base64content = base64.b64encode(open(filename, "rb").read())
async with aiohttp.ClientSession() as session:
async with session.get(url + '?ref=' + branch, headers={"Authorization": "token " + token}) as data:
data = await data.json()
sha = data['sha']
if base64content.decode('utf-8') + "\n" != data['content']:
message = json.dumps(
{"message": "Automatic data update.",
"branch": branch,
"content": base64content.decode("utf-8"),
"sha": sha}
)
async with aiohttp.ClientSession() as session:
async with session.put(url, data=message,
headers={"Content-Type": "application/json",
"Authorization": "token " + token}) as resp:
print(resp)
else:
print("Nothing to update.")
except Exception as e:
logger.exception(e)
@commands.command()
async def gitPush(self, ctx):
if ctx.author != self.client.get_user(430079880353546242):
return
filenames = ["data/eventchannels.json"]
for filename in filenames:
try:
token = database["github_oath"]
repo = "amymainyc/jerrybot"
branch = "master"
url = "https://api.github.com/repos/" + repo + "/contents/" + filename
base64content = base64.b64encode(open(filename, "rb").read())
async with aiohttp.ClientSession() as session:
async with session.get(url + '?ref=' + branch, headers={"Authorization": "token " + token}) as data:
data = await data.json()
sha = data['sha']
if base64content.decode('utf-8') + "\n" != data['content']:
message = json.dumps(
{"message": "Automatic data update.",
"branch": branch,
"content": base64content.decode("utf-8"),
"sha": sha}
)
async with aiohttp.ClientSession() as session:
async with session.put(url, data=message,
headers={"Content-Type": "application/json",
"Authorization": "token " + token}) as resp:
print(resp)
else:
print("Nothing to update.")
except Exception as e:
logger.exception(e)
await ctx.send("Pushed latest data to GitHub.")
def setup(client):
client.add_cog(Admin(client)) | cogs/admin.py | from discord.ext import commands
import discord
import json
from loguru import logger
from discord.ext import tasks
import base64
import aiohttp
import asyncio
with open('data/database.json') as d:
database = json.load(d)
class Admin(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_ready(self):
print('Jerry is ready.')
link = database["shortlink"]
activity = discord.Game(name=f"j.help | {link}")
# activity = discord.Game(name="Bot testing in progress.")
await self.client.change_presence(status=discord.Status.online, activity=activity)
await asyncio.sleep(43200)
self.push.start()
@commands.command()
async def hi(self, ctx):
if ctx.author != self.client.get_user(430079880353546242):
return
await ctx.send('Jerry.')
@commands.command()
async def invite(self, ctx):
await ctx.send('Invite Jerry to your server using this link:\n' + database["shortlink"])
@commands.command()
async def vote(self, ctx):
await ctx.send('Vote for Jerry using this link:\n' + database["votelink"])
@commands.command()
async def support(self, ctx):
await ctx.send('Join the community/support server here:\n' + database["supportlink"])
@commands.command()
async def help(self, ctx):
embed = discord.Embed(
title="Jerry's Commands",
color=0xf00000
)
embed.set_footer(
text='More features in development!'
)
embed.set_thumbnail(url="https://i.imgur.com/6lOTQhe.png")
embed.add_field(
name="j.help",
value="Responds with this message",
inline=False
)
embed.add_field(
name="j.invite",
value="Responds with the invite link for Jerry!",
inline=False
)
embed.add_field(
name="j.vote",
value="Responds with the vote link for Jerry!",
inline=False
)
embed.add_field(
name="j.support",
value="Responds with the invite to the support server.",
inline=False
)
embed.add_field(
name="j.skills (username) (profile)",
value="Shows the user's Skyblock skill levels.",
inline=False
)
embed.add_field(
name="j.accessories (username) (profile)",
value="Shows the user's missing accessories.",
inline=False
)
embed.add_field(
name="j.armor (username) (profile)",
value="Shows the user's equipped armor.",
inline=False
)
embed.add_field(
name="j.inventory (username) (profile)",
value="Shows the user's inventory."
"\n Aliases: `j.inv`",
inline=False
)
embed.add_field(
name="j.dungeons (username) (profile)",
value="Shows the user's dungeon stats"
"\n Aliases: `j.dungeon`",
inline=False
)
embed.add_field(
name="j.auctions (username)",
value="Shows the user's active auctions"
"\n Aliases: `j.ah`",
inline=False
)
embed.add_field(
name="j.lowestbin (item name)",
value="Returns the lowest BIN price for the given item."
"\n Aliases: `j.bin`",
inline=False
)
embed.add_field(
name="j.bazaar (item name)",
value="Returns the bazaar prices for the given item."
"\n Aliases: `j.bz`",
inline=False
)
embed.add_field(
name="j.setupreminders/j.disabledreminders",
value="Allows you to set up Skyblock event reminders for the channel.",
inline=False
)
await ctx.send(embed=embed)
@tasks.loop(minutes=120)
async def push(self):
logger.info('Pushing files to Github...')
await self.pushdata()
async def pushdata(self):
filenames = ["data/eventchannels.json"]
for filename in filenames:
try:
token = database["github_oath"]
repo = "amymainyc/jerrybot"
branch = "master"
url = "https://api.github.com/repos/" + repo + "/contents/" + filename
base64content = base64.b64encode(open(filename, "rb").read())
async with aiohttp.ClientSession() as session:
async with session.get(url + '?ref=' + branch, headers={"Authorization": "token " + token}) as data:
data = await data.json()
sha = data['sha']
if base64content.decode('utf-8') + "\n" != data['content']:
message = json.dumps(
{"message": "Automatic data update.",
"branch": branch,
"content": base64content.decode("utf-8"),
"sha": sha}
)
async with aiohttp.ClientSession() as session:
async with session.put(url, data=message,
headers={"Content-Type": "application/json",
"Authorization": "token " + token}) as resp:
print(resp)
else:
print("Nothing to update.")
except Exception as e:
logger.exception(e)
@commands.command()
async def gitPush(self, ctx):
if ctx.author != self.client.get_user(430079880353546242):
return
filenames = ["data/eventchannels.json"]
for filename in filenames:
try:
token = database["github_oath"]
repo = "amymainyc/jerrybot"
branch = "master"
url = "https://api.github.com/repos/" + repo + "/contents/" + filename
base64content = base64.b64encode(open(filename, "rb").read())
async with aiohttp.ClientSession() as session:
async with session.get(url + '?ref=' + branch, headers={"Authorization": "token " + token}) as data:
data = await data.json()
sha = data['sha']
if base64content.decode('utf-8') + "\n" != data['content']:
message = json.dumps(
{"message": "Automatic data update.",
"branch": branch,
"content": base64content.decode("utf-8"),
"sha": sha}
)
async with aiohttp.ClientSession() as session:
async with session.put(url, data=message,
headers={"Content-Type": "application/json",
"Authorization": "token " + token}) as resp:
print(resp)
else:
print("Nothing to update.")
except Exception as e:
logger.exception(e)
await ctx.send("Pushed latest data to GitHub.")
def setup(client):
client.add_cog(Admin(client)) | 0.434941 | 0.077065 |
import json
from django.urls import reverse
from unittest import skipIf
# Projectroles dependency
from projectroles.models import SODAR_CONSTANTS
from projectroles.plugins import get_backend_api
from projectroles.tests.test_views_api import TestAPIViewsBase
# Samplesheets dependency
from samplesheets.tests.test_io import SampleSheetIOMixin, SHEET_DIR
from landingzones.tests.test_models import LandingZoneMixin
from landingzones.tests.test_views import (
IRODS_BACKEND_ENABLED,
IRODS_BACKEND_SKIP_MSG,
)
from landingzones.tests.test_views_taskflow import (
ZONE_TITLE,
ZONE_DESC,
)
# Global constants
PROJECT_ROLE_OWNER = SODAR_CONSTANTS['PROJECT_ROLE_OWNER']
PROJECT_ROLE_DELEGATE = SODAR_CONSTANTS['PROJECT_ROLE_DELEGATE']
PROJECT_ROLE_CONTRIBUTOR = SODAR_CONSTANTS['PROJECT_ROLE_CONTRIBUTOR']
PROJECT_ROLE_GUEST = SODAR_CONSTANTS['PROJECT_ROLE_GUEST']
PROJECT_TYPE_CATEGORY = SODAR_CONSTANTS['PROJECT_TYPE_CATEGORY']
PROJECT_TYPE_PROJECT = SODAR_CONSTANTS['PROJECT_TYPE_PROJECT']
# Local constants
SHEET_PATH = SHEET_DIR + 'i_small.zip'
ZONE_STATUS = 'VALIDATING'
ZONE_STATUS_INFO = 'Testing'
INVALID_UUID = '11111111-1111-1111-1111-111111111111'
# Base Views and Classes -------------------------------------------------------
class TestLandingZoneAPIViewsBase(
LandingZoneMixin, SampleSheetIOMixin, TestAPIViewsBase
):
"""Base class for Landingzones API view testing"""
def setUp(self):
super().setUp()
# Init contributor user and assignment
self.user_contrib = self.make_user('user_contrib')
self.contrib_as = self._make_assignment(
self.project, self.user_contrib, self.role_contributor
)
# Import investigation
self.investigation = self._import_isa_from_file(
SHEET_PATH, self.project
)
self.study = self.investigation.studies.first()
self.assay = self.study.assays.first()
# Create LandingZone
self.landing_zone = self._make_landing_zone(
title=ZONE_TITLE,
project=self.project,
user=self.owner_as.user,
assay=self.assay,
description=ZONE_DESC,
status='ACTIVE',
)
@skipIf(not IRODS_BACKEND_ENABLED, IRODS_BACKEND_SKIP_MSG)
class TestLandingZoneListAPIView(TestLandingZoneAPIViewsBase):
"""Tests for LandingZoneListAPIView"""
def test_get_owner(self):
"""Test LandingZoneListAPIView get() as project owner"""
irods_backend = get_backend_api('omics_irods', conn=False)
url = reverse(
'landingzones:api_list', kwargs={'project': self.project.sodar_uuid}
)
response = self.request_knox(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 1)
expected = {
'title': self.landing_zone.title,
'project': str(self.project.sodar_uuid),
'user': self.get_serialized_user(self.user),
'assay': str(self.assay.sodar_uuid),
'status': self.landing_zone.status,
'status_info': self.landing_zone.status_info,
'status_locked': False,
'date_modified': self.get_drf_datetime(
self.landing_zone.date_modified
),
'description': self.landing_zone.description,
'user_message': self.landing_zone.user_message,
'configuration': self.landing_zone.configuration,
'config_data': self.landing_zone.config_data,
'irods_path': irods_backend.get_path(self.landing_zone),
'sodar_uuid': str(self.landing_zone.sodar_uuid),
}
self.assertEqual(json.loads(response.content)[0], expected)
def test_get_no_own_zones(self):
"""Test LandingZoneListAPIView get() as user with no own zones"""
url = reverse(
'landingzones:api_list', kwargs={'project': self.project.sodar_uuid}
)
response = self.request_knox(
url, token=self.get_token(self.user_contrib)
)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 0)
def test_get_finished_default(self):
"""Test get() with a finished zone and no finished parameter"""
self._make_landing_zone(
title=ZONE_TITLE + '_moved',
project=self.project,
user=self.owner_as.user,
assay=self.assay,
description=ZONE_DESC,
status='MOVED',
)
url = reverse(
'landingzones:api_list', kwargs={'project': self.project.sodar_uuid}
)
response = self.request_knox(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 1)
self.assertEqual(
json.loads(response.content)[0]['sodar_uuid'],
str(self.landing_zone.sodar_uuid),
)
def test_get_finished_false(self):
"""Test get() with a finished zone and finished=0"""
self._make_landing_zone(
title=ZONE_TITLE + '_moved',
project=self.project,
user=self.owner_as.user,
assay=self.assay,
description=ZONE_DESC,
status='MOVED',
)
url = (
reverse(
'landingzones:api_list',
kwargs={'project': self.project.sodar_uuid},
)
+ '?finished=0'
)
response = self.request_knox(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 1)
self.assertEqual(
json.loads(response.content)[0]['sodar_uuid'],
str(self.landing_zone.sodar_uuid),
)
def test_get_finished_true(self):
"""Test get() with a finished zone and finished=1"""
self._make_landing_zone(
title=ZONE_TITLE + '_moved',
project=self.project,
user=self.owner_as.user,
assay=self.assay,
description=ZONE_DESC,
status='MOVED',
)
url = (
reverse(
'landingzones:api_list',
kwargs={'project': self.project.sodar_uuid},
)
+ '?finished=1'
)
response = self.request_knox(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 2)
@skipIf(not IRODS_BACKEND_ENABLED, IRODS_BACKEND_SKIP_MSG)
class TestLandingZoneRetrieveAPIView(TestLandingZoneAPIViewsBase):
"""Tests for LandingZoneRetrieveAPIView"""
def test_get(self):
"""Test LandingZoneRetrieveAPIView get() as zone owner"""
irods_backend = get_backend_api('omics_irods', conn=False)
url = reverse(
'landingzones:api_retrieve',
kwargs={'landingzone': self.landing_zone.sodar_uuid},
)
response = self.request_knox(url)
self.assertEqual(response.status_code, 200)
expected = {
'title': self.landing_zone.title,
'project': str(self.project.sodar_uuid),
'user': self.get_serialized_user(self.user),
'assay': str(self.assay.sodar_uuid),
'status': self.landing_zone.status,
'status_info': self.landing_zone.status_info,
'status_locked': False,
'date_modified': self.get_drf_datetime(
self.landing_zone.date_modified
),
'description': self.landing_zone.description,
'user_message': self.landing_zone.user_message,
'configuration': self.landing_zone.configuration,
'config_data': self.landing_zone.config_data,
'irods_path': irods_backend.get_path(self.landing_zone),
'sodar_uuid': str(self.landing_zone.sodar_uuid),
}
self.assertEqual(json.loads(response.content), expected)
def test_get_locked(self):
"""Test get() with locked landing zone status"""
self.landing_zone.status = 'MOVING'
self.landing_zone.save()
url = reverse(
'landingzones:api_retrieve',
kwargs={'landingzone': self.landing_zone.sodar_uuid},
)
response = self.request_knox(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)['status_locked'], True) | landingzones/tests/test_views_api.py |
import json
from django.urls import reverse
from unittest import skipIf
# Projectroles dependency
from projectroles.models import SODAR_CONSTANTS
from projectroles.plugins import get_backend_api
from projectroles.tests.test_views_api import TestAPIViewsBase
# Samplesheets dependency
from samplesheets.tests.test_io import SampleSheetIOMixin, SHEET_DIR
from landingzones.tests.test_models import LandingZoneMixin
from landingzones.tests.test_views import (
IRODS_BACKEND_ENABLED,
IRODS_BACKEND_SKIP_MSG,
)
from landingzones.tests.test_views_taskflow import (
ZONE_TITLE,
ZONE_DESC,
)
# Global constants
PROJECT_ROLE_OWNER = SODAR_CONSTANTS['PROJECT_ROLE_OWNER']
PROJECT_ROLE_DELEGATE = SODAR_CONSTANTS['PROJECT_ROLE_DELEGATE']
PROJECT_ROLE_CONTRIBUTOR = SODAR_CONSTANTS['PROJECT_ROLE_CONTRIBUTOR']
PROJECT_ROLE_GUEST = SODAR_CONSTANTS['PROJECT_ROLE_GUEST']
PROJECT_TYPE_CATEGORY = SODAR_CONSTANTS['PROJECT_TYPE_CATEGORY']
PROJECT_TYPE_PROJECT = SODAR_CONSTANTS['PROJECT_TYPE_PROJECT']
# Local constants
SHEET_PATH = SHEET_DIR + 'i_small.zip'
ZONE_STATUS = 'VALIDATING'
ZONE_STATUS_INFO = 'Testing'
INVALID_UUID = '11111111-1111-1111-1111-111111111111'
# Base Views and Classes -------------------------------------------------------
class TestLandingZoneAPIViewsBase(
    LandingZoneMixin, SampleSheetIOMixin, TestAPIViewsBase
):
    """Base class for Landingzones API view testing"""
    def setUp(self):
        """Set up a contributor user, an imported ISA-Tab investigation and
        one ACTIVE landing zone owned by the project owner.
        """
        super().setUp()
        # Init contributor user and assignment
        self.user_contrib = self.make_user('user_contrib')
        self.contrib_as = self._make_assignment(
            self.project, self.user_contrib, self.role_contributor
        )
        # Import investigation
        # NOTE(review): the fixtures below assume the imported sheet contains
        # at least one study with one assay -- holds for i_small.zip
        # (SHEET_PATH); verify if the sheet fixture is ever changed.
        self.investigation = self._import_isa_from_file(
            SHEET_PATH, self.project
        )
        self.study = self.investigation.studies.first()
        self.assay = self.study.assays.first()
        # Create LandingZone
        # Status ACTIVE = an open, unlocked zone (see status_locked asserts
        # in the view tests below)
        self.landing_zone = self._make_landing_zone(
            title=ZONE_TITLE,
            project=self.project,
            user=self.owner_as.user,
            assay=self.assay,
            description=ZONE_DESC,
            status='ACTIVE',
        )
@skipIf(not IRODS_BACKEND_ENABLED, IRODS_BACKEND_SKIP_MSG)
class TestLandingZoneListAPIView(TestLandingZoneAPIViewsBase):
    """Tests for LandingZoneListAPIView"""
    def test_get_owner(self):
        """Test LandingZoneListAPIView get() as project owner"""
        # conn=False: only the path helper is needed, no live iRODS session
        irods_backend = get_backend_api('omics_irods', conn=False)
        url = reverse(
            'landingzones:api_list', kwargs={'project': self.project.sodar_uuid}
        )
        response = self.request_knox(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 1)
        expected = {
            'title': self.landing_zone.title,
            'project': str(self.project.sodar_uuid),
            'user': self.get_serialized_user(self.user),
            'assay': str(self.assay.sodar_uuid),
            'status': self.landing_zone.status,
            'status_info': self.landing_zone.status_info,
            'status_locked': False,  # zone was created with status ACTIVE
            'date_modified': self.get_drf_datetime(
                self.landing_zone.date_modified
            ),
            'description': self.landing_zone.description,
            'user_message': self.landing_zone.user_message,
            'configuration': self.landing_zone.configuration,
            'config_data': self.landing_zone.config_data,
            'irods_path': irods_backend.get_path(self.landing_zone),
            'sodar_uuid': str(self.landing_zone.sodar_uuid),
        }
        self.assertEqual(json.loads(response.content)[0], expected)
    def test_get_no_own_zones(self):
        """Test LandingZoneListAPIView get() as user with no own zones"""
        url = reverse(
            'landingzones:api_list', kwargs={'project': self.project.sodar_uuid}
        )
        # Request as the contributor, who owns no zones in setUp()
        response = self.request_knox(
            url, token=self.get_token(self.user_contrib)
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 0)
    def test_get_finished_default(self):
        """Test get() with a finished zone and no finished parameter"""
        # A MOVED zone counts as finished and is excluded by default
        self._make_landing_zone(
            title=ZONE_TITLE + '_moved',
            project=self.project,
            user=self.owner_as.user,
            assay=self.assay,
            description=ZONE_DESC,
            status='MOVED',
        )
        url = reverse(
            'landingzones:api_list', kwargs={'project': self.project.sodar_uuid}
        )
        response = self.request_knox(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 1)
        # Only the ACTIVE zone from setUp() should be returned
        self.assertEqual(
            json.loads(response.content)[0]['sodar_uuid'],
            str(self.landing_zone.sodar_uuid),
        )
    def test_get_finished_false(self):
        """Test get() with a finished zone and finished=0"""
        self._make_landing_zone(
            title=ZONE_TITLE + '_moved',
            project=self.project,
            user=self.owner_as.user,
            assay=self.assay,
            description=ZONE_DESC,
            status='MOVED',
        )
        # finished=0 must behave the same as omitting the parameter
        url = (
            reverse(
                'landingzones:api_list',
                kwargs={'project': self.project.sodar_uuid},
            )
            + '?finished=0'
        )
        response = self.request_knox(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(
            json.loads(response.content)[0]['sodar_uuid'],
            str(self.landing_zone.sodar_uuid),
        )
    def test_get_finished_true(self):
        """Test get() with a finished zone and finished=1"""
        self._make_landing_zone(
            title=ZONE_TITLE + '_moved',
            project=self.project,
            user=self.owner_as.user,
            assay=self.assay,
            description=ZONE_DESC,
            status='MOVED',
        )
        # finished=1 includes finished (e.g. MOVED) zones in the listing
        url = (
            reverse(
                'landingzones:api_list',
                kwargs={'project': self.project.sodar_uuid},
            )
            + '?finished=1'
        )
        response = self.request_knox(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 2)
@skipIf(not IRODS_BACKEND_ENABLED, IRODS_BACKEND_SKIP_MSG)
class TestLandingZoneRetrieveAPIView(TestLandingZoneAPIViewsBase):
    """Tests for LandingZoneRetrieveAPIView"""
    def test_get(self):
        """Test LandingZoneRetrieveAPIView get() as zone owner"""
        # conn=False: only the path helper is needed, no live iRODS session
        irods_backend = get_backend_api('omics_irods', conn=False)
        url = reverse(
            'landingzones:api_retrieve',
            kwargs={'landingzone': self.landing_zone.sodar_uuid},
        )
        response = self.request_knox(url)
        self.assertEqual(response.status_code, 200)
        expected = {
            'title': self.landing_zone.title,
            'project': str(self.project.sodar_uuid),
            'user': self.get_serialized_user(self.user),
            'assay': str(self.assay.sodar_uuid),
            'status': self.landing_zone.status,
            'status_info': self.landing_zone.status_info,
            'status_locked': False,  # zone was created with status ACTIVE
            'date_modified': self.get_drf_datetime(
                self.landing_zone.date_modified
            ),
            'description': self.landing_zone.description,
            'user_message': self.landing_zone.user_message,
            'configuration': self.landing_zone.configuration,
            'config_data': self.landing_zone.config_data,
            'irods_path': irods_backend.get_path(self.landing_zone),
            'sodar_uuid': str(self.landing_zone.sodar_uuid),
        }
        self.assertEqual(json.loads(response.content), expected)
    def test_get_locked(self):
        """Test get() with locked landing zone status"""
        # MOVING is an in-progress transfer status, so the API must report
        # the zone as locked
        self.landing_zone.status = 'MOVING'
        self.landing_zone.save()
        url = reverse(
            'landingzones:api_retrieve',
            kwargs={'landingzone': self.landing_zone.sodar_uuid},
        )
        response = self.request_knox(url)
        self.assertEqual(response.status_code, 200)
        # Fixed: stray extraction residue ("| 0.547706 | 0.133302") removed
        # from the end of this statement
        self.assertEqual(json.loads(response.content)['status_locked'], True)
# noqa
from django import forms
from components.component import Component, SetupConfMixin
from common.forms import BaseComponentForm, TypeCheckField
from common.constants import API_TYPE_OP
from .toolkit import tools, configs
class SendVoiceMsg(Component, SetupConfMixin):
    """Send an automatic voice notification to a list of users via the
    QCloud voice prompt service.
    """
    sys_name = configs.SYSTEM_NAME
    api_type = API_TYPE_OP
    class Form(BaseComponentForm):
        qcloud_app_id = forms.CharField(label='qcloud app id', required=True)
        qcloud_app_key = forms.CharField(label='qcloud app key', required=True)
        auto_read_message = forms.CharField(label='auto voice reading info', required=True)
        user_list_information = TypeCheckField(label='user list', promise_type=list, required=True)
        ext = forms.CharField(label='ext', required=False)
        def kwargs_generator(self, data):
            """Lazily yield one request payload per target user.

            Each user dict must contain a 'mobile_phone' key. The original
            user dict is kept under the 'user' key so per-user results can
            be reported after sending.
            """
            for user in data['user_list_information']:
                yield {
                    "user": user,
                    "promptfile": data['auto_read_message'],
                    "playtimes": configs.voice_playtimes,
                    "prompttype": configs.voice_prompttype,
                    "tel": {
                        "mobile": user['mobile_phone'],
                        "nationcode": configs.default_nation_code
                    },
                    "ext": data['ext']
                }
        def clean(self):
            """Return validated data plus the lazy payload generator."""
            data = self.cleaned_data
            return {
                "kwargs_generator": self.kwargs_generator(data),
                "qcloud_app_id": data["qcloud_app_id"],
                "qcloud_app_key": data["qcloud_app_key"]
            }
    def handle(self):
        """Send one voice prompt per user and report per-user results.

        Response payload: {"successed": [...], "failed": [...]}. The key
        spelling "successed" is kept as-is for backward compatibility with
        existing callers.
        """
        data = self.request.kwargs['kwargs_generator']
        client = tools.QCloudVoiceClient(self.outgoing.http_client)
        result = {
            "successed": [],
            "failed": []
        }
        for kwargs in data:
            # Each request needs a fresh nonce, timestamp and a signature
            # derived from the app key and the target mobile number.
            rnd = client.get_random()
            cur_time = client.get_cur_time()
            kwargs['time'] = cur_time
            kwargs['sig'] = client.generate_sig(
                self.request.kwargs['qcloud_app_key'],
                kwargs['tel']['mobile'],
                rnd,
                cur_time
            )
            user = kwargs.pop('user')
            ret = client.post(
                '/v5/tlsvoicesvr/sendvoiceprompt?sdkappid=%s&random=%s' % (self.request.kwargs['qcloud_app_id'], rnd),
                data=kwargs
            )
            user.update(ret)
            # Fixed: the original used a conditional expression as a bare
            # statement purely for its side effect; a plain if/else is the
            # idiomatic and readable form. result == 0 means success.
            if ret['result'] == 0:
                result['successed'].append(user)
            else:
                result['failed'].append(user)
        # Fixed: stray extraction residue removed from the end of this line
        self.response.payload = result
from django import forms
from components.component import Component, SetupConfMixin
from common.forms import BaseComponentForm, TypeCheckField
from common.constants import API_TYPE_OP
from .toolkit import tools, configs
class SendVoiceMsg(Component, SetupConfMixin):
""""""
sys_name = configs.SYSTEM_NAME
api_type = API_TYPE_OP
class Form(BaseComponentForm):
qcloud_app_id = forms.CharField(label='qcloud app id', required=True)
qcloud_app_key = forms.CharField(label='qcloud app key', required=True)
auto_read_message = forms.CharField(label='auto voice reading info', required=True)
user_list_information = TypeCheckField(label='user list', promise_type=list, required=True)
ext = forms.CharField(label='ext', required=False)
def kwargs_generator(self, data):
for user in data['user_list_information']:
yield {
"user": user,
"promptfile": data['auto_read_message'],
"playtimes": configs.voice_playtimes,
"prompttype": configs.voice_prompttype,
"tel": {
"mobile": user['mobile_phone'],
"nationcode": configs.default_nation_code
},
"ext": data['ext']
}
def clean(self):
data = self.cleaned_data
return {
"kwargs_generator": self.kwargs_generator(data),
"qcloud_app_id": data["qcloud_app_id"],
"qcloud_app_key": data["qcloud_app_key"]
}
def handle(self):
data = self.request.kwargs['kwargs_generator']
client = tools.QCloudVoiceClient(self.outgoing.http_client)
result = {
"successed": [],
"failed": []
}
for kwargs in data:
rnd = client.get_random()
cur_time = client.get_cur_time()
kwargs['time'] = cur_time
kwargs['sig'] = client.generate_sig(
self.request.kwargs['qcloud_app_key'],
kwargs['tel']['mobile'],
rnd,
cur_time
)
user = kwargs.pop('user')
ret = client.post(
'/v5/tlsvoicesvr/sendvoiceprompt?sdkappid=%s&random=%s' % (self.request.kwargs['qcloud_app_id'], rnd),
data=kwargs
)
user.update(ret)
result['successed'].append(user) if ret['result'] == 0 else result['failed'].append(user)
self.response.payload = result | 0.282988 | 0.130452 |
from __future__ import print_function
__author__ = '<NAME> (<EMAIL>)'
__version__ = '0.8r1'
import logging
import os
import sys
# Allow the local advanced_shell_history library to be imported.
_LIB = '/usr/local/lib'
if _LIB not in sys.path:
sys.path.append(_LIB)
from advanced_shell_history import unix
from advanced_shell_history import util
class Flags(util.Flags):
    """The flags needed for the _ash_log.py script to work."""
    # Value-taking flags: (short, long, value name, type, help text).
    arguments = (
        ('a', 'alert', 'MSG', str, 'a message to display to the user'),
        ('c', 'command', 'CMD', str, 'a command to log'),
        ('e', 'command_exit', 'CODE', int, 'the exit code of the command to log'),
        ('p', 'command_pipe_status', 'CSV', str, 'the pipe states of the command to log'),
        ('s', 'command_start', 'TS', int, 'the timestamp when the command started'),
        ('f', 'command_finish', 'TS', int, 'the timestamp when the command stopped'),
        ('n', 'command_number', 'NUM', int, 'the builtin shell history command number'),
        ('x', 'exit', 'CODE', int, 'the exit code to use when exiting'),
    )
    # Boolean flags: (short, long, help text).
    flags = (
        ('S', 'get_session_id', 'emits the session ID (or creates one)'),
        ('E', 'end_session', 'ends the current session'),
    )
    def __init__(self):
        # Explicit base-class call (not super()) keeps Python 2
        # compatibility, matching the __future__ import at the top of the
        # file.
        util.Flags.__init__(self, Flags.arguments, Flags.flags)
class Session(util.Database.Object):
    """An abstraction of a shell session to store to the history database."""
    def __init__(self):
        """Initialize a Session, populating session values."""
        util.Database.Object.__init__(self, 'sessions')
        # Snapshot of the environment at session start; keys map directly
        # onto the columns of the sessions table defined below.
        self.values = {
            'time_zone': unix.GetTimeZone(),
            'start_time': unix.GetTime(),
            'ppid': unix.GetPPID(),
            'pid': unix.GetPID(),
            'tty': unix.GetTTY(),
            'uid': unix.GetUID(),
            'euid': unix.GetEUID(),
            'logname': unix.GetLoginName(),
            'hostname': unix.GetHostName(),
            'host_ip': unix.GetHostIp(),
            'shell': unix.GetShell(),
            # sudo/ssh metadata is best-effort: None when the env vars are
            # absent.
            'sudo_user': unix.GetEnv('SUDO_USER'),
            'sudo_uid': unix.GetEnv('SUDO_UID'),
            'ssh_client': unix.GetEnv('SSH_CLIENT'),
            'ssh_connection': unix.GetEnv('SSH_CONNECTION')
        }
    def GetCreateTableSql(self):
        """Return the DDL used to create the sessions table if missing."""
        return '''
  CREATE TABLE sessions (
    id integer primary key autoincrement,
    hostname varchar(128),
    host_ip varchar(40),
    ppid int(5) not null,
    pid int(5) not null,
    time_zone str(3) not null,
    start_time integer not null,
    end_time integer,
    duration integer,
    tty varchar(20) not null,
    uid int(16) not null,
    euid int(16) not null,
    logname varchar(48),
    shell varchar(50) not null,
    sudo_user varchar(48),
    sudo_uid int(16),
    ssh_client varchar(60),
    ssh_connection varchar(100)
  )'''
    def Close(self):
        """Closes this session in the database."""
        # The same timestamp is bound twice: once for end_time and once for
        # the duration computation. The session to close is identified by
        # the ASH_SESSION_ID environment variable.
        sql = '''
      UPDATE sessions
      SET
        end_time = ?,
        duration = ? - start_time
      WHERE id == ?;
    '''
        ts = unix.GetTime()
        util.Database().Execute(sql, (ts, ts, unix.GetEnvInt('ASH_SESSION_ID'),))
class Command(util.Database.Object):
    """An abstraction of a command to store to the history database."""
    def __init__(self, command, rval, start, finish, number, pipes):
        """Initialize a Command.

        Args:
          command: the command line entered by the user.
          rval: the exit code of the command.
          start: unix timestamp when the command started.
          finish: unix timestamp when the command finished.
          number: the shell's builtin history number for the command.
          pipes: '_'-delimited pipe exit statuses (e.g. '0_1_0').
        """
        util.Database.Object.__init__(self, 'commands')
        self.values = {
            'session_id': unix.GetEnvInt('ASH_SESSION_ID'),
            'shell_level': unix.GetEnvInt('SHLVL'),
            'command_no': number,
            'tty': unix.GetTTY(),
            'euid': unix.GetEUID(),
            'cwd': unix.GetCWD(),
            'rval': rval,
            'start_time': start,
            'end_time': finish,
            'duration': finish - start,
            # pipe_cnt is derived from the '_'-separated status string.
            'pipe_cnt': len(pipes.split('_')),
            'pipe_vals': pipes,
            'command': command
        }
        # If the user changed directories, CWD will be the new directory, not the
        # one where the command was actually entered.
        # Only applies to a *successful* cd; a failed cd leaves CWD intact.
        if rval == 0 and (command == 'cd' or command.startswith('cd ')):
            self.values['cwd'] = unix.GetEnv('OLDPWD')
    def GetCreateTableSql(self):
        """Return the DDL used to create the commands table if missing."""
        return '''
  CREATE TABLE commands (
    id integer primary key autoincrement,
    session_id integer not null,
    shell_level integer not null,
    command_no integer,
    tty varchar(20) not null,
    euid int(16) not null,
    cwd varchar(256) not null,
    rval int(5) not null,
    start_time integer not null,
    end_time integer not null,
    duration integer not null,
    pipe_cnt int(3),
    pipe_vals varchar(80),
    command varchar(1000) not null,
    UNIQUE(session_id, command_no)
  )'''
def main(argv):
    """Log a command and/or manage the session state.

    Order matters: alert, usage, session creation, command insert, session
    close. Returns the exit code requested via --exit.
    """
    # If ASH_DISABLED is set, we skip everything and exit without error.
    if os.getenv('ASH_DISABLED'): return 0
    # Setup.
    util.InitLogging()
    # Log the command, if debug logging is enabled.
    if logging.getLogger().isEnabledFor(logging.DEBUG):
        command = []
        for arg in argv:
            command.append('[%d]=\'%s\'' % (len(command), arg))
        logging.debug('argv = "' + ','.join(command) + '"')
    # Print an alert if one was specified.
    flags = Flags()
    if flags.alert:
        print(flags.alert, file=sys.stderr)
    # If no arguments were given, it may be best to show --help.
    if len(argv) == 1 and not util.Config().GetBool('HIDE_USAGE_FOR_NO_ARGS'):
        flags.PrintHelp()
    # Create the session id, if not already set in the environment.
    session_id = os.getenv('ASH_SESSION_ID')
    if flags.get_session_id:
        if session_id is None:
            session_id = Session().Insert()
        print(session_id)
    # Insert a new command into the database, if one was supplied.
    # NOTE(review): this uses truthiness, so a command described only by
    # all-falsy values (e.g. empty command string with exit code 0) would be
    # skipped -- confirm this is the intended behavior.
    command_flag_used = bool(flags.command
                             or flags.command_exit
                             or flags.command_pipe_status
                             or flags.command_start
                             or flags.command_finish
                             or flags.command_number)
    if command_flag_used:
        Command(
            flags.command, flags.command_exit, flags.command_start,
            flags.command_finish, flags.command_number, flags.command_pipe_status
        ).Insert()
    # End the current session.
    if flags.end_session:
        Session().Close()
    # Return the desired exit code.
    return flags.exit
if __name__ == '__main__':
sys.exit(main(sys.argv)) | python/_ash_log.py | from __future__ import print_function
__author__ = '<NAME> (<EMAIL>)'
__version__ = '0.8r1'
import logging
import os
import sys
# Allow the local advanced_shell_history library to be imported.
_LIB = '/usr/local/lib'
if _LIB not in sys.path:
sys.path.append(_LIB)
from advanced_shell_history import unix
from advanced_shell_history import util
class Flags(util.Flags):
"""The flags needed for the _ash_log.py script to work."""
arguments = (
('a', 'alert', 'MSG', str, 'a message to display to the user'),
('c', 'command', 'CMD', str, 'a command to log'),
('e', 'command_exit', 'CODE', int, 'the exit code of the command to log'),
('p', 'command_pipe_status', 'CSV', str, 'the pipe states of the command to log'),
('s', 'command_start', 'TS', int, 'the timestamp when the command started'),
('f', 'command_finish', 'TS', int, 'the timestamp when the command stopped'),
('n', 'command_number', 'NUM', int, 'the builtin shell history command number'),
('x', 'exit', 'CODE', int, 'the exit code to use when exiting'),
)
flags = (
('S', 'get_session_id', 'emits the session ID (or creates one)'),
('E', 'end_session', 'ends the current session'),
)
def __init__(self):
util.Flags.__init__(self, Flags.arguments, Flags.flags)
class Session(util.Database.Object):
"""An abstraction of a shell session to store to the history database."""
def __init__(self):
"""Initialize a Session, populating session values."""
util.Database.Object.__init__(self, 'sessions')
self.values = {
'time_zone': unix.GetTimeZone(),
'start_time': unix.GetTime(),
'ppid': unix.GetPPID(),
'pid': unix.GetPID(),
'tty': unix.GetTTY(),
'uid': unix.GetUID(),
'euid': unix.GetEUID(),
'logname': unix.GetLoginName(),
'hostname': unix.GetHostName(),
'host_ip': unix.GetHostIp(),
'shell': unix.GetShell(),
'sudo_user': unix.GetEnv('SUDO_USER'),
'sudo_uid': unix.GetEnv('SUDO_UID'),
'ssh_client': unix.GetEnv('SSH_CLIENT'),
'ssh_connection': unix.GetEnv('SSH_CONNECTION')
}
def GetCreateTableSql(self):
return '''
CREATE TABLE sessions (
id integer primary key autoincrement,
hostname varchar(128),
host_ip varchar(40),
ppid int(5) not null,
pid int(5) not null,
time_zone str(3) not null,
start_time integer not null,
end_time integer,
duration integer,
tty varchar(20) not null,
uid int(16) not null,
euid int(16) not null,
logname varchar(48),
shell varchar(50) not null,
sudo_user varchar(48),
sudo_uid int(16),
ssh_client varchar(60),
ssh_connection varchar(100)
)'''
def Close(self):
"""Closes this session in the database."""
sql = '''
UPDATE sessions
SET
end_time = ?,
duration = ? - start_time
WHERE id == ?;
'''
ts = unix.GetTime()
util.Database().Execute(sql, (ts, ts, unix.GetEnvInt('ASH_SESSION_ID'),))
class Command(util.Database.Object):
"""An abstraction of a command to store to the history database."""
def __init__(self, command, rval, start, finish, number, pipes):
util.Database.Object.__init__(self, 'commands')
self.values = {
'session_id': unix.GetEnvInt('ASH_SESSION_ID'),
'shell_level': unix.GetEnvInt('SHLVL'),
'command_no': number,
'tty': unix.GetTTY(),
'euid': unix.GetEUID(),
'cwd': unix.GetCWD(),
'rval': rval,
'start_time': start,
'end_time': finish,
'duration': finish - start,
'pipe_cnt': len(pipes.split('_')),
'pipe_vals': pipes,
'command': command
}
# If the user changed directories, CWD will be the new directory, not the
# one where the command was actually entered.
if rval == 0 and (command == 'cd' or command.startswith('cd ')):
self.values['cwd'] = unix.GetEnv('OLDPWD')
def GetCreateTableSql(self):
return '''
CREATE TABLE commands (
id integer primary key autoincrement,
session_id integer not null,
shell_level integer not null,
command_no integer,
tty varchar(20) not null,
euid int(16) not null,
cwd varchar(256) not null,
rval int(5) not null,
start_time integer not null,
end_time integer not null,
duration integer not null,
pipe_cnt int(3),
pipe_vals varchar(80),
command varchar(1000) not null,
UNIQUE(session_id, command_no)
)'''
def main(argv):
# If ASH_DISABLED is set, we skip everything and exit without error.
if os.getenv('ASH_DISABLED'): return 0
# Setup.
util.InitLogging()
# Log the command, if debug logging is enabled.
if logging.getLogger().isEnabledFor(logging.DEBUG):
command = []
for arg in argv:
command.append('[%d]=\'%s\'' % (len(command), arg))
logging.debug('argv = "' + ','.join(command) + '"')
# Print an alert if one was specified.
flags = Flags()
if flags.alert:
print(flags.alert, file=sys.stderr)
# If no arguments were given, it may be best to show --help.
if len(argv) == 1 and not util.Config().GetBool('HIDE_USAGE_FOR_NO_ARGS'):
flags.PrintHelp()
# Create the session id, if not already set in the environment.
session_id = os.getenv('ASH_SESSION_ID')
if flags.get_session_id:
if session_id is None:
session_id = Session().Insert()
print(session_id)
# Insert a new command into the database, if one was supplied.
command_flag_used = bool(flags.command
or flags.command_exit
or flags.command_pipe_status
or flags.command_start
or flags.command_finish
or flags.command_number)
if command_flag_used:
Command(
flags.command, flags.command_exit, flags.command_start,
flags.command_finish, flags.command_number, flags.command_pipe_status
).Insert()
# End the current session.
if flags.end_session:
Session().Close()
# Return the desired exit code.
return flags.exit
if __name__ == '__main__':
sys.exit(main(sys.argv)) | 0.432183 | 0.096748 |
from typing import Any, TypeVar, Callable, List, Union
import gym
import ray
from muzero.global_vars import GlobalVars
from muzero.policy import Policy
from muzero.rollout_worker import RolloutWorker
TrainerConfigDict = dict
EnvType = gym.Env
class WorkerSet:
    """Represents a set of RolloutWorkers.
    There must be one local worker copy, and zero or more remote workers.
    """
    def __init__(self,
                 env_creator: Callable[[Any], EnvType],
                 policy: Policy,
                 trainer_config: TrainerConfigDict,
                 global_vars: GlobalVars,
                 num_workers: int = 0,
                 logdir: str = None):
        """Create a new WorkerSet and initialize its workers.
        Arguments:
            env_creator (func): Function that returns env given env config.
            policy (cls): rllib.policy.Policy class.
            trainer_config (dict): Optional dict that extends the common
                config of the Trainer class.
            global_vars (GlobalVars): Shared global state handed to every
                worker.
            num_workers (int): Number of remote rollout workers to create.
            logdir (str): Optional logging directory for workers.
        """
        # Fixed: the docstring documented a nonexistent "_setup" parameter;
        # it has been removed above.
        self._env_creator = env_creator
        self._policy = policy
        self._config = trainer_config
        self._global_vars = global_vars
        # NOTE(review): logdir is stored but unused in this class; kept for
        # interface compatibility.
        self._logdir = logdir
        # Always create a local worker. The positional args (1, 0) are
        # presumably (total worker count, worker index) -- confirm against
        # the RolloutWorker signature.
        self._local_worker = RolloutWorker(
            env_creator, policy, self._config, 1, 0, global_vars)
        # Create a number of remote workers
        self._remote_workers = []
        self._num_workers = 0
        self.add_workers(num_workers)
    def local_worker(self) -> RolloutWorker:
        """Return the local (in-process) worker."""
        return self._local_worker
    def remote_workers(self) -> List[RolloutWorker]:
        """Return the list of remote worker actor handles."""
        return self._remote_workers
    def sync_weights(self) -> None:
        """Syncs weights of remote workers with the local worker."""
        if self.remote_workers():
            # Put the weights in the object store once and share the handle
            # with all remote workers instead of shipping a copy to each.
            weights = ray.put(self.local_worker().get_weights())
            for e in self.remote_workers():
                e.set_weights.remote(weights)
    def stop(self) -> None:
        """Stop all rollout workers."""
        self.local_worker().stop()
        for w in self.remote_workers():
            w.stop.remote()
            w.__ray_terminate__.remote()
    def add_workers(self, num_workers: int) -> None:
        """Creates and add a number of remote workers to this worker set.
        Args:
            num_workers (int): The number of remote Workers to add to this
                WorkerSet.
        """
        # Wrap RolloutWorker as a Ray actor class with the configured
        # per-worker resource requirements.
        cls = ray.remote(
            num_cpus=self._config["num_cpus_per_worker"],
            num_gpus=self._config["num_gpus_per_worker"],
            memory=self._config["memory_per_worker"],
            object_store_memory=self._config["object_store_memory_per_worker"],
            resources=self._config["custom_resources_per_worker"]
        )(RolloutWorker).remote
        # Worker indices continue from the current count; index 0 is the
        # local worker, so remote indices start at self._num_workers + 1.
        self._remote_workers.extend([
            cls(
                self._env_creator,
                self._policy,
                self._config,
                self._num_workers + num_workers,
                self._num_workers + i + 1,
                self._global_vars)
            for i in range(num_workers)
        ])
        self._num_workers += num_workers
    def remove_workers(self, num_workers: int) -> None:
        """Shut down up to num_workers remote workers, newest first."""
        while num_workers > 0:
            if not self._remote_workers:
                break
            worker = self._remote_workers.pop()
            worker.shutdown.remote()
            self._num_workers -= 1
            # Fixed: stray extraction residue removed from the end of this
            # line; without the fix the loop counter was never decremented.
            num_workers -= 1
import gym
import ray
from muzero.global_vars import GlobalVars
from muzero.policy import Policy
from muzero.rollout_worker import RolloutWorker
TrainerConfigDict = dict
EnvType = gym.Env
class WorkerSet:
"""Represents a set of RolloutWorkers.
There must be one local worker copy, and zero or more remote workers.
"""
def __init__(self,
env_creator: Callable[[Any], EnvType],
policy: Policy,
trainer_config: TrainerConfigDict,
global_vars: GlobalVars,
num_workers: int = 0,
logdir: str = None):
"""Create a new WorkerSet and initialize its workers.
Arguments:
env_creator (func): Function that returns env given env config.
policy (cls): rllib.policy.Policy class.
trainer_config (dict): Optional dict that extends the common
config of the Trainer class.
num_workers (int): Number of remote rollout workers to create.
logdir (str): Optional logging directory for workers.
_setup (bool): Whether to setup workers. This is only for testing.
"""
self._env_creator = env_creator
self._policy = policy
self._config = trainer_config
self._global_vars = global_vars
self._logdir = logdir
# Always create a local worker
self._local_worker = RolloutWorker(
env_creator, policy, self._config, 1, 0, global_vars)
# Create a number of remote workers
self._remote_workers = []
self._num_workers = 0
self.add_workers(num_workers)
def local_worker(self) -> RolloutWorker:
return self._local_worker
def remote_workers(self) -> List[RolloutWorker]:
return self._remote_workers
def sync_weights(self) -> None:
"""Syncs weights of remote workers with the local worker."""
if self.remote_workers():
weights = ray.put(self.local_worker().get_weights())
for e in self.remote_workers():
e.set_weights.remote(weights)
def stop(self) -> None:
"""Stop all rollout workers."""
self.local_worker().stop()
for w in self.remote_workers():
w.stop.remote()
w.__ray_terminate__.remote()
def add_workers(self, num_workers: int) -> None:
"""Creates and add a number of remote workers to this worker set.
Args:
num_workers (int): The number of remote Workers to add to this
WorkerSet.
"""
cls = ray.remote(
num_cpus=self._config["num_cpus_per_worker"],
num_gpus=self._config["num_gpus_per_worker"],
memory=self._config["memory_per_worker"],
object_store_memory=self._config["object_store_memory_per_worker"],
resources=self._config["custom_resources_per_worker"]
)(RolloutWorker).remote
self._remote_workers.extend([
cls(
self._env_creator,
self._policy,
self._config,
self._num_workers + num_workers,
self._num_workers + i + 1,
self._global_vars)
for i in range(num_workers)
])
self._num_workers += num_workers
def remove_workers(self, num_workers: int) -> None:
while num_workers > 0:
if not self._remote_workers:
break
worker = self._remote_workers.pop()
worker.shutdown.remote()
self._num_workers -= 1
num_workers -= 1 | 0.930062 | 0.236098 |
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
def predict_numerical_from_text(train_df: pd.DataFrame, test_df: pd.DataFrame, text_col: str, target_col: str):
    """
    Predict a numerical value between 0 and 1 from text using Logistic Regression on TF-IDF features.
    Specifically, the _predictor will predict the probability of the target being below or above 0.5.
    Can be used for e.g. sentiment analysis.
    :param train_df: a Pandas dataframe containing the training set
    :param test_df: a Pandas dataframe containing the test set
    :param text_col: the column containing the text that will be used as features
    :param target_col: the column containing the numerical target
    :return: a tuple containing the test set predictions as well as the mean cross-validation score
    """
    # Fixed: binarize the target into a local Series instead of writing a
    # 'class' column into the caller's dataframe (the original mutated
    # train_df as a side effect).
    train_labels = train_df[target_col] >= 0.5
    # Bag-of-words -> TF-IDF -> logistic regression in one pipeline, so
    # cross-validation re-fits the vectorizer on each fold.
    text_clf = Pipeline([
        ('vect', CountVectorizer()),
        ('tfidf', TfidfTransformer()),
        ('clf', LogisticRegression()),
    ])
    scores = cross_val_score(text_clf, train_df[text_col], train_labels, cv=3)
    mean_clf_score = scores.mean()
    # Refit on the full training set before predicting on the test set.
    text_clf.fit(train_df[text_col], train_labels)
    predicted = text_clf.predict_proba(test_df[text_col])
    return predicted, mean_clf_score
def predict_multiclass_from_text(train_df: pd.DataFrame, test_df: pd.DataFrame, text_col: str, target_col: str,
                                 metric='neg_log_loss'):
    """
    Multi-class classification from text using Logistic Regression on TF-IDF features.
    :param train_df: a Pandas dataframe containing the training set
    :param test_df: a Pandas dataframe containing the test set
    :param text_col: the column containing the text that will be used as features
    :param target_col: the column containing the class target
    :param metric: scoring metric for cross-validation
    :return: a tuple containing the test set class probabilities as well as the mean cross-validation score
    """
    # Bag-of-words -> TF-IDF -> logistic regression in one pipeline, so
    # cross-validation re-fits the vectorizer on each fold.
    text_clf = Pipeline([
        ('vect', CountVectorizer()),
        ('tfidf', TfidfTransformer()),
        ('clf', LogisticRegression()),
    ])
    scores = cross_val_score(text_clf, train_df[text_col], train_df[target_col], cv=3, scoring=metric)
    mean_clf_score = scores.mean()
    # Refit on the full training set before predicting on the test set.
    text_clf.fit(train_df[text_col], train_df[target_col])
    predicted = text_clf.predict_proba(test_df[text_col])
    # Fixed: stray extraction residue removed from the end of this line;
    # docstring typos fixed ("the df set" -> "the test set", target is a
    # class label, not numerical).
    return predicted, mean_clf_score
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
def predict_numerical_from_text(train_df: pd.DataFrame, test_df: pd.DataFrame, text_col: str, target_col: str):
"""
Predict a numerical value between 0 and 1 from text using Logistic Regression on TF-IDF features.
Specifically, the _predictor will predict the probability of the target being below or above 0.5.
Can be used for e.g. sentiment analysis.
:param train_df: a Pandas dataframe containing the training set
:param test_df: a Pandas dataframe containing the df set
:param text_col: the column containing the text that will be used as features
:param target_col: the column containing the numerical target
:return: a tuple containing the test set predictions as well as the mean cross-validation score
"""
train_df['class'] = train_df[target_col] >= 0.5
text_clf = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', LogisticRegression()),
])
scores = cross_val_score(text_clf, train_df[text_col], train_df['class'], cv=3)
mean_clf_score = scores.mean()
text_clf.fit(train_df[text_col], train_df['class'])
predicted = text_clf.predict_proba(test_df[text_col])
return predicted, mean_clf_score
def predict_multiclass_from_text(train_df: pd.DataFrame, test_df: pd.DataFrame, text_col: str, target_col: str,
metric='neg_log_loss'):
"""
Multi-class classification from text using Logistic Regression on TF-IDF features.
:param train_df: a Pandas dataframe containing the training set
:param test_df: a Pandas dataframe containing the df set
:param text_col: the column containing the text that will be used as features
:param target_col: the column containing the numerical target
:param metric: scoring metric for cross-validation
:return: a tuple containing the test set predictions as well as the mean cross-validation score
"""
text_clf = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', LogisticRegression()),
])
scores = cross_val_score(text_clf, train_df[text_col], train_df[target_col], cv=3, scoring=metric)
mean_clf_score = scores.mean()
text_clf.fit(train_df[text_col], train_df[target_col])
predicted = text_clf.predict_proba(test_df[text_col])
return predicted, mean_clf_score | 0.93906 | 0.662911 |
from django.db import models
from django.utils import timezone
class ReindexingManager(models.Manager):
    """Used to flag when an elasticsearch reindexing is occurring."""
    def _flag_reindexing(self, site, new_index, old_index, alias):
        """Flag the database for a reindex on the given site.

        Returns the created Reindexing instance, or None if the site was
        already flagged (note the inconsistent return value).
        """
        if self._is_reindexing(site):
            return  # Already flagged.
        return self.create(new_index=new_index,
                           old_index=old_index,
                           alias=alias,
                           site=site)
    def flag_reindexing_amo(self, new_index, old_index, alias):
        """Flag the database for an AMO reindex."""
        return self._flag_reindexing('amo', new_index, old_index, alias)
    def _unflag_reindexing(self, site):
        """Unflag the database for a reindex on the given site."""
        # Deleting the rows is what "unflags": _is_reindexing checks for
        # row existence.
        self.filter(site=site).delete()
    def unflag_reindexing_amo(self):
        """Unflag the database for an AMO reindex."""
        self._unflag_reindexing('amo')
    def _is_reindexing(self, site):
        """Return True if a reindexing is occurring for the given site."""
        return self.filter(site=site).exists()
    def is_reindexing_amo(self):
        """Return True if a reindexing is occurring on AMO."""
        return self._is_reindexing('amo')
    def get_indices(self, index):
        """Return the indices associated with an alias.
        If we are reindexing, there should be two indices returned.
        """
        try:
            reindex = self.get(alias=index)
            # Yes. Let's reindex on both indexes.
            # old_index may be None (first-time index); filter it out.
            return [idx for idx in (reindex.new_index, reindex.old_index)
                    if idx is not None]
        except Reindexing.DoesNotExist:
            # No reindex in flight for this alias: it maps to itself.
            return [index]
class Reindexing(models.Model):
    """One row per in-progress reindex, recording the index pair and alias."""

    SITE_CHOICES = (
        ('amo', 'AMO'),
    )

    start_date = models.DateTimeField(default=timezone.now)
    # old_index is nullable: a first-time reindex has no previous index.
    old_index = models.CharField(max_length=255, null=True)
    new_index = models.CharField(max_length=255)
    alias = models.CharField(max_length=255)
    site = models.CharField(max_length=3, choices=SITE_CHOICES)

    objects = ReindexingManager()

    class Meta:
        db_table = 'zadmin_reindexing'
from django.utils import timezone
class ReindexingManager(models.Manager):
    """Used to flag when an elasticsearch reindexing is occurring."""

    def _flag_reindexing(self, site, new_index, old_index, alias):
        """Flag the database for a reindex on the given site.

        Returns the created row, or None when the site is already flagged.
        """
        if self._is_reindexing(site):
            return  # Already flagged.
        return self.create(new_index=new_index,
                           old_index=old_index,
                           alias=alias,
                           site=site)

    def flag_reindexing_amo(self, new_index, old_index, alias):
        """Flag the database for an AMO reindex."""
        return self._flag_reindexing('amo', new_index, old_index, alias)

    def _unflag_reindexing(self, site):
        """Unflag the database for a reindex on the given site."""
        self.filter(site=site).delete()

    def unflag_reindexing_amo(self):
        """Unflag the database for an AMO reindex."""
        self._unflag_reindexing('amo')

    def _is_reindexing(self, site):
        """Return True if a reindexing is occurring for the given site."""
        return self.filter(site=site).exists()

    def is_reindexing_amo(self):
        """Return True if a reindexing is occurring on AMO."""
        return self._is_reindexing('amo')

    def get_indices(self, index):
        """Return the indices associated with an alias.

        If we are reindexing, there should be two indices returned.
        """
        try:
            reindex = self.get(alias=index)
            # Yes. Let's reindex on both indexes.
            # old_index may be NULL on a first-time reindex, hence the filter.
            return [idx for idx in (reindex.new_index, reindex.old_index)
                    if idx is not None]
        except Reindexing.DoesNotExist:
            # Not reindexing: the alias maps to a single index.
            return [index]
class Reindexing(models.Model):
    """One row per in-progress reindex, recording the index pair and alias."""

    SITE_CHOICES = (
        ('amo', 'AMO'),
    )

    start_date = models.DateTimeField(default=timezone.now)
    # Nullable: a first-time reindex has no previous index to record.
    old_index = models.CharField(max_length=255, null=True)
    new_index = models.CharField(max_length=255)
    alias = models.CharField(max_length=255)
    site = models.CharField(max_length=3, choices=SITE_CHOICES)

    objects = ReindexingManager()

    class Meta:
        db_table = 'zadmin_reindexing'
import torch.nn as nn
import torch
# Keyword arguments for bilinear upsampling of feature maps.
up_kwargs = {'mode': 'bilinear', 'align_corners': False}
# Default normalisation layer; overridable via the DAHead constructor.
norm_layer = nn.BatchNorm2d
class _PositionAttentionModule(nn.Module):
    """Position attention module: reweights each spatial position by its
    affinity to every other position."""

    def __init__(self, in_channels):
        super(_PositionAttentionModule, self).__init__()
        # 1x1 projections; b and c reduce channels 8x to keep attention cheap.
        self.conv_b = nn.Conv2d(in_channels, in_channels // 8, 1)
        self.conv_c = nn.Conv2d(in_channels, in_channels // 8, 1)
        self.conv_d = nn.Conv2d(in_channels, in_channels, 1)
        # Learnable residual weight, zero-initialised so the module starts
        # as the identity.
        self.alpha = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        batch_size, _, height, width = x.size()
        # (B, HW, C//8) x (B, C//8, HW) -> (B, HW, HW) spatial affinity map.
        feat_b = self.conv_b(x).view(batch_size, -1, height * width).permute(0, 2, 1)
        feat_c = self.conv_c(x).view(batch_size, -1, height * width)
        attention_s = self.softmax(torch.bmm(feat_b, feat_c))
        feat_d = self.conv_d(x).view(batch_size, -1, height * width)
        # Aggregate values over all positions, then fold back to (B, C, H, W).
        feat_e = torch.bmm(feat_d, attention_s.permute(0, 2, 1)).view(batch_size, -1, height, width)
        # Residual connection scaled by the learned alpha.
        out = self.alpha * feat_e + x
        return out
class _ChannelAttentionModule(nn.Module):
    """Channel attention module: reweights each channel by its affinity to
    every other channel."""

    def __init__(self):
        super(_ChannelAttentionModule, self).__init__()
        # Learnable residual weight, zero-initialised so the module starts
        # as the identity.
        self.beta = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        batch_size, _, height, width = x.size()
        # (B, C, HW) x (B, HW, C) -> (B, C, C) channel affinity map.
        feat_a = x.view(batch_size, -1, height * width)
        feat_a_transpose = x.view(batch_size, -1, height * width).permute(0, 2, 1)
        attention = torch.bmm(feat_a, feat_a_transpose)
        # Row-max minus logits before softmax. NOTE(review): this inverts the
        # logits rather than merely stabilising them; it matches common DANet
        # implementations -- confirm intent before changing.
        attention_new = torch.max(attention, dim=-1, keepdim=True)[0].expand_as(attention) - attention
        attention = self.softmax(attention_new)
        feat_e = torch.bmm(attention, feat_a).view(batch_size, -1, height, width)
        # Residual connection scaled by the learned beta.
        out = self.beta * feat_e + x
        return out
class DAHead(nn.Module):
    """Dual Attention head (DANet): fuses a position-attention branch and a
    channel-attention branch over one backbone feature map.

    Args:
        in_channels: channels of the selected backbone feature map.
        num_classes: number of output classes.
        aux: when True, also emit one auxiliary logit map per branch.
        norm_layer: normalisation layer class applied after each conv.
        norm_kwargs: optional kwargs forwarded to norm_layer.
        in_index: index (or indices) selecting the input feature map(s).
    """

    def __init__(self, in_channels, num_classes, aux=False, norm_layer=norm_layer, norm_kwargs=None, in_index=3):
        super(DAHead, self).__init__()
        self.aux = aux
        self.in_index = in_index
        # Both branches work at a quarter of the input channel count.
        inter_channels = in_channels // 4
        self.conv_p1 = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)),
            nn.ReLU(True)
        )
        self.conv_c1 = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)),
            nn.ReLU(True)
        )
        self.pam = _PositionAttentionModule(inter_channels)
        self.cam = _ChannelAttentionModule()
        self.conv_p2 = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),
            norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)),
            nn.ReLU(True)
        )
        self.conv_c2 = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),
            norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)),
            nn.ReLU(True)
        )
        self.out = nn.Sequential(
            nn.Dropout(0.1),
            nn.Conv2d(inter_channels, num_classes, 1)
        )
        if aux:
            # Auxiliary classifiers, one per attention branch.
            self.conv_p3 = nn.Sequential(
                nn.Dropout(0.1),
                nn.Conv2d(inter_channels, num_classes, 1)
            )
            self.conv_c3 = nn.Sequential(
                nn.Dropout(0.1),
                nn.Conv2d(inter_channels, num_classes, 1)
            )

    def _transform_inputs(self, inputs):
        """Select the configured feature map(s) from the backbone outputs."""
        if isinstance(self.in_index, (list, tuple)):
            inputs = [inputs[i] for i in self.in_index]
        elif isinstance(self.in_index, int):
            inputs = inputs[self.in_index]
        return inputs

    def forward(self, inputs):
        x = self._transform_inputs(inputs)
        # Position-attention branch.
        feat_p = self.conv_p1(x)
        feat_p = self.pam(feat_p)
        feat_p = self.conv_p2(feat_p)
        # Channel-attention branch.
        feat_c = self.conv_c1(x)
        feat_c = self.cam(feat_c)
        feat_c = self.conv_c2(feat_c)
        # Element-wise sum fusion of the two branches.
        feat_fusion = feat_p + feat_c
        outputs = []
        fusion_out = self.out(feat_fusion)
        outputs.append(fusion_out)
        if self.aux:
            p_out = self.conv_p3(feat_p)
            c_out = self.conv_c3(feat_c)
            outputs.append(p_out)
            outputs.append(c_out)
        return outputs
import torch
# Keyword arguments for bilinear upsampling of feature maps.
up_kwargs = {'mode': 'bilinear', 'align_corners': False}
# Default normalisation layer; overridable via the DAHead constructor.
norm_layer = nn.BatchNorm2d
class _PositionAttentionModule(nn.Module):
    """Position attention module (spatial self-attention)."""

    def __init__(self, in_channels):
        super(_PositionAttentionModule, self).__init__()
        # 1x1 projections; b and c reduce channels 8x to cheapen attention.
        self.conv_b = nn.Conv2d(in_channels, in_channels // 8, 1)
        self.conv_c = nn.Conv2d(in_channels, in_channels // 8, 1)
        self.conv_d = nn.Conv2d(in_channels, in_channels, 1)
        # Zero-initialised residual weight: module starts as the identity.
        self.alpha = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        batch_size, _, height, width = x.size()
        # (B, HW, C//8) x (B, C//8, HW) -> (B, HW, HW) spatial affinity.
        feat_b = self.conv_b(x).view(batch_size, -1, height * width).permute(0, 2, 1)
        feat_c = self.conv_c(x).view(batch_size, -1, height * width)
        attention_s = self.softmax(torch.bmm(feat_b, feat_c))
        feat_d = self.conv_d(x).view(batch_size, -1, height * width)
        # Weighted sum over positions, reshaped back to (B, C, H, W).
        feat_e = torch.bmm(feat_d, attention_s.permute(0, 2, 1)).view(batch_size, -1, height, width)
        # Learned-scale residual connection.
        out = self.alpha * feat_e + x
        return out
class _ChannelAttentionModule(nn.Module):
    """Channel attention module (channel-wise self-attention)."""

    def __init__(self):
        super(_ChannelAttentionModule, self).__init__()
        # Zero-initialised residual weight: module starts as the identity.
        self.beta = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        batch_size, _, height, width = x.size()
        # (B, C, HW) x (B, HW, C) -> (B, C, C) channel affinity.
        feat_a = x.view(batch_size, -1, height * width)
        feat_a_transpose = x.view(batch_size, -1, height * width).permute(0, 2, 1)
        attention = torch.bmm(feat_a, feat_a_transpose)
        # Row-max minus logits before softmax. NOTE(review): this inverts the
        # logits (largest affinity maps to 0); matches common DANet code --
        # confirm intent before changing.
        attention_new = torch.max(attention, dim=-1, keepdim=True)[0].expand_as(attention) - attention
        attention = self.softmax(attention_new)
        feat_e = torch.bmm(attention, feat_a).view(batch_size, -1, height, width)
        # Learned-scale residual connection.
        out = self.beta * feat_e + x
        return out
class DAHead(nn.Module):
    """Dual Attention head (DANet): position- plus channel-attention branches.

    Args:
        in_channels: channels of the selected backbone feature map.
        num_classes: number of output classes.
        aux: when True, also emit one auxiliary logit map per branch.
        norm_layer: normalisation layer class applied after each conv.
        norm_kwargs: optional kwargs forwarded to norm_layer.
        in_index: index (or indices) selecting the input feature map(s).
    """

    def __init__(self, in_channels, num_classes, aux=False, norm_layer=norm_layer, norm_kwargs=None, in_index=3):
        super(DAHead, self).__init__()
        self.aux = aux
        self.in_index = in_index
        # Both branches work at a quarter of the input channel count.
        inter_channels = in_channels // 4
        self.conv_p1 = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)),
            nn.ReLU(True)
        )
        self.conv_c1 = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)),
            nn.ReLU(True)
        )
        self.pam = _PositionAttentionModule(inter_channels)
        self.cam = _ChannelAttentionModule()
        self.conv_p2 = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),
            norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)),
            nn.ReLU(True)
        )
        self.conv_c2 = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),
            norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)),
            nn.ReLU(True)
        )
        self.out = nn.Sequential(
            nn.Dropout(0.1),
            nn.Conv2d(inter_channels, num_classes, 1)
        )
        if aux:
            # Auxiliary classifiers, one per attention branch.
            self.conv_p3 = nn.Sequential(
                nn.Dropout(0.1),
                nn.Conv2d(inter_channels, num_classes, 1)
            )
            self.conv_c3 = nn.Sequential(
                nn.Dropout(0.1),
                nn.Conv2d(inter_channels, num_classes, 1)
            )

    def _transform_inputs(self, inputs):
        """Select the configured feature map(s) from the backbone outputs."""
        if isinstance(self.in_index, (list, tuple)):
            inputs = [inputs[i] for i in self.in_index]
        elif isinstance(self.in_index, int):
            inputs = inputs[self.in_index]
        return inputs

    def forward(self, inputs):
        x = self._transform_inputs(inputs)
        # Position-attention branch.
        feat_p = self.conv_p1(x)
        feat_p = self.pam(feat_p)
        feat_p = self.conv_p2(feat_p)
        # Channel-attention branch.
        feat_c = self.conv_c1(x)
        feat_c = self.cam(feat_c)
        feat_c = self.conv_c2(feat_c)
        # Element-wise sum fusion of the two branches.
        feat_fusion = feat_p + feat_c
        outputs = []
        fusion_out = self.out(feat_fusion)
        outputs.append(fusion_out)
        if self.aux:
            p_out = self.conv_p3(feat_p)
            c_out = self.conv_c3(feat_c)
            outputs.append(p_out)
            outputs.append(c_out)
        return outputs
import functools
from flask_login import current_user
from flask_restful import abort
from funcy import flatten
view_only = True
not_view_only = False

ACCESS_TYPE_VIEW = "view"
ACCESS_TYPE_MODIFY = "modify"
ACCESS_TYPE_DELETE = "delete"

ACCESS_TYPES = (ACCESS_TYPE_VIEW, ACCESS_TYPE_MODIFY, ACCESS_TYPE_DELETE)


def has_access(obj, user, need_view_only):
    """Dispatch the access check: API-key users are checked against object
    keys, regular users against group permissions."""
    if hasattr(obj, "api_key") and user.is_api_user():
        return has_access_to_object(obj, user.id, need_view_only)
    return has_access_to_groups(obj, user, need_view_only)


def has_access_to_object(obj, api_key, need_view_only):
    """Return True when the given api_key grants (view-only) access to obj."""
    if obj.api_key == api_key:
        return need_view_only
    if hasattr(obj, "dashboard_api_keys"):
        # check if api_key belongs to a dashboard containing this query
        return api_key in obj.dashboard_api_keys and need_view_only
    return False


def has_access_to_groups(obj, user, need_view_only):
    """Return True when one of the user's groups grants the required level."""
    groups = obj.groups if hasattr(obj, "groups") else obj
    if "admin" in user.permissions:
        return True
    matching_groups = set(groups.keys()).intersection(user.group_ids)
    if not matching_groups:
        return False
    # Level 1 = view only, level 2 = full access. Group values appear to be
    # view-only flags: the user gets level 2 as soon as any matching group
    # grants non-view-only access -- TODO confirm against the group model.
    required_level = 1 if need_view_only else 2
    group_level = 1 if all(flatten([groups[group] for group in matching_groups])) else 2
    return required_level <= group_level


def require_access(obj, user, need_view_only):
    """Abort the request with 403 unless the user has access to obj."""
    if not has_access(obj, user, need_view_only):
        abort(403)


class require_permissions(object):
    """Decorator: abort with 403 unless the current user holds the permissions.

    With allow_one=True a single matching permission is sufficient;
    otherwise all listed permissions are required.
    """

    def __init__(self, permissions, allow_one=False):
        self.permissions = permissions
        self.allow_one = allow_one

    def __call__(self, fn):
        @functools.wraps(fn)
        def decorated(*args, **kwargs):
            if self.allow_one:
                # Generator expression: no need to build an intermediate list.
                has_permissions = any(current_user.has_permission(permission)
                                      for permission in self.permissions)
            else:
                has_permissions = current_user.has_permissions(self.permissions)
            if has_permissions:
                return fn(*args, **kwargs)
            abort(403)
        return decorated


def require_permission(permission):
    return require_permissions((permission,))


def require_any_of_permission(permissions):
    return require_permissions(permissions, True)


def require_admin(fn):
    return require_permission("admin")(fn)


def require_super_admin(fn):
    return require_permission("super_admin")(fn)


def has_permission_or_owner(permission, object_owner_id):
    """True when the current user owns the object or holds the permission."""
    return int(object_owner_id) == current_user.id or current_user.has_permission(
        permission
    )


def is_admin_or_owner(object_owner_id):
    return has_permission_or_owner("admin", object_owner_id)


def require_permission_or_owner(permission, object_owner_id):
    if not has_permission_or_owner(permission, object_owner_id):
        abort(403)


def require_admin_or_owner(object_owner_id):
    if not is_admin_or_owner(object_owner_id):
        abort(403, message="You don't have permission to edit this resource.")


def can_modify(obj, user):
    """True when the user owns obj, is admin, or has explicit modify access."""
    return is_admin_or_owner(obj.user_id) or user.has_access(obj, ACCESS_TYPE_MODIFY)


def require_object_modify_permission(obj, user):
    if not can_modify(obj, user):
        abort(403)
from flask_login import current_user
from flask_restful import abort
from funcy import flatten
view_only = True
not_view_only = False

ACCESS_TYPE_VIEW = "view"
ACCESS_TYPE_MODIFY = "modify"
ACCESS_TYPE_DELETE = "delete"

ACCESS_TYPES = (ACCESS_TYPE_VIEW, ACCESS_TYPE_MODIFY, ACCESS_TYPE_DELETE)


def has_access(obj, user, need_view_only):
    """Dispatch the access check: API-key users are checked against object
    keys, regular users against group permissions."""
    if hasattr(obj, "api_key") and user.is_api_user():
        return has_access_to_object(obj, user.id, need_view_only)
    return has_access_to_groups(obj, user, need_view_only)


def has_access_to_object(obj, api_key, need_view_only):
    """Return True when the given api_key grants (view-only) access to obj."""
    if obj.api_key == api_key:
        return need_view_only
    if hasattr(obj, "dashboard_api_keys"):
        # check if api_key belongs to a dashboard containing this query
        return api_key in obj.dashboard_api_keys and need_view_only
    return False


def has_access_to_groups(obj, user, need_view_only):
    """Return True when one of the user's groups grants the required level."""
    groups = obj.groups if hasattr(obj, "groups") else obj
    if "admin" in user.permissions:
        return True
    matching_groups = set(groups.keys()).intersection(user.group_ids)
    if not matching_groups:
        return False
    # Level 1 = view only, level 2 = full access. Group values appear to be
    # view-only flags: the user gets level 2 as soon as any matching group
    # grants non-view-only access -- TODO confirm against the group model.
    required_level = 1 if need_view_only else 2
    group_level = 1 if all(flatten([groups[group] for group in matching_groups])) else 2
    return required_level <= group_level


def require_access(obj, user, need_view_only):
    """Abort the request with 403 unless the user has access to obj."""
    if not has_access(obj, user, need_view_only):
        abort(403)


class require_permissions(object):
    """Decorator: abort with 403 unless the current user holds the permissions.

    With allow_one=True a single matching permission is sufficient;
    otherwise all listed permissions are required.
    """

    def __init__(self, permissions, allow_one=False):
        self.permissions = permissions
        self.allow_one = allow_one

    def __call__(self, fn):
        @functools.wraps(fn)
        def decorated(*args, **kwargs):
            if self.allow_one:
                # Generator expression: no need to build an intermediate list.
                has_permissions = any(current_user.has_permission(permission)
                                      for permission in self.permissions)
            else:
                has_permissions = current_user.has_permissions(self.permissions)
            if has_permissions:
                return fn(*args, **kwargs)
            abort(403)
        return decorated


def require_permission(permission):
    return require_permissions((permission,))


def require_any_of_permission(permissions):
    return require_permissions(permissions, True)


def require_admin(fn):
    return require_permission("admin")(fn)


def require_super_admin(fn):
    return require_permission("super_admin")(fn)


def has_permission_or_owner(permission, object_owner_id):
    """True when the current user owns the object or holds the permission."""
    return int(object_owner_id) == current_user.id or current_user.has_permission(
        permission
    )


def is_admin_or_owner(object_owner_id):
    return has_permission_or_owner("admin", object_owner_id)


def require_permission_or_owner(permission, object_owner_id):
    if not has_permission_or_owner(permission, object_owner_id):
        abort(403)


def require_admin_or_owner(object_owner_id):
    if not is_admin_or_owner(object_owner_id):
        abort(403, message="You don't have permission to edit this resource.")


def can_modify(obj, user):
    """True when the user owns obj, is admin, or has explicit modify access."""
    return is_admin_or_owner(obj.user_id) or user.has_access(obj, ACCESS_TYPE_MODIFY)


def require_object_modify_permission(obj, user):
    if not can_modify(obj, user):
        abort(403)
import os
import sys
import argparse
import pytest
from modelmachine.ide import get_program, debug, assemble
__version__ = "0.1.6" # Don't forget fix in setup.py
def run_program(args):
"""Get params from args and run file."""
cpu = get_program(args.filename, args.protect_memory)
cpu.run()
def run_debug(args):
"""Get params from args and run debug."""
cpu = get_program(args.filename, args.protect_memory)
debug(cpu)
def run_tests(args):
"""Run tests."""
args = args # args is unused
path = os.path.abspath(os.path.dirname(__file__))
sys.argv[1] = path
pytest.main()
def run_asm(args):
"""Get params from args and run assembler."""
assemble(args.asm_file, args.machine_file)
def main(argv, stdout):
"""Execute, when user call modelmachine."""
parser = argparse.ArgumentParser(description='Modelmachine ' + __version__)
parser.add_argument('-m', '--protect_memory', action='store_true', default=False,
help='raise an error, if program tries to read dirty memory')
subparsers = parser.add_subparsers(title='commands',
help='commands of model machine emulator')
run = subparsers.add_parser('run', help='run program')
run.add_argument('filename', help='file containing machine code')
run.set_defaults(func=run_program)
debug_parser = subparsers.add_parser('debug', help='run program in debug mode')
debug_parser.add_argument('filename', help='file containing machine code')
debug_parser.set_defaults(func=run_debug)
test = subparsers.add_parser('test', help='run internal tests end exit')
test.set_defaults(func=run_tests)
asm = subparsers.add_parser('asm', help='assemble model machine program')
asm.add_argument('asm_file', help='input file containing asm source')
asm.add_argument('machine_file', help='output file containing machine code')
asm.set_defaults(func=run_asm)
args = parser.parse_args(argv[1:])
if 'func' not in args:
parser.print_help(stdout)
else:
args.func(args)
def exec_main():
"""Hook for testability."""
main(sys.argv, sys.stdout)
if __name__ == '__main__':
exec_main() | modelmachine/__main__.py | import os
import sys
import argparse
import pytest
from modelmachine.ide import get_program, debug, assemble
__version__ = "0.1.6" # Don't forget fix in setup.py
def run_program(args):
"""Get params from args and run file."""
cpu = get_program(args.filename, args.protect_memory)
cpu.run()
def run_debug(args):
"""Get params from args and run debug."""
cpu = get_program(args.filename, args.protect_memory)
debug(cpu)
def run_tests(args):
"""Run tests."""
args = args # args is unused
path = os.path.abspath(os.path.dirname(__file__))
sys.argv[1] = path
pytest.main()
def run_asm(args):
"""Get params from args and run assembler."""
assemble(args.asm_file, args.machine_file)
def main(argv, stdout):
"""Execute, when user call modelmachine."""
parser = argparse.ArgumentParser(description='Modelmachine ' + __version__)
parser.add_argument('-m', '--protect_memory', action='store_true', default=False,
help='raise an error, if program tries to read dirty memory')
subparsers = parser.add_subparsers(title='commands',
help='commands of model machine emulator')
run = subparsers.add_parser('run', help='run program')
run.add_argument('filename', help='file containing machine code')
run.set_defaults(func=run_program)
debug_parser = subparsers.add_parser('debug', help='run program in debug mode')
debug_parser.add_argument('filename', help='file containing machine code')
debug_parser.set_defaults(func=run_debug)
test = subparsers.add_parser('test', help='run internal tests end exit')
test.set_defaults(func=run_tests)
asm = subparsers.add_parser('asm', help='assemble model machine program')
asm.add_argument('asm_file', help='input file containing asm source')
asm.add_argument('machine_file', help='output file containing machine code')
asm.set_defaults(func=run_asm)
args = parser.parse_args(argv[1:])
if 'func' not in args:
parser.print_help(stdout)
else:
args.func(args)
def exec_main():
"""Hook for testability."""
main(sys.argv, sys.stdout)
if __name__ == '__main__':
exec_main() | 0.322099 | 0.124001 |
import torch
import torch.utils.data
from .transforms import *
def fast_collate(batch):
    """Collate (numpy array, label) samples into a uint8 batch tensor and an
    int64 target tensor; normalisation is left to the loader wrapper.

    Assumes every sample array shares batch[0][0].shape.
    """
    labels = [sample[1] for sample in batch]
    targets = torch.tensor(labels, dtype=torch.int64)
    out = torch.zeros((len(batch), *batch[0][0].shape), dtype=torch.uint8)
    for idx, (arr, _) in enumerate(batch):
        # += into a zeroed row copies the image data into the batch tensor.
        out[idx] += torch.from_numpy(arr)
    return out, targets
class PrefetchLoader:
    """Wraps a DataLoader and prepares the *next* batch (host->GPU copy plus
    normalisation) on a side CUDA stream while the current batch is consumed.
    """

    def __init__(self,
                 loader,
                 mean=IMAGENET_DEFAULT_MEAN,
                 std=IMAGENET_DEFAULT_STD):
        self.loader = loader
        # Mean/std are scaled to the 0-255 uint8 range and shaped (1, 3, 1, 1)
        # so they broadcast over NCHW batches.
        self.mean = torch.tensor([x * 255 for x in mean]).cuda().view(1, 3, 1, 1)
        self.std = torch.tensor([x * 255 for x in std]).cuda().view(1, 3, 1, 1)

    def __iter__(self):
        stream = torch.cuda.Stream()
        first = True
        for next_input, next_target in self.loader:
            with torch.cuda.stream(stream):
                # Upload and normalise the upcoming batch on the side stream.
                next_input = next_input.cuda(non_blocking=True)
                next_target = next_target.cuda(non_blocking=True)
                next_input = next_input.float().sub_(self.mean).div_(self.std)
            if not first:
                # Yield the batch prepared on the previous iteration.
                # NOTE(review): `input`/`target` shadow Python builtins;
                # renaming would be a behaviour-preserving cleanup.
                yield input, target
            else:
                first = False
            # Block the default stream until the side stream finished the copy.
            torch.cuda.current_stream().wait_stream(stream)
            input = next_input
            target = next_target
        # Final pending batch. NOTE(review): assumes the wrapped loader yields
        # at least one batch, else this raises NameError.
        yield input, target

    def __len__(self):
        return len(self.loader)

    @property
    def sampler(self):
        # Expose the underlying sampler (e.g. for distributed set_epoch calls).
        return self.loader.sampler
def create_loader(
        dataset,
        input_size,
        batch_size,
        is_training=False,
        use_prefetcher=True,
        interpolation='bilinear',
        mean=IMAGENET_DEFAULT_MEAN,
        std=IMAGENET_DEFAULT_STD,
        num_workers=1,
        crop_pct=None,
        tensorflow_preprocessing=False
):
    """Build an evaluation DataLoader for dataset.

    When use_prefetcher is True, collation keeps images as uint8 and the
    returned loader is wrapped in PrefetchLoader, which normalises on the GPU.
    """
    # input_size may be a (C, H, W)-style tuple or a bare int; keep only the
    # spatial dims in the tuple case.
    if isinstance(input_size, tuple):
        img_size = input_size[-2:]
    else:
        img_size = input_size
    if tensorflow_preprocessing and use_prefetcher:
        # Imported lazily so TF is only required when actually requested.
        from data.tf_preprocessing import TfPreprocessTransform
        transform = TfPreprocessTransform(is_training=is_training, size=img_size)
    else:
        transform = transforms_imagenet_eval(
            img_size,
            interpolation=interpolation,
            use_prefetcher=use_prefetcher,
            mean=mean,
            std=std,
            crop_pct=crop_pct)
    dataset.transform = transform
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        collate_fn=fast_collate if use_prefetcher else torch.utils.data.dataloader.default_collate,
    )
    if use_prefetcher:
        loader = PrefetchLoader(
            loader,
            mean=mean,
            std=std)
    return loader
import torch.utils.data
from .transforms import *
def fast_collate(batch):
    """Collate (numpy array, label) samples into a uint8 batch tensor and an
    int64 target tensor; normalisation is deferred to PrefetchLoader.

    Assumes every sample array shares batch[0][0].shape.
    """
    targets = torch.tensor([b[1] for b in batch], dtype=torch.int64)
    batch_size = len(targets)
    tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8)
    for i in range(batch_size):
        # += into a zeroed row copies image i into the batch tensor.
        tensor[i] += torch.from_numpy(batch[i][0])
    return tensor, targets
class PrefetchLoader:
    """Overlaps host->GPU transfer and normalisation of the next batch with
    consumption of the current one, using a dedicated CUDA stream.
    """

    def __init__(self,
                 loader,
                 mean=IMAGENET_DEFAULT_MEAN,
                 std=IMAGENET_DEFAULT_STD):
        self.loader = loader
        # Scale mean/std to 0-255 (uint8 input) and shape them (1, 3, 1, 1)
        # for broadcasting over NCHW batches.
        self.mean = torch.tensor([x * 255 for x in mean]).cuda().view(1, 3, 1, 1)
        self.std = torch.tensor([x * 255 for x in std]).cuda().view(1, 3, 1, 1)

    def __iter__(self):
        stream = torch.cuda.Stream()
        first = True
        for next_input, next_target in self.loader:
            with torch.cuda.stream(stream):
                # Prepare the upcoming batch on the side stream.
                next_input = next_input.cuda(non_blocking=True)
                next_target = next_target.cuda(non_blocking=True)
                next_input = next_input.float().sub_(self.mean).div_(self.std)
            if not first:
                # Hand out the batch prepared on the previous iteration.
                # NOTE(review): `input`/`target` shadow builtins.
                yield input, target
            else:
                first = False
            # Default stream waits for the side stream's copy to finish.
            torch.cuda.current_stream().wait_stream(stream)
            input = next_input
            target = next_target
        # NOTE(review): assumes at least one batch, else NameError here.
        yield input, target

    def __len__(self):
        return len(self.loader)

    @property
    def sampler(self):
        # Expose the wrapped loader's sampler unchanged.
        return self.loader.sampler
def create_loader(
        dataset,
        input_size,
        batch_size,
        is_training=False,
        use_prefetcher=True,
        interpolation='bilinear',
        mean=IMAGENET_DEFAULT_MEAN,
        std=IMAGENET_DEFAULT_STD,
        num_workers=1,
        crop_pct=None,
        tensorflow_preprocessing=False
):
    """Build an evaluation DataLoader for dataset.

    When use_prefetcher is True, collation keeps images as uint8 and the
    returned loader is wrapped in PrefetchLoader, which normalises on the GPU.
    """
    # input_size may be a (C, H, W)-style tuple or a bare int; keep only the
    # spatial dims in the tuple case.
    if isinstance(input_size, tuple):
        img_size = input_size[-2:]
    else:
        img_size = input_size
    if tensorflow_preprocessing and use_prefetcher:
        # Imported lazily so TF is only required when actually requested.
        from data.tf_preprocessing import TfPreprocessTransform
        transform = TfPreprocessTransform(is_training=is_training, size=img_size)
    else:
        transform = transforms_imagenet_eval(
            img_size,
            interpolation=interpolation,
            use_prefetcher=use_prefetcher,
            mean=mean,
            std=std,
            crop_pct=crop_pct)
    dataset.transform = transform
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        collate_fn=fast_collate if use_prefetcher else torch.utils.data.dataloader.default_collate,
    )
    if use_prefetcher:
        loader = PrefetchLoader(
            loader,
            mean=mean,
            std=std)
    return loader
import os, sys
import yaml, json
import logging
import click
from voluptuous import Schema
from .defaults import settings
from .validators import SchemaCheck, config_file, options
from .config_utils import test_config, set_logging
from .exceptions import *
from .utils import *
from .indexlist import IndexList
from .snapshotlist import SnapshotList
from .actions import *
from ._version import __version__
# Maps each singleton CLI sub-command name to the curator action class it runs.
CLASS_MAP = {
    'alias' : Alias,
    'allocation' : Allocation,
    'close' : Close,
    'cluster_routing' : ClusterRouting,
    'create_index' : CreateIndex,
    'delete_indices' : DeleteIndices,
    'delete_snapshots' : DeleteSnapshots,
    'forcemerge' : ForceMerge,
    'open' : Open,
    'replicas' : Replicas,
    'restore' : Restore,
    'snapshot' : Snapshot,
}

# Options consumed by the CLI layer itself; stripped before the remaining
# options are passed to the action-class constructors.
EXCLUDED_OPTIONS = [
    'ignore_empty_list', 'timeout_override',
    'continue_if_exception', 'disable_action'
]
def validate_filter_json(ctx, param, value):
    """Click callback: parse ``value`` as JSON and coerce it to a filter list."""
    try:
        return ensure_list(json.loads(value))
    except ValueError:
        # Covers json.JSONDecodeError (a ValueError subclass) as well.
        raise click.BadParameter('Invalid JSON: {0}'.format(value))
def false_to_none(ctx, param, value):
    """Click callback: map a truthy flag to True and a falsey one to None.

    The original try/except ValueError was dead code -- a plain truth test
    cannot raise ValueError -- so it has been removed.
    """
    return True if value else None
def filter_schema_check(action, filter_dict):
    """Validate filter_dict against the filter schema for ``action``.

    Returns the validated filter list; SchemaCheck raises on invalid input.
    """
    valid_filters = SchemaCheck(
        filter_dict,
        Schema(filters.Filters(action, location='singleton')),
        'filters',
        '{0} singleton action "filters"'.format(action)
    ).result()
    # Second pass: semantic validation of the structurally-valid filters.
    return validate_filters(action, valid_filters)
def _actionator(action, action_obj, dry_run=True):
logger = logging.getLogger(__name__)
logger.debug('Doing the singleton "{0}" action here.'.format(action))
try:
if dry_run:
action_obj.do_dry_run()
else:
action_obj.do_action()
except Exception as e:
if isinstance(e, NoIndices) or isinstance(e, NoSnapshots):
logger.error(
'Unable to complete action "{0}". No actionable items '
'in list: {1}'.format(action, type(e))
)
else:
logger.error(
'Failed to complete action: {0}. {1}: '
'{2}'.format(action, type(e), e)
)
sys.exit(1)
logger.info('Singleton "{0}" action completed.'.format(action))
def _do_filters(list_object, filters, ignore=False):
logger = logging.getLogger(__name__)
logger.debug('Running filters and testing for empty list object')
try:
list_object.iterate_filters(filters)
list_object.empty_list_check()
except (NoIndices, NoSnapshots) as e:
if isinstance(e, NoIndices):
otype = 'index'
else:
otype = 'snapshot'
if ignore:
logger.info(
'Singleton action not performed: empty {0} list'.format(otype)
)
sys.exit(0)
else:
logger.error(
'Singleton action failed due to empty {0} list'.format(otype)
)
sys.exit(1)
def _prune_excluded(option_dict):
    """Strip CLI-only bookkeeping options in place and return the dict."""
    for key in [k for k in option_dict if k in EXCLUDED_OPTIONS]:
        option_dict.pop(key)
    return option_dict
def option_schema_check(action, option_dict):
    """Validate option_dict against the option schema for ``action``.

    None-valued options are pruned before validation, and CLI-only options
    (EXCLUDED_OPTIONS) are stripped from the validated result.
    """
    clean_options = SchemaCheck(
        prune_nones(option_dict),
        options.get_schema(action),
        'options',
        '{0} singleton action "options"'.format(action)
    ).result()
    return _prune_excluded(clean_options)
def config_override(ctx, config_dict):
    """Overlay command-line params from ctx onto config_dict and validate.

    CLI values win over file values; None-valued CLI params are ignored.
    Returns the schema-validated configuration dictionary.
    """
    if config_dict is None:
        config_dict = {}
    for section in ['client', 'logging']:
        if section not in config_dict:
            config_dict[section] = {}
    for k in list(ctx.params.keys()):
        if k in ['dry_run', 'config']:
            # Not part of the client/logging configuration.
            pass
        elif k == 'host':
            # CLI flag is singular 'host'; config key is plural 'hosts'.
            if ctx.params[k] is not None:
                config_dict['client']['hosts'] = ctx.params[k]
        elif k in ['loglevel', 'logfile', 'logformat']:
            if ctx.params[k] is not None:
                config_dict['logging'][k] = ctx.params[k]
        else:
            if ctx.params[k] is not None:
                config_dict['client'][k] = ctx.params[k]
    # After override, prune the nones
    for section in ['client', 'logging']:
        config_dict[section] = prune_nones(config_dict[section])
    return SchemaCheck(config_dict, config_file.client(),
        'Client Configuration', 'full configuration dictionary').result()
@click.command(name='allocation')
@click.option(
    '--key', type=str, required=True, help='Node identification tag'
)
@click.option(
    '--value', type=str, default=None, help='Value associated with --key'
)
@click.option(
    '--allocation_type', type=str,
    help='Must be one of: require, include, or exclude'
)
@click.option(
    '--wait_for_completion', is_flag=True, help='Wait for operation to complete'
)
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable indices'
)
@click.option(
    '--filter_list', callback=validate_filter_json,
    help='JSON string representing an array of filters.', required=True
)
@click.pass_context
def allocation_singleton(
        ctx, key, value, allocation_type, wait_for_completion, ignore_empty_list,
        filter_list):
    """
    Shard Routing Allocation
    """
    action = 'allocation'
    action_class = CLASS_MAP[action]
    # Client settings come from the validated configuration on the context.
    c_args = ctx.obj['config']['client']
    client = get_client(**c_args)
    logger = logging.getLogger(__name__)
    raw_options = {
        'key': key,
        'value': value,
        'allocation_type': allocation_type,
        'wait_for_completion': wait_for_completion,
    }
    logger.debug('Validating provided options: {0}'.format(raw_options))
    mykwargs = option_schema_check(action, raw_options)
    # Use the client timeout as max_wait, falling back to 30 seconds.
    mykwargs.update(
        { 'max_wait': c_args['timeout'] if c_args['timeout'] else 30 }
    )
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {
        'filters': filter_schema_check(action, filter_list)
    }
    # Apply filters; exits the process if the resulting index list is empty.
    ilo = IndexList(client)
    _do_filters(ilo, clean_filters, ignore_empty_list)
    action_obj = action_class(ilo, **mykwargs)
    ### Do the action
    _actionator(action, action_obj, dry_run=ctx.parent.params['dry_run'])
@click.command(name='close')
@click.option(
    '--delete_aliases', is_flag=True,
    help='Delete all aliases from indices to be closed'
)
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable indices'
)
@click.option(
    '--filter_list', callback=validate_filter_json,
    help='JSON string representing an array of filters.', required=True
)
@click.pass_context
def close_singleton(
        ctx, delete_aliases, ignore_empty_list, filter_list):
    """
    Close indices
    """
    action = 'close'
    action_class = CLASS_MAP[action]
    # Client settings come from the validated configuration on the context.
    c_args = ctx.obj['config']['client']
    client = get_client(**c_args)
    logger = logging.getLogger(__name__)
    raw_options = { 'delete_aliases': delete_aliases }
    logger.debug('Validating provided options: {0}'.format(raw_options))
    mykwargs = option_schema_check(action, raw_options)
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {
        'filters': filter_schema_check(action, filter_list)
    }
    # Apply filters; exits the process if the resulting index list is empty.
    ilo = IndexList(client)
    _do_filters(ilo, clean_filters, ignore_empty_list)
    action_obj = action_class(ilo, **mykwargs)
    ### Do the action
    _actionator(action, action_obj, dry_run=ctx.parent.params['dry_run'])
@click.command(name='delete_indices')
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable indices'
)
@click.option(
    '--filter_list', callback=validate_filter_json,
    help='JSON string representing an array of filters.', required=True
)
@click.pass_context
def delete_indices_singleton(ctx, ignore_empty_list, filter_list):
    """
    Delete indices
    """
    action = 'delete_indices'
    action_class = CLASS_MAP[action]
    # Client settings come from the validated configuration on the context.
    c_args = ctx.obj['config']['client']
    client = get_client(**c_args)
    logger = logging.getLogger(__name__)
    # Cap master_timeout at 300 seconds.
    # NOTE(review): assumes c_args['timeout'] is always an int; a None value
    # would raise TypeError on the comparison -- confirm the config defaults.
    mykwargs = {
        'master_timeout': c_args['timeout'] if c_args['timeout'] <= 300 else 300
    }
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {
        'filters': filter_schema_check(action, filter_list)
    }
    # Apply filters; exits the process if the resulting index list is empty.
    ilo = IndexList(client)
    _do_filters(ilo, clean_filters, ignore_empty_list)
    action_obj = action_class(ilo, **mykwargs)
    ### Do the action
    _actionator(action, action_obj, dry_run=ctx.parent.params['dry_run'])
@click.command(name='delete_snapshots')
@click.option(
    '--repository', type=str, required=True, help='Snapshot repository name'
)
@click.option(
    '--retry_count', type=int, help='Number of times to retry (max 3)'
)
@click.option(
    '--retry_interval', type=int, help='Time in seconds between retries'
)
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable snapshots'
)
@click.option(
    '--filter_list', callback=validate_filter_json,
    help='JSON string representing an array of filters.', required=True
)
@click.pass_context
def delete_snapshots_singleton(
    ctx, repository, retry_count, retry_interval, ignore_empty_list,
    filter_list):
    """
    Delete snapshots
    """
    action = 'delete_snapshots'
    action_class = CLASS_MAP[action]
    c_args = ctx.obj['config']['client']
    client = get_client(**c_args)
    logger = logging.getLogger(__name__)
    # 'repository' is included only so it passes option schema validation.
    raw_options = {
        'repository': repository,
        'retry_count': retry_count, 'retry_interval': retry_interval
    }
    logger.debug('Validating provided options: {0}'.format(raw_options))
    mykwargs = option_schema_check(action, raw_options)
    # Repo arg Not necessary after schema check. It's only for the slo object
    del mykwargs['repository']
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {
        'filters': filter_schema_check(action, filter_list)
    }
    slo = SnapshotList(client, repository=repository)
    # _do_filters exits the process when the filtered list is empty.
    _do_filters(slo, clean_filters, ignore_empty_list)
    action_obj = action_class(slo, **mykwargs)
    ### Do the action
    _actionator(action, action_obj, dry_run=ctx.parent.params['dry_run'])
@click.command(name='open')
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable indices'
)
@click.option(
    '--filter_list', callback=validate_filter_json,
    help='JSON string representing an array of filters.', required=True
)
@click.pass_context
def open_singleton(
    ctx, ignore_empty_list, filter_list):
    """
    Open indices
    """
    action = 'open'
    logger = logging.getLogger(__name__)
    # Build the client from the settings the `cli` group stored on the
    # context object.
    es_client = get_client(**ctx.obj['config']['client'])
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {'filters': filter_schema_check(action, filter_list)}
    index_list = IndexList(es_client)
    # _do_filters exits the process when the filtered list is empty.
    _do_filters(index_list, clean_filters, ignore_empty_list)
    ### Do the action
    _actionator(
        action, CLASS_MAP[action](index_list),
        dry_run=ctx.parent.params['dry_run']
    )
@click.command(name='forcemerge')
@click.option(
    '--max_num_segments', type=int, required=True,
    help='Maximum number of segments per shard (minimum of 1)'
)
@click.option(
    '--delay', type=float,
    help='Time in seconds to delay between operations. Default 0, maximum 3600'
)
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable indices'
)
@click.option(
    '--filter_list', callback=validate_filter_json,
    help='JSON string representing an array of filters.', required=True
)
@click.pass_context
def forcemerge_singleton(
    ctx, max_num_segments, delay, ignore_empty_list, filter_list):
    """
    forceMerge index/shard segments
    """
    action = 'forcemerge'
    # Look up the action class and build a client from the settings that
    # the `cli` group stored on the context object.
    action_class = CLASS_MAP[action]
    c_args = ctx.obj['config']['client']
    client = get_client(**c_args)
    logger = logging.getLogger(__name__)
    raw_options = {
        'max_num_segments': max_num_segments,
        'delay': delay,
    }
    logger.debug('Validating provided options: {0}'.format(raw_options))
    mykwargs = option_schema_check(action, raw_options)
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {
        'filters': filter_schema_check(action, filter_list)
    }
    ilo = IndexList(client)
    # _do_filters exits the process when the filtered list is empty.
    _do_filters(ilo, clean_filters, ignore_empty_list)
    action_obj = action_class(ilo, **mykwargs)
    ### Do the action
    _actionator(action, action_obj, dry_run=ctx.parent.params['dry_run'])
@click.command(name='replicas')
@click.option(
    '--count', type=int, required=True, help='Number of replicas (max 10)'
)
@click.option(
    '--wait_for_completion', is_flag=True, help='Wait for operation to complete'
)
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable indices'
)
@click.option(
    '--filter_list', callback=validate_filter_json,
    help='JSON string representing an array of filters.', required=True
)
@click.pass_context
def replicas_singleton(
    ctx, count, wait_for_completion, ignore_empty_list, filter_list):
    """
    Change replica count
    """
    action = 'replicas'
    # Look up the action class and build a client from the settings that
    # the `cli` group stored on the context object.
    action_class = CLASS_MAP[action]
    c_args = ctx.obj['config']['client']
    client = get_client(**c_args)
    logger = logging.getLogger(__name__)
    raw_options = {
        'count': count,
        'wait_for_completion': wait_for_completion,
    }
    logger.debug('Validating provided options: {0}'.format(raw_options))
    mykwargs = option_schema_check(action, raw_options)
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {
        'filters': filter_schema_check(action, filter_list)
    }
    ilo = IndexList(client)
    # _do_filters exits the process when the filtered list is empty.
    _do_filters(ilo, clean_filters, ignore_empty_list)
    action_obj = action_class(ilo, **mykwargs)
    ### Do the action
    _actionator(action, action_obj, dry_run=ctx.parent.params['dry_run'])
@click.command(name='snapshot')
@click.option(
    '--repository', type=str, required=True, help='Snapshot repository')
@click.option(
    '--name', type=str, help='Snapshot name',
    show_default=True, default='curator-%Y%m%d%H%M%S'
)
@click.option(
    '--ignore_unavailable', is_flag=True, show_default=True,
    help='Ignore unavailable shards/indices.'
)
@click.option(
    '--include_global_state', type=bool, show_default=True,
    default=True, expose_value=True,
    help='Store cluster global state with snapshot.'
)
@click.option(
    '--partial', is_flag=True, show_default=True,
    help='Do not fail if primary shard is unavailable.'
)
@click.option(
    '--wait_for_completion',
    type=bool, show_default=True, default=True,
    help='Wait for operation to complete'
)
@click.option(
    '--skip_repo_fs_check', is_flag=True, expose_value=True,
    help='Skip repository filesystem access validation.'
)
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable indices'
)
@click.option(
    '--filter_list', callback=validate_filter_json, default='{"filtertype":"none"}',
    help='JSON string representing an array of filters.'
)
@click.pass_context
def snapshot_singleton(
    ctx, repository, name, ignore_unavailable, include_global_state, partial,
    skip_repo_fs_check, wait_for_completion, ignore_empty_list, filter_list):
    """
    Snapshot indices
    """
    action = 'snapshot'
    # Look up the action class and build a client from the settings that
    # the `cli` group stored on the context object.
    action_class = CLASS_MAP[action]
    c_args = ctx.obj['config']['client']
    client = get_client(**c_args)
    logger = logging.getLogger(__name__)
    raw_options = {
        'repository': repository,
        'name': name,
        'ignore_unavailable': ignore_unavailable,
        'include_global_state': include_global_state,
        'partial': partial,
        'skip_repo_fs_check': skip_repo_fs_check,
        'wait_for_completion': wait_for_completion,
    }
    logger.debug('Validating provided options: {0}'.format(raw_options))
    mykwargs = option_schema_check(action, raw_options)
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {
        'filters': filter_schema_check(action, filter_list)
    }
    ilo = IndexList(client)
    # _do_filters exits the process when the filtered list is empty.
    _do_filters(ilo, clean_filters, ignore_empty_list)
    action_obj = action_class(ilo, **mykwargs)
    ### Do the action
    _actionator(action, action_obj, dry_run=ctx.parent.params['dry_run'])
@click.command(name='show_indices')
@click.option('--verbose', help='Show verbose output.', is_flag=True)
@click.option('--header', help='Print header if --verbose', is_flag=True)
@click.option('--epoch', help='Print time as epoch if --verbose', is_flag=True)
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable indices'
)
@click.option(
    '--filter_list', callback=validate_filter_json, default='{"filtertype":"none"}',
    help='JSON string representing an array of filters.'
)
@click.pass_context
def show_indices_singleton(
    ctx, epoch, header, verbose, ignore_empty_list, filter_list):
    """
    Show indices
    """
    # Read-only command: 'open' is used purely to satisfy the filter
    # schema; no action object is ever built or executed.
    action = "open"
    c_args = ctx.obj['config']['client']
    client = get_client(**c_args)
    logger = logging.getLogger(__name__)
    logger.debug(
        'Using dummy "open" action for show_indices singleton. '
        'No action will be taken.'
    )
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {
        'filters': filter_schema_check(action, filter_list)
    }
    ilo = IndexList(client)
    # _do_filters exits the process when the filtered list is empty, so
    # `indices` is guaranteed non-empty below (max() would fail otherwise).
    _do_filters(ilo, clean_filters, ignore_empty_list)
    indices = sorted(ilo.indices)
    # Do some calculations to figure out the proper column sizes
    allbytes = []
    alldocs = []
    for idx in indices:
        allbytes.append(byte_size(ilo.index_info[idx]['size_in_bytes']))
        alldocs.append(str(ilo.index_info[idx]['docs']))
    if epoch:
        timeformat = '{6:>13}'
        column = 'creation_date'
    else:
        timeformat = '{6:>20}'
        column = 'Creation Timestamp'
    # Column widths are derived from the widest value in each column.
    formatting = (
        '{0:' + str(len(max(indices, key=len))) + '} '
        '{1:>5} '
        '{2:>' + str(len(max(allbytes, key=len)) + 1) + '} '
        '{3:>' + str(len(max(alldocs, key=len)) + 1) + '} '
        '{4:>3} {5:>3} ' + timeformat
    )
    # Print the header, if both verbose and header are enabled
    if header and verbose:
        click.secho(
            formatting.format(
                'Index', 'State', 'Size', 'Docs', 'Pri', 'Rep', column
            ), bold=True, underline=True
        )
    # Loop through indices and print info, if verbose
    for idx in indices:
        p = ilo.index_info[idx]
        if verbose:
            # 'creation_date' is absent for closed indices, hence the
            # fallbacks (0 / 'unknown/closed').
            if epoch:
                datefield = p['age']['creation_date'] if 'creation_date' in p['age'] else 0
            else:
                # NOTE(review): `datetime` is not imported by name above --
                # presumably provided by a star import; confirm.
                datefield = '{0}Z'.format(
                    datetime.utcfromtimestamp(p['age']['creation_date']
                    ).isoformat()) if 'creation_date' in p['age'] else 'unknown/closed'
            click.echo(
                formatting.format(
                    idx, p['state'], byte_size(p['size_in_bytes']),
                    p['docs'], p['number_of_shards'], p['number_of_replicas'],
                    datefield
                )
            )
        else:
            click.echo('{0}'.format(idx))
@click.command(name='show_snapshots')
@click.option(
    '--repository', type=str, required=True, help='Snapshot repository name'
)
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable snapshots'
)
@click.option(
    '--filter_list', callback=validate_filter_json, default='{"filtertype":"none"}',
    help='JSON string representing an array of filters.'
)
@click.pass_context
def show_snapshots_singleton(
    ctx, repository, ignore_empty_list, filter_list):
    """
    Show snapshots
    """
    # Read-only command: borrow the delete_snapshots filter schema for
    # validation; nothing is deleted here.
    action = 'delete_snapshots'
    logger = logging.getLogger(__name__)
    es_client = get_client(**ctx.obj['config']['client'])
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {'filters': filter_schema_check(action, filter_list)}
    snapshot_list = SnapshotList(es_client, repository=repository)
    # _do_filters exits the process when the filtered list is empty.
    _do_filters(snapshot_list, clean_filters, ignore_empty_list)
    for snap in sorted(snapshot_list.snapshots):
        click.secho('{0}'.format(snap))
@click.command(name='restore')
@click.option(
    '--repository', type=str, required=True, help='Snapshot repository')
@click.option(
    '--name', type=str, help='Snapshot name', required=False, default=None,
)
@click.option(
    '--rename_pattern', type=str, help='Rename pattern', required=False, default=None,
)
@click.option(
    '--rename_replacement', type=str, help='Rename replacement', required=False, default=None,
)
@click.option(
    '--ignore_unavailable', is_flag=True, show_default=True,
    help='Ignore unavailable shards/indices.'
)
@click.option(
    '--include_global_state', type=bool, show_default=True,
    default=True, expose_value=True,
    help='Store cluster global state with snapshot.'
)
@click.option(
    '--partial', is_flag=True, show_default=True,
    help='Do not fail if primary shard is unavailable.'
)
@click.option(
    '--wait_for_completion',
    type=bool, show_default=True, default=True,
    help='Wait for operation to complete'
)
@click.option(
    '--skip_repo_fs_check', is_flag=True, expose_value=True,
    help='Skip repository filesystem access validation.'
)
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable indices'
)
@click.option(
    '--filter_list', callback=validate_filter_json, default='{"filtertype":"none"}',
    help='JSON string representing an array of filters.'
)
@click.pass_context
def restore_singleton(
    ctx, repository, name, rename_pattern, rename_replacement, ignore_unavailable,
    include_global_state, partial, wait_for_completion, skip_repo_fs_check,
    ignore_empty_list, filter_list):
    """
    Restore a snapshot
    """
    action = 'restore'
    # Look up the action class and build a client from the settings that
    # the `cli` group stored on the context object.
    action_class = CLASS_MAP[action]
    c_args = ctx.obj['config']['client']
    client = get_client(**c_args)
    logger = logging.getLogger(__name__)
    # 'repository' is included only so it passes option schema validation.
    raw_options = {
        'repository': repository,
        'name': name,
        'rename_pattern': rename_pattern,
        'rename_replacement': rename_replacement,
        'ignore_unavailable': ignore_unavailable,
        'include_global_state': include_global_state,
        'partial': partial,
        'skip_repo_fs_check': skip_repo_fs_check,
        'wait_for_completion': wait_for_completion,
    }
    logger.debug('Validating provided options: {0}'.format(raw_options))
    mykwargs = option_schema_check(action, raw_options)
    # Only the SnapshotList needs the repository; drop it from the kwargs.
    mykwargs.pop('repository')
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {
        'filters': filter_schema_check(action, filter_list)
    }
    slo = SnapshotList(client, repository=repository)
    # _do_filters exits the process when the filtered list is empty.
    _do_filters(slo, clean_filters, ignore_empty_list)
    action_obj = action_class(slo, **mykwargs)
    ### Do the action
    _actionator(action, action_obj, dry_run=ctx.parent.params['dry_run'])
@click.group()
@click.option(
    '--config',
    help='Path to configuration file. Default: ~/.curator/curator.yml',
    type=click.Path(), default=settings.config_file()
)
@click.option('--host', help='Elasticsearch host.')
@click.option('--url_prefix', help='Elasticsearch http url prefix.')
@click.option('--port', help='Elasticsearch port.')
@click.option(
    '--use_ssl', is_flag=True, callback=false_to_none,
    help='Connect to Elasticsearch through SSL.'
)
@click.option(
    '--certificate', help='Path to certificate to use for SSL validation.')
@click.option(
    '--client-cert',
    help='Path to file containing SSL certificate for client auth.', type=str
)
@click.option(
    '--client-key',
    help='Path to file containing SSL key for client auth.', type=str
)
@click.option(
    '--ssl-no-validate', is_flag=True, callback=false_to_none,
    help='Do not validate SSL certificate'
)
@click.option('--http_auth', help='Use Basic Authentication ex: user:pass')
@click.option('--timeout', help='Connection timeout in seconds.', type=int)
@click.option(
    '--master-only', is_flag=True, callback=false_to_none,
    help='Only operate on elected master node.'
)
@click.option('--dry-run', is_flag=True, help='Do not perform any changes.')
@click.option('--loglevel', help='Log level')
@click.option('--logfile', help='log file')
@click.option('--logformat', help='Log output format [default|logstash|json].')
@click.version_option(version=__version__)
@click.pass_context
def cli(
    ctx, config, host, url_prefix, port, use_ssl, certificate, client_cert,
    client_key, ssl_no_validate, http_auth, timeout, master_only, dry_run,
    loglevel, logfile, logformat):
    """
    Root command group: read the config file (if present), overlay CLI
    options on top of it, configure logging, validate client settings, and
    stash the merged configuration on the context for subcommands.
    """
    # Missing config file is not an error; CLI options alone may suffice.
    if os.path.isfile(config):
        initial_config = test_config(config)
    else:
        initial_config = None
    configuration = config_override(ctx, initial_config)
    set_logging(configuration['logging'])
    test_client_options(configuration['client'])
    logger = logging.getLogger(__name__)  # NOTE(review): unused here
    # Subcommands read the merged config via ctx.obj['config'].
    # NOTE(review): assumes ctx.obj is a dict -- presumably the entry point
    # invokes this group with obj={}; confirm.
    ctx.obj['config'] = configuration
# Register every singleton subcommand with the root `cli` group.
cli.add_command(allocation_singleton)
cli.add_command(close_singleton)
cli.add_command(delete_indices_singleton)
cli.add_command(delete_snapshots_singleton)
cli.add_command(forcemerge_singleton)
cli.add_command(open_singleton)
cli.add_command(replicas_singleton)
cli.add_command(snapshot_singleton)
cli.add_command(restore_singleton)
cli.add_command(show_indices_singleton)
cli.add_command(show_snapshots_singleton)
import os, sys
import yaml, json
import logging
import click
from voluptuous import Schema
from .defaults import settings
from .validators import SchemaCheck, config_file, options
from .config_utils import test_config, set_logging
from .exceptions import *
from .utils import *
from .indexlist import IndexList
from .snapshotlist import SnapshotList
from .actions import *
from ._version import __version__
# Map singleton action names to their corresponding Action classes
# (imported via `from .actions import *`).
CLASS_MAP = {
    'alias' : Alias,
    'allocation' : Allocation,
    'close' : Close,
    'cluster_routing' : ClusterRouting,
    'create_index' : CreateIndex,
    'delete_indices' : DeleteIndices,
    'delete_snapshots' : DeleteSnapshots,
    'forcemerge' : ForceMerge,
    'open' : Open,
    'replicas' : Replicas,
    'restore' : Restore,
    'snapshot' : Snapshot,
}
# Options valid in curator YAML action files but meaningless for CLI
# singletons; _prune_excluded strips them after schema validation.
EXCLUDED_OPTIONS = [
    'ignore_empty_list', 'timeout_override',
    'continue_if_exception', 'disable_action'
]
def validate_filter_json(ctx, param, value):
    """click callback: decode a --filter_list JSON string into a list.

    Raises click.BadParameter when *value* is not valid JSON.
    """
    try:
        return ensure_list(json.loads(value))
    except ValueError:
        raise click.BadParameter('Invalid JSON: {0}'.format(value))
def false_to_none(ctx, param, value):
    """click callback: map a truthy flag value to True, anything else to None.

    Returning None (rather than False) lets config_override treat an unset
    flag as "not provided", so it does not clobber a value from the config
    file.

    :param ctx: click context (unused)
    :param param: click parameter (unused)
    :param value: raw flag value from click
    """
    # The original wrapped this in try/except ValueError, but a bare
    # truthiness test can never raise ValueError -- the handler was dead
    # code, so it is removed.
    return True if value else None
def filter_schema_check(action, filter_dict):
    """Validate *filter_dict* against the filter schema for *action* and
    return the validated (post-processed) filter list.

    Raises a schema error if validation fails.
    """
    # NOTE(review): `filters` is not imported by name in the visible import
    # block -- presumably it arrives via a star import; confirm against
    # the .validators package.
    valid_filters = SchemaCheck(
        filter_dict,
        Schema(filters.Filters(action, location='singleton')),
        'filters',
        '{0} singleton action "filters"'.format(action)
    ).result()
    return validate_filters(action, valid_filters)
def _actionator(action, action_obj, dry_run=True):
    """Execute (or dry-run) a fully-built action object.

    :param action: Action name, used only in log messages.
    :param action_obj: Instantiated action exposing do_action/do_dry_run.
    :param dry_run: When True, simulate instead of executing.

    Exits the process with status 1 on any exception.
    """
    logger = logging.getLogger(__name__)
    logger.debug('Doing the singleton "{0}" action here.'.format(action))
    try:
        if dry_run:
            action_obj.do_dry_run()
        else:
            action_obj.do_action()
    except Exception as e:
        # Tuple form replaces the former `isinstance(e, NoIndices) or
        # isinstance(e, NoSnapshots)` chain -- identical semantics,
        # idiomatic expression.
        if isinstance(e, (NoIndices, NoSnapshots)):
            logger.error(
                'Unable to complete action "{0}". No actionable items '
                'in list: {1}'.format(action, type(e))
            )
        else:
            logger.error(
                'Failed to complete action: {0}. {1}: '
                '{2}'.format(action, type(e), e)
            )
        sys.exit(1)
    logger.info('Singleton "{0}" action completed.'.format(action))
def _do_filters(list_object, filters, ignore=False):
    """Apply *filters* to *list_object*, exiting the process on an empty
    result.

    :param list_object: IndexList or SnapshotList to filter in place.
    :param filters: Validated filter structure for iterate_filters().
    :param ignore: When True, an empty result exits with status 0 instead
        of being treated as a failure (status 1).
    """
    logger = logging.getLogger(__name__)
    logger.debug('Running filters and testing for empty list object')
    try:
        list_object.iterate_filters(filters)
        list_object.empty_list_check()
    except (NoIndices, NoSnapshots) as e:
        otype = 'index' if isinstance(e, NoIndices) else 'snapshot'
        if ignore:
            logger.info(
                'Singleton action not performed: empty {0} list'.format(otype)
            )
            sys.exit(0)
        logger.error(
            'Singleton action failed due to empty {0} list'.format(otype)
        )
        sys.exit(1)
def _prune_excluded(option_dict):
    """Remove singleton-excluded keys from *option_dict* in place and
    return it."""
    for excluded in EXCLUDED_OPTIONS:
        # pop with default: absent keys are simply skipped.
        option_dict.pop(excluded, None)
    return option_dict
def option_schema_check(action, option_dict):
    """Validate *option_dict* against the options schema for *action*.

    None-valued options are pruned before validation; options listed in
    EXCLUDED_OPTIONS are stripped from the validated result.  Returns the
    cleaned kwargs dict for the action class.
    """
    clean_options = SchemaCheck(
        prune_nones(option_dict),
        options.get_schema(action),
        'options',
        '{0} singleton action "options"'.format(action)
    ).result()
    return _prune_excluded(clean_options)
def config_override(ctx, config_dict):
    """Overlay CLI parameters onto the file-derived *config_dict*.

    CLI values win over file values; None-valued CLI parameters are
    ignored so they do not clobber file settings.  The merged dictionary
    is schema-checked before being returned.

    :param ctx: click context carrying the parsed CLI parameters.
    :param config_dict: configuration from the YAML file, or None.
    """
    if config_dict is None:  # `is None`, not `== None` (PEP 8)
        config_dict = {}
    for k in ['client', 'logging']:
        if k not in config_dict:
            config_dict[k] = {}
    # k is drawn from ctx.params.keys(), so the former redundant
    # `k in ctx.params` guards are dropped; only the None checks remain.
    for k in list(ctx.params.keys()):
        if k in ['dry_run', 'config']:
            pass
        elif k == 'host':
            # The CLI --host maps onto the client's 'hosts' key.
            if ctx.params[k] is not None:
                config_dict['client']['hosts'] = ctx.params[k]
        elif k in ['loglevel', 'logfile', 'logformat']:
            if ctx.params[k] is not None:
                config_dict['logging'][k] = ctx.params[k]
        else:
            if ctx.params[k] is not None:
                config_dict['client'][k] = ctx.params[k]
    # After override, prune the nones
    for k in ['client', 'logging']:
        config_dict[k] = prune_nones(config_dict[k])
    return SchemaCheck(config_dict, config_file.client(),
        'Client Configuration', 'full configuration dictionary').result()
@click.command(name='allocation')
@click.option(
    '--key', type=str, required=True, help='Node identification tag'
)
@click.option(
    '--value', type=str, default=None, help='Value associated with --key'
)
@click.option(
    '--allocation_type', type=str,
    help='Must be one of: require, include, or exclude'
)
@click.option(
    '--wait_for_completion', is_flag=True, help='Wait for operation to complete'
)
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable indices'
)
@click.option(
    '--filter_list', callback=validate_filter_json,
    help='JSON string representing an array of filters.', required=True
)
@click.pass_context
def allocation_singleton(
    ctx, key, value, allocation_type, wait_for_completion, ignore_empty_list,
    filter_list):
    """
    Shard Routing Allocation
    """
    action = 'allocation'
    # Look up the action class and build a client from the settings that
    # the `cli` group stored on the context object.
    action_class = CLASS_MAP[action]
    c_args = ctx.obj['config']['client']
    client = get_client(**c_args)
    logger = logging.getLogger(__name__)
    raw_options = {
        'key': key,
        'value': value,
        'allocation_type': allocation_type,
        'wait_for_completion': wait_for_completion,
    }
    logger.debug('Validating provided options: {0}'.format(raw_options))
    mykwargs = option_schema_check(action, raw_options)
    # max_wait falls back to 30s when no client timeout is configured.
    mykwargs.update(
        { 'max_wait': c_args['timeout'] if c_args['timeout'] else 30 }
    )
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {
        'filters': filter_schema_check(action, filter_list)
    }
    ilo = IndexList(client)
    # _do_filters exits the process when the filtered list is empty.
    _do_filters(ilo, clean_filters, ignore_empty_list)
    action_obj = action_class(ilo, **mykwargs)
    ### Do the action
    _actionator(action, action_obj, dry_run=ctx.parent.params['dry_run'])
@click.command(name='close')
@click.option(
    '--delete_aliases', is_flag=True,
    help='Delete all aliases from indices to be closed'
)
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable indices'
)
@click.option(
    '--filter_list', callback=validate_filter_json,
    help='JSON string representing an array of filters.', required=True
)
@click.pass_context
def close_singleton(
    ctx, delete_aliases, ignore_empty_list, filter_list):
    """
    Close indices
    """
    action = 'close'
    logger = logging.getLogger(__name__)
    # Build the client from the settings the `cli` group stored on the
    # context object.
    es_client = get_client(**ctx.obj['config']['client'])
    raw_options = {'delete_aliases': delete_aliases}
    logger.debug('Validating provided options: {0}'.format(raw_options))
    checked_kwargs = option_schema_check(action, raw_options)
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {'filters': filter_schema_check(action, filter_list)}
    index_list = IndexList(es_client)
    # _do_filters exits the process when the filtered list is empty.
    _do_filters(index_list, clean_filters, ignore_empty_list)
    ### Do the action
    _actionator(
        action, CLASS_MAP[action](index_list, **checked_kwargs),
        dry_run=ctx.parent.params['dry_run']
    )
@click.command(name='delete_indices')
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable indices'
)
@click.option(
    '--filter_list', callback=validate_filter_json,
    help='JSON string representing an array of filters.', required=True
)
@click.pass_context
def delete_indices_singleton(ctx, ignore_empty_list, filter_list):
    """
    Delete indices
    """
    action = 'delete_indices'
    action_class = CLASS_MAP[action]
    c_args = ctx.obj['config']['client']
    client = get_client(**c_args)
    logger = logging.getLogger(__name__)
    # Cap master_timeout at 300s.  The original expression
    # `c_args['timeout'] if c_args['timeout'] <= 300 else 300` raised
    # TypeError under Python 3 when the configured timeout was None
    # (--timeout is optional); guard for None explicitly and use min().
    mykwargs = {
        'master_timeout': (
            min(c_args['timeout'], 300)
            if c_args['timeout'] is not None else 300
        )
    }
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {
        'filters': filter_schema_check(action, filter_list)
    }
    ilo = IndexList(client)
    # _do_filters exits the process when the filtered list is empty.
    _do_filters(ilo, clean_filters, ignore_empty_list)
    action_obj = action_class(ilo, **mykwargs)
    ### Do the action
    _actionator(action, action_obj, dry_run=ctx.parent.params['dry_run'])
@click.command(name='delete_snapshots')
@click.option(
    '--repository', type=str, required=True, help='Snapshot repository name'
)
@click.option(
    '--retry_count', type=int, help='Number of times to retry (max 3)'
)
@click.option(
    '--retry_interval', type=int, help='Time in seconds between retries'
)
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable snapshots'
)
@click.option(
    '--filter_list', callback=validate_filter_json,
    help='JSON string representing an array of filters.', required=True
)
@click.pass_context
def delete_snapshots_singleton(
    ctx, repository, retry_count, retry_interval, ignore_empty_list,
    filter_list):
    """
    Delete snapshots
    """
    action = 'delete_snapshots'
    action_class = CLASS_MAP[action]
    c_args = ctx.obj['config']['client']
    client = get_client(**c_args)
    logger = logging.getLogger(__name__)
    # 'repository' is included only so it passes option schema validation.
    raw_options = {
        'repository': repository,
        'retry_count': retry_count, 'retry_interval': retry_interval
    }
    logger.debug('Validating provided options: {0}'.format(raw_options))
    mykwargs = option_schema_check(action, raw_options)
    # Repo arg Not necessary after schema check. It's only for the slo object
    del mykwargs['repository']
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {
        'filters': filter_schema_check(action, filter_list)
    }
    slo = SnapshotList(client, repository=repository)
    # _do_filters exits the process when the filtered list is empty.
    _do_filters(slo, clean_filters, ignore_empty_list)
    action_obj = action_class(slo, **mykwargs)
    ### Do the action
    _actionator(action, action_obj, dry_run=ctx.parent.params['dry_run'])
@click.command(name='open')
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable indices'
)
@click.option(
    '--filter_list', callback=validate_filter_json,
    help='JSON string representing an array of filters.', required=True
)
@click.pass_context
def open_singleton(
    ctx, ignore_empty_list, filter_list):
    """
    Open indices
    """
    action = 'open'
    # Look up the action class and build a client from the settings that
    # the `cli` group stored on the context object.
    action_class = CLASS_MAP[action]
    c_args = ctx.obj['config']['client']
    client = get_client(**c_args)
    logger = logging.getLogger(__name__)
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {
        'filters': filter_schema_check(action, filter_list)
    }
    ilo = IndexList(client)
    # _do_filters exits the process when the filtered list is empty.
    _do_filters(ilo, clean_filters, ignore_empty_list)
    # Open takes no extra options -- no option_schema_check needed.
    action_obj = action_class(ilo)
    ### Do the action
    _actionator(action, action_obj, dry_run=ctx.parent.params['dry_run'])
@click.command(name='forcemerge')
@click.option(
    '--max_num_segments', type=int, required=True,
    help='Maximum number of segments per shard (minimum of 1)'
)
@click.option(
    '--delay', type=float,
    help='Time in seconds to delay between operations. Default 0, maximum 3600'
)
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable indices'
)
@click.option(
    '--filter_list', callback=validate_filter_json,
    help='JSON string representing an array of filters.', required=True
)
@click.pass_context
def forcemerge_singleton(
    ctx, max_num_segments, delay, ignore_empty_list, filter_list):
    """
    forceMerge index/shard segments
    """
    action = 'forcemerge'
    # Look up the action class and build a client from the settings that
    # the `cli` group stored on the context object.
    action_class = CLASS_MAP[action]
    c_args = ctx.obj['config']['client']
    client = get_client(**c_args)
    logger = logging.getLogger(__name__)
    raw_options = {
        'max_num_segments': max_num_segments,
        'delay': delay,
    }
    logger.debug('Validating provided options: {0}'.format(raw_options))
    mykwargs = option_schema_check(action, raw_options)
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {
        'filters': filter_schema_check(action, filter_list)
    }
    ilo = IndexList(client)
    # _do_filters exits the process when the filtered list is empty.
    _do_filters(ilo, clean_filters, ignore_empty_list)
    action_obj = action_class(ilo, **mykwargs)
    ### Do the action
    _actionator(action, action_obj, dry_run=ctx.parent.params['dry_run'])
@click.command(name='replicas')
@click.option(
    '--count', type=int, required=True, help='Number of replicas (max 10)'
)
@click.option(
    '--wait_for_completion', is_flag=True, help='Wait for operation to complete'
)
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable indices'
)
@click.option(
    '--filter_list', callback=validate_filter_json,
    help='JSON string representing an array of filters.', required=True
)
@click.pass_context
def replicas_singleton(
    ctx, count, wait_for_completion, ignore_empty_list, filter_list):
    """
    Change replica count
    """
    action = 'replicas'
    # Look up the action class and build a client from the settings that
    # the `cli` group stored on the context object.
    action_class = CLASS_MAP[action]
    c_args = ctx.obj['config']['client']
    client = get_client(**c_args)
    logger = logging.getLogger(__name__)
    raw_options = {
        'count': count,
        'wait_for_completion': wait_for_completion,
    }
    logger.debug('Validating provided options: {0}'.format(raw_options))
    mykwargs = option_schema_check(action, raw_options)
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {
        'filters': filter_schema_check(action, filter_list)
    }
    ilo = IndexList(client)
    # _do_filters exits the process when the filtered list is empty.
    _do_filters(ilo, clean_filters, ignore_empty_list)
    action_obj = action_class(ilo, **mykwargs)
    ### Do the action
    _actionator(action, action_obj, dry_run=ctx.parent.params['dry_run'])
@click.command(name='snapshot')
@click.option(
    '--repository', type=str, required=True, help='Snapshot repository')
@click.option(
    '--name', type=str, help='Snapshot name',
    show_default=True, default='curator-%Y%m%d%H%M%S'
)
@click.option(
    '--ignore_unavailable', is_flag=True, show_default=True,
    help='Ignore unavailable shards/indices.'
)
@click.option(
    '--include_global_state', type=bool, show_default=True,
    default=True, expose_value=True,
    help='Store cluster global state with snapshot.'
)
@click.option(
    '--partial', is_flag=True, show_default=True,
    help='Do not fail if primary shard is unavailable.'
)
@click.option(
    '--wait_for_completion',
    type=bool, show_default=True, default=True,
    help='Wait for operation to complete'
)
@click.option(
    '--skip_repo_fs_check', is_flag=True, expose_value=True,
    help='Skip repository filesystem access validation.'
)
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable indices'
)
@click.option(
    '--filter_list', callback=validate_filter_json, default='{"filtertype":"none"}',
    help='JSON string representing an array of filters.'
)
@click.pass_context
def snapshot_singleton(
    ctx, repository, name, ignore_unavailable, include_global_state, partial,
    skip_repo_fs_check, wait_for_completion, ignore_empty_list, filter_list):
    """
    Snapshot indices
    """
    action = 'snapshot'
    # Look up the action class and build a client from the settings that
    # the `cli` group stored on the context object.
    action_class = CLASS_MAP[action]
    c_args = ctx.obj['config']['client']
    client = get_client(**c_args)
    logger = logging.getLogger(__name__)
    raw_options = {
        'repository': repository,
        'name': name,
        'ignore_unavailable': ignore_unavailable,
        'include_global_state': include_global_state,
        'partial': partial,
        'skip_repo_fs_check': skip_repo_fs_check,
        'wait_for_completion': wait_for_completion,
    }
    logger.debug('Validating provided options: {0}'.format(raw_options))
    mykwargs = option_schema_check(action, raw_options)
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {
        'filters': filter_schema_check(action, filter_list)
    }
    ilo = IndexList(client)
    # _do_filters exits the process when the filtered list is empty.
    _do_filters(ilo, clean_filters, ignore_empty_list)
    action_obj = action_class(ilo, **mykwargs)
    ### Do the action
    _actionator(action, action_obj, dry_run=ctx.parent.params['dry_run'])
@click.command(name='show_indices')
@click.option('--verbose', help='Show verbose output.', is_flag=True)
@click.option('--header', help='Print header if --verbose', is_flag=True)
@click.option('--epoch', help='Print time as epoch if --verbose', is_flag=True)
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable indices'
)
@click.option(
    '--filter_list', callback=validate_filter_json, default='{"filtertype":"none"}',
    help='JSON string representing an array of filters.'
)
@click.pass_context
def show_indices_singleton(
    ctx, epoch, header, verbose, ignore_empty_list, filter_list):
    """
    Show indices
    """
    # Read-only command: 'open' is used purely to satisfy the filter
    # schema; no action object is ever built or executed.
    action = "open"
    c_args = ctx.obj['config']['client']
    client = get_client(**c_args)
    logger = logging.getLogger(__name__)
    logger.debug(
        'Using dummy "open" action for show_indices singleton. '
        'No action will be taken.'
    )
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {
        'filters': filter_schema_check(action, filter_list)
    }
    ilo = IndexList(client)
    # _do_filters exits the process when the filtered list is empty, so
    # `indices` is guaranteed non-empty below (max() would fail otherwise).
    _do_filters(ilo, clean_filters, ignore_empty_list)
    indices = sorted(ilo.indices)
    # Do some calculations to figure out the proper column sizes
    allbytes = []
    alldocs = []
    for idx in indices:
        allbytes.append(byte_size(ilo.index_info[idx]['size_in_bytes']))
        alldocs.append(str(ilo.index_info[idx]['docs']))
    if epoch:
        timeformat = '{6:>13}'
        column = 'creation_date'
    else:
        timeformat = '{6:>20}'
        column = 'Creation Timestamp'
    # Column widths are derived from the widest value in each column.
    formatting = (
        '{0:' + str(len(max(indices, key=len))) + '} '
        '{1:>5} '
        '{2:>' + str(len(max(allbytes, key=len)) + 1) + '} '
        '{3:>' + str(len(max(alldocs, key=len)) + 1) + '} '
        '{4:>3} {5:>3} ' + timeformat
    )
    # Print the header, if both verbose and header are enabled
    if header and verbose:
        click.secho(
            formatting.format(
                'Index', 'State', 'Size', 'Docs', 'Pri', 'Rep', column
            ), bold=True, underline=True
        )
    # Loop through indices and print info, if verbose
    for idx in indices:
        p = ilo.index_info[idx]
        if verbose:
            # 'creation_date' is absent for closed indices, hence the
            # fallbacks (0 / 'unknown/closed').
            if epoch:
                datefield = p['age']['creation_date'] if 'creation_date' in p['age'] else 0
            else:
                # NOTE(review): `datetime` is not imported by name above --
                # presumably provided by a star import; confirm.
                datefield = '{0}Z'.format(
                    datetime.utcfromtimestamp(p['age']['creation_date']
                    ).isoformat()) if 'creation_date' in p['age'] else 'unknown/closed'
            click.echo(
                formatting.format(
                    idx, p['state'], byte_size(p['size_in_bytes']),
                    p['docs'], p['number_of_shards'], p['number_of_replicas'],
                    datefield
                )
            )
        else:
            click.echo('{0}'.format(idx))
@click.command(name='show_snapshots')
@click.option(
    '--repository', type=str, required=True, help='Snapshot repository name'
)
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable snapshots'
)
@click.option(
    '--filter_list', callback=validate_filter_json, default='{"filtertype":"none"}',
    help='JSON string representing an array of filters.'
)
@click.pass_context
def show_snapshots_singleton(
        ctx, repository, ignore_empty_list, filter_list):
    """
    Show snapshots
    """
    # The delete_snapshots schema is borrowed purely for filter validation;
    # no snapshot is deleted here.
    action = 'delete_snapshots'
    client = get_client(**ctx.obj['config']['client'])
    logger = logging.getLogger(__name__)
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {'filters': filter_schema_check(action, filter_list)}
    slo = SnapshotList(client, repository=repository)
    _do_filters(slo, clean_filters, ignore_empty_list)
    # Print the surviving snapshot names in sorted order, one per line.
    for snapshot_name in sorted(slo.snapshots):
        click.secho('{0}'.format(snapshot_name))
@click.command(name='restore')
@click.option(
    '--repository', type=str, required=True, help='Snapshot repository')
@click.option(
    '--name', type=str, help='Snapshot name', required=False, default=None,
)
@click.option(
    '--rename_pattern', type=str, help='Rename pattern', required=False, default=None,
)
@click.option(
    '--rename_replacement', type=str, help='Rename replacement', required=False, default=None,
)
@click.option(
    '--ignore_unavailable', is_flag=True, show_default=True,
    help='Ignore unavailable shards/indices.'
)
@click.option(
    '--include_global_state', type=bool, show_default=True,
    default=True, expose_value=True,
    help='Store cluster global state with snapshot.'
)
@click.option(
    '--partial', is_flag=True, show_default=True,
    help='Do not fail if primary shard is unavailable.'
)
@click.option(
    '--wait_for_completion',
    type=bool, show_default=True, default=True,
    help='Wait for operation to complete'
)
@click.option(
    '--skip_repo_fs_check', is_flag=True, expose_value=True,
    help='Skip repository filesystem access validation.'
)
@click.option(
    '--ignore_empty_list', is_flag=True,
    help='Do not raise exception if there are no actionable indices'
)
@click.option(
    '--filter_list', callback=validate_filter_json, default='{"filtertype":"none"}',
    help='JSON string representing an array of filters.'
)
@click.pass_context
def restore_singleton(
        ctx, repository, name, rename_pattern, rename_replacement, ignore_unavailable,
        include_global_state, partial, wait_for_completion, skip_repo_fs_check,
        ignore_empty_list, filter_list):
    """
    Restore a snapshot
    """
    action = 'restore'
    action_class = CLASS_MAP[action]
    c_args = ctx.obj['config']['client']
    client = get_client(**c_args)
    logger = logging.getLogger(__name__)
    # Raw CLI options are schema-checked before being handed to the action.
    raw_options = {
        'repository': repository,
        'name': name,
        'rename_pattern': rename_pattern,
        'rename_replacement': rename_replacement,
        'ignore_unavailable': ignore_unavailable,
        'include_global_state': include_global_state,
        'partial': partial,
        'skip_repo_fs_check': skip_repo_fs_check,
        'wait_for_completion': wait_for_completion,
    }
    logger.debug('Validating provided options: {0}'.format(raw_options))
    mykwargs = option_schema_check(action, raw_options)
    # 'repository' is consumed by SnapshotList below, not by the restore
    # action itself, so it is dropped from the action kwargs.
    mykwargs.pop('repository')
    logger.debug('Validating provided filters: {0}'.format(filter_list))
    clean_filters = {
        'filters': filter_schema_check(action, filter_list)
    }
    slo = SnapshotList(client, repository=repository)
    _do_filters(slo, clean_filters, ignore_empty_list)
    action_obj = action_class(slo, **mykwargs)
    ### Do the action
    _actionator(action, action_obj, dry_run=ctx.parent.params['dry_run'])
@click.group()
@click.option(
    '--config',
    help='Path to configuration file. Default: ~/.curator/curator.yml',
    type=click.Path(), default=settings.config_file()
)
@click.option('--host', help='Elasticsearch host.')
@click.option('--url_prefix', help='Elasticsearch http url prefix.')
@click.option('--port', help='Elasticsearch port.')
@click.option(
    '--use_ssl', is_flag=True, callback=false_to_none,
    help='Connect to Elasticsearch through SSL.'
)
@click.option(
    '--certificate', help='Path to certificate to use for SSL validation.')
@click.option(
    '--client-cert',
    help='Path to file containing SSL certificate for client auth.', type=str
)
@click.option(
    '--client-key',
    help='Path to file containing SSL key for client auth.', type=str
)
@click.option(
    '--ssl-no-validate', is_flag=True, callback=false_to_none,
    help='Do not validate SSL certificate'
)
@click.option('--http_auth', help='Use Basic Authentication ex: user:pass')
@click.option('--timeout', help='Connection timeout in seconds.', type=int)
@click.option(
    '--master-only', is_flag=True, callback=false_to_none,
    help='Only operate on elected master node.'
)
@click.option('--dry-run', is_flag=True, help='Do not perform any changes.')
@click.option('--loglevel', help='Log level')
@click.option('--logfile', help='log file')
@click.option('--logformat', help='Log output format [default|logstash|json].')
@click.version_option(version=__version__)
@click.pass_context
def cli(
        ctx, config, host, url_prefix, port, use_ssl, certificate, client_cert,
        client_key, ssl_no_validate, http_auth, timeout, master_only, dry_run,
        loglevel, logfile, logformat):
    """
    CLI entry point: load configuration, apply command-line overrides and
    stash the merged result on the click context for the subcommands.
    """
    # Missing config file is fine: command-line flags can supply everything,
    # and config_override handles a None base.
    if os.path.isfile(config):
        initial_config = test_config(config)
    else:
        initial_config = None
    configuration = config_override(ctx, initial_config)
    set_logging(configuration['logging'])
    test_client_options(configuration['client'])
    # Note: removed an unused local 'logger = logging.getLogger(__name__)'
    # that the original created and never referenced.
    ctx.obj['config'] = configuration
# Attach every singleton action command to the top-level CLI group.
cli.add_command(allocation_singleton)
cli.add_command(close_singleton)
cli.add_command(delete_indices_singleton)
cli.add_command(delete_snapshots_singleton)
cli.add_command(forcemerge_singleton)
cli.add_command(open_singleton)
cli.add_command(replicas_singleton)
cli.add_command(snapshot_singleton)
cli.add_command(restore_singleton)
cli.add_command(show_indices_singleton)
cli.add_command(show_snapshots_singleton)
import asyncio
import datetime
import time
import traceback
from concurrent.futures import CancelledError
import discord
from discord.ext import commands
from discord.ext.commands import BadArgument
from Util import Permissioncheckers, Configuration, Utils, GearbotLogging, Pages, InfractionUtils, Emoji, Translator, \
Archive
from Util.Converters import BannedMember, UserID, Reason
from database.DatabaseConnector import LoggedMessage
class Moderation:
    """Guild moderation cog: kick/ban/softban/mute/purge commands, user and
    server info lookups, message archiving, and listeners that keep mutes
    applied across rejoins and new channels."""

    # Per-command permission metadata — presumably consumed by
    # Permissioncheckers.check_permission (see __local_check); verify.
    # "required" is the level needed to run; "min"/"max" appear to bound
    # configurable overrides.
    permissions = {
        "min": 2,
        "max": 6,
        "required": 2,
        "commands": {
            "userinfo": {"required": 2, "min": 0, "max": 6},
            "serverinfo": {"required": 2, "min": 0, "max": 6},
            "roles": {"required": 2, "min": 0, "max": 6},
        }
    }
def __init__(self, bot):
    """Load persisted mutes, start the unmute loop and register the pager."""
    self.bot: commands.Bot = bot
    # Mute registry shared with the bot object: {str(guild_id): {str(user_id): until_ts}}.
    bot.mutes = self.mutes = Utils.fetch_from_disk("mutes")
    self.running = True
    self.bot.loop.create_task(unmuteTask(self))
    # Bug fix: the original line ended with a stray ", []" which built and
    # immediately discarded a tuple; the dead code is removed.
    Pages.register("roles", self.roles_init, self.roles_update)
def __unload(self):
    """Cog teardown: persist mutes, stop the unmute loop, drop the pager."""
    Utils.saveToDisk("mutes", self.mutes)
    # Signal the unmuteTask while-loop to exit on its next pass.
    self.running = False
    Pages.unregister("roles")
async def __local_check(self, ctx):
    # Cog-wide gate: defer every command to the central permission system.
    return Permissioncheckers.check_permission(ctx)
async def roles_init(self, ctx):
    # Pager init callback: returns (text, embed, has_more_pages, extra_data)
    # for page 1 of the role list.
    pages = self.gen_roles_pages(ctx.guild)
    page = pages[0]
    return f"**{Translator.translate('roles', ctx.guild.id, server_name=ctx.guild.name, page_num=1, pages=len(pages))}**```\n{page}```", None, len(pages) > 1, []
async def roles_update(self, ctx, message, page_num, action, data):
    """Pager update callback: rebuild the role list and return the page
    selected by the reaction ``action``."""
    pages = self.gen_roles_pages(message.guild)
    page, page_num = Pages.basic_pages(pages, page_num, action)
    # Consistency fix: the original mixed message.guild.id with
    # ctx.guild.name; use message.guild for both.
    return f"**{Translator.translate('roles', message.guild.id, server_name=message.guild.name, page_num=page_num + 1, pages=len(pages))}**```\n{page}```", None, page_num
@staticmethod
def gen_roles_pages(guild: discord.Guild):
    """Render every role as '<name> <padding> - <id>' (sorted by
    '<name> - <id>') and hand the joined text to the paginator."""
    by_label = {f"{role.name} - {role.id}": role for role in guild.roles}
    # Pad names to the longest one so the ids line up (minimum width 1).
    width = max((len(role.name) for role in guild.roles), default=1)
    lines = []
    for label in sorted(by_label.keys()):
        role = by_label[label]
        lines.append(f"{role.name} {' ' * (width - len(role.name))} - {role.id}")
    return Pages.paginate("\n".join(lines))
@commands.command()
@commands.guild_only()
async def roles(self, ctx: commands.Context):
    """Lists all roles on the server and their IDs, useful for configuring without having to ping that role"""
    # Hand off to the shared paginator registered in __init__.
    await Pages.create_new("roles", ctx)
@commands.command(aliases=["👢"])
@commands.guild_only()
@commands.bot_has_permissions(kick_members=True)
async def kick(self, ctx, user: discord.Member, *, reason:Reason=""):
    """kick_help"""
    if reason == "":
        reason = Translator.translate("no_reason", ctx.guild.id)
    # Invoker must outrank the target (or own the guild) and may not target
    # themselves or the bot.
    if (ctx.author != user and user != ctx.bot.user and ctx.author.top_role > user.top_role) or (ctx.guild.owner == ctx.author and ctx.author != user):
        # The bot itself must also outrank the target to perform the kick.
        if ctx.me.top_role > user.top_role:
            # presumably marks this as a moderator action for the leave
            # listener — TODO confirm against the event handlers.
            self.bot.data["forced_exits"].append(user.id)
            await ctx.guild.kick(user,
                                 reason=f"Moderator: {ctx.author.name}#{ctx.author.discriminator} ({ctx.author.id}) Reason: {reason}")
            await ctx.send(
                f"{Emoji.get_chat_emoji('YES')} {Translator.translate('kick_confirmation', ctx.guild.id, user=Utils.clean_user(user), user_id=user.id, reason=reason)}")
            await GearbotLogging.log_to(ctx.guild.id, "MOD_ACTIONS",
                                        f":boot: {Translator.translate('kick_log', ctx.guild.id, user=Utils.clean_user(user), user_id=user.id, moderator=Utils.clean_user(ctx.author), moderator_id=ctx.author.id, reason=reason)}")
            InfractionUtils.add_infraction(ctx.guild.id, user.id, ctx.author.id, Translator.translate('kick', ctx.guild.id), reason)
        else:
            await ctx.send(Translator.translate('kick_unable',ctx.guild.id, user=Utils.clean_user(user)))
    else:
        await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('kick_not_allowed', ctx.guild.id, user=user)}")
@commands.command(aliases=["🚪"])
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
async def ban(self, ctx: commands.Context, user: discord.Member, *, reason:Reason=""):
    """ban_help"""
    if reason == "":
        reason = Translator.translate("no_reason", ctx.guild.id)
    # Invoker must outrank the target (or own the guild) and may not target
    # themselves or the bot.
    if (ctx.author != user and user != ctx.bot.user and ctx.author.top_role > user.top_role) or (ctx.guild.owner == ctx.author and ctx.author != user):
        # The bot must outrank the target as well, or Discord rejects the ban.
        if ctx.me.top_role > user.top_role:
            # presumably flags this as a moderator action for the leave/ban
            # listeners — TODO confirm against the event handlers.
            self.bot.data["forced_exits"].append(user.id)
            # delete_message_days=0: ban without wiping message history.
            await ctx.guild.ban(user, reason=f"Moderator: {ctx.author.name} ({ctx.author.id}) Reason: {reason}",
                                delete_message_days=0)
            InfractionUtils.add_infraction(ctx.guild.id, user.id, ctx.author.id, "Ban", reason)
            await ctx.send(
                f"{Emoji.get_chat_emoji('YES')} {Translator.translate('ban_confirmation', ctx.guild.id, user=Utils.clean_user(user), user_id=user.id, reason=reason)}")
            await GearbotLogging.log_to(ctx.guild.id, "MOD_ACTIONS",
                                        f":door: {Translator.translate('ban_log', ctx.guild.id, user=Utils.clean_user(user), user_id=user.id, moderator=Utils.clean_user(ctx.author), moderator_id=ctx.author.id, reason=reason)}")
        else:
            await ctx.send(Translator.translate('ban_unable', ctx.guild.id, user=Utils.clean_user(user)))
    else:
        await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('ban_not_allowed', ctx.guild.id, user=user)}")
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
async def softban(self, ctx:commands.Context, user: discord.Member, *, reason:Reason=""):
    """softban_help"""
    if reason == "":
        reason = Translator.translate("no_reason", ctx.guild.id)
    # Same hierarchy rules as kick/ban: invoker outranks target or owns guild.
    if (ctx.author != user and user != ctx.bot.user and ctx.author.top_role > user.top_role) or (ctx.guild.owner == ctx.author and ctx.author != user):
        if ctx.me.top_role > user.top_role:
            # presumably suppresses duplicate leave/unban logs from the raw
            # events — TODO confirm against the event handlers.
            self.bot.data["forced_exits"].append(user.id)
            self.bot.data["unbans"].append(user.id)
            # Softban = ban + immediate unban: clears the target's last day
            # of messages without keeping them banned.
            await ctx.guild.ban(user, reason=f"softban - Moderator: {ctx.author.name} ({ctx.author.id}) Reason: {reason}", delete_message_days=1)
            await ctx.guild.unban(user)
            await ctx.send(f"{Emoji.get_chat_emoji('YES')} {Translator.translate('softban_confirmation', ctx.guild.id, user=Utils.clean_user(user), user_id=user.id, reason=reason)}")
            await GearbotLogging.log_to(ctx.guild.id, "MOD_ACTIONS", f":door: {Translator.translate('softban_log', ctx.guild.id, user=Utils.clean_user(user), user_id=user.id, moderator=Utils.clean_user(ctx.author), moderator_id=ctx.author.id, reason=reason)}")
            InfractionUtils.add_infraction(ctx.guild.id, user.id, ctx.author.id, "Softban", reason)
        else:
            await ctx.send(Translator.translate('softban_unable', ctx.guild.id, user=Utils.clean_user(user)))
    else:
        await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('softban_not_allowed', ctx.guild.id, user=user)}")
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
async def forceban(self, ctx: commands.Context, user_id: UserID, *, reason:Reason=""):
    """forceban_help"""
    if reason == "":
        reason = Translator.translate("no_reason", ctx.guild.id)
    try:
        member = await commands.MemberConverter().convert(ctx, str(user_id))
    except BadArgument:
        # Not a member of this guild: ban by raw id without hierarchy checks.
        user = await ctx.bot.get_user_info(user_id)
        self.bot.data["forced_exits"].append(user.id)
        await ctx.guild.ban(user, reason=f"Moderator: {ctx.author.name} ({ctx.author.id}) Reason: {reason}",
                            delete_message_days=0)
        await ctx.send(
            f"{Emoji.get_chat_emoji('YES')} {Translator.translate('forceban_confirmation', ctx.guild.id, user=Utils.clean_user(user), user_id=user_id, reason=reason)}")
        await GearbotLogging.log_to(ctx.guild.id, "MOD_ACTIONS",
                                    f":door: {Translator.translate('forceban_log', ctx.guild.id, user=Utils.clean_user(user), user_id=user_id, moderator=Utils.clean_user(ctx.author), moderator_id=ctx.author.id, reason=reason)}")
        InfractionUtils.add_infraction(ctx.guild.id, user.id, ctx.author.id, Translator.translate('forced_ban', ctx.guild.id), reason)
    else:
        # Target turned out to be a member: delegate to the regular ban,
        # which enforces the role-hierarchy checks.
        await ctx.send(f"{Emoji.get_chat_emoji('WARNING')} {Translator.translate('forceban_to_ban', ctx.guild.id, user=Utils.clean_user(member))}")
        await ctx.invoke(self.ban, member, reason=reason)
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(manage_messages=True)
async def purge(self, ctx, msgs: int):
    """purge_help"""
    # Reject out-of-range counts up front.
    if msgs < 1:
        return await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('purge_too_small', ctx.guild.id)}")
    if msgs > 1000:
        return await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('purge_too_big', ctx.guild.id)}")
    try:
        deleted = await ctx.channel.purge(limit=msgs)
    except discord.NotFound:
        # sleep for a sec just in case the other bot is still purging so we don't get removed as well
        await asyncio.sleep(1)
        # Bug fix: return here — the original fell through to the
        # confirmation below and crashed with NameError ('deleted' unbound).
        return await ctx.send(f"{Emoji.get_chat_emoji('YES')} {Translator.translate('purge_fail_not_found', ctx.guild.id)}")
    await ctx.send(f"{Emoji.get_chat_emoji('YES')} {Translator.translate('purge_confirmation', ctx.guild.id, count=len(deleted))}", delete_after=10)
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
async def unban(self, ctx, member: BannedMember, *, reason:Reason=""):
    """unban_help"""
    if reason == "":
        reason = Translator.translate("no_reason", ctx.guild.id)
    # presumably suppresses the duplicate unban log from the raw event —
    # TODO confirm against the unban listener.
    self.bot.data["unbans"].append(member.user.id)
    await ctx.guild.unban(member.user, reason=f"Moderator: {ctx.author.name} ({ctx.author.id}) Reason: {reason}")
    InfractionUtils.add_infraction(ctx.guild.id, member.user.id, ctx.author.id, "Unban", reason)
    await ctx.send(
        f"{Emoji.get_chat_emoji('YES')} {Translator.translate('unban_confirmation', ctx.guild.id, user=Utils.clean_user(member.user), user_id=member.user.id, reason=reason)}")
    await GearbotLogging.log_to(ctx.guild.id, "MOD_ACTIONS",
                                f"{Emoji.get_chat_emoji('INNOCENT')} {Translator.translate('unban_log', ctx.guild.id, user=Utils.clean_user(member.user), user_id=member.user.id, moderator=Utils.clean_user(ctx.author), moderator_id=ctx.author.id, reason=reason)}")
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(manage_roles=True)
async def mute(self, ctx: commands.Context, target: discord.Member, durationNumber: int, durationIdentifier: str, *,
               reason:Reason=""):
    """mute_help"""
    if reason == "":
        reason = Translator.translate("no_reason", ctx.guild.id)
    roleid = Configuration.get_var(ctx.guild.id, "MUTE_ROLE")
    # Bug fix: compare with '==' instead of 'is' — identity checks on int
    # literals rely on CPython small-int caching and are not guaranteed.
    if roleid == 0:
        await ctx.send(f"{Emoji.get_chat_emoji('WARNING')} {Translator.translate('mute_not_configured', ctx.guild.id, user=target.mention)}")
    else:
        role = discord.utils.get(ctx.guild.roles, id=roleid)
        if role is None:
            await ctx.send(f"{Emoji.get_chat_emoji('WARNING')} {Translator.translate('mute_role_missing', ctx.guild.id, user=target.mention)}")
        else:
            # Invoker must outrank the target, or own the guild.
            if (ctx.author != target and target != ctx.bot.user and ctx.author.top_role > target.top_role) or ctx.guild.owner == ctx.author:
                duration = Utils.convertToSeconds(durationNumber, durationIdentifier)
                if duration > 0:
                    until = time.time() + duration
                    await target.add_roles(role, reason=f"{reason}, as requested by {ctx.author.name}")
                    # Registry is keyed by stringified ids (see on_member_join
                    # and unmuteTask). Idiom fix: 'not in' instead of 'not x in'.
                    if str(ctx.guild.id) not in self.mutes:
                        self.mutes[str(ctx.guild.id)] = dict()
                    self.mutes[str(ctx.guild.id)][str(target.id)] = until
                    await ctx.send(f"{Emoji.get_chat_emoji('MUTE')} {Translator.translate('mute_confirmation', ctx.guild.id, user=Utils.clean_user(target), duration=f'{durationNumber} {durationIdentifier}')}")
                    Utils.saveToDisk("mutes", self.mutes)
                    await GearbotLogging.log_to(ctx.guild.id, "MOD_ACTIONS", f"{Emoji.get_chat_emoji('MUTE')} {Translator.translate('mute_log', ctx.guild.id, user=Utils.clean_user(target), user_id=target.id, moderator=Utils.clean_user(ctx.author), moderator_id=ctx.author.id, duration=f'{durationNumber} {durationIdentifier}', reason=reason)}")
                    InfractionUtils.add_infraction(ctx.guild.id, target.id, ctx.author.id, "Mute", reason)
                else:
                    await ctx.send(f"{Emoji.get_chat_emoji('WHAT')} {Translator.translate('mute_negative_denied', ctx.guild.id, duration=f'{durationNumber} {durationIdentifier}')} {Emoji.get_chat_emoji('WHAT')}")
            else:
                await ctx.send(
                    f"{Emoji.get_chat_emoji('NO')} {Translator.translate('mute_not_allowed', ctx.guild.id, user=target)}")
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(manage_roles=True)
async def unmute(self, ctx: commands.Context, target: discord.Member, *, reason:Reason=""):
    """unmute_help"""
    if reason == "":
        reason = Translator.translate("no_reason", ctx.guild.id)
    roleid = Configuration.get_var(ctx.guild.id, "MUTE_ROLE")
    # Bug fix: '== 0' instead of 'is 0' (identity check on an int literal).
    if roleid == 0:
        await ctx.send(
            f"{Emoji.get_chat_emoji('NO')} The mute feature has been disabled on this server, as such i cannot unmute that person")
    else:
        role = discord.utils.get(ctx.guild.roles, id=roleid)
        if role is None:
            await ctx.send(
                f"{Emoji.get_chat_emoji('NO')} Unable to comply, the role i've been told to use for muting no longer exists")
        else:
            await target.remove_roles(role, reason=f"Unmuted by {ctx.author.name}, {reason}")
            await ctx.send(f"{Emoji.get_chat_emoji('INNOCENT')} {target.display_name} has been unmuted")
            await GearbotLogging.log_to(ctx.guild.id, "MOD_ACTIONS",
                                        f"{Emoji.get_chat_emoji('INNOCENT')} {target.name}#{target.discriminator} (`{target.id}`) has been unmuted by {ctx.author.name}")
            InfractionUtils.add_infraction(ctx.guild.id, target.id, ctx.author.id, "Unmute", reason)
@commands.command()
async def userinfo(self, ctx: commands.Context, *, userID:UserID):
    """Shows information about the chosen user"""
    user = None
    member = None
    # No target supplied: show info for the invoker.
    if userID is None:
        user = ctx.author
        if ctx.guild is not None:
            member = ctx.guild.get_member(user.id)
    elif ctx.guild is not None:
        try:
            user = member = ctx.guild.get_member(userID)
        except BadArgument:
            # NOTE(review): get_member returns None rather than raising
            # BadArgument, so this handler looks dead — confirm.
            pass
    # Not a member of this guild (or DM context): resolve the bare user.
    if user is None:
        user = await Utils.get_user(userID)
    embed = discord.Embed(color=0x7289DA, timestamp=ctx.message.created_at)
    embed.set_thumbnail(url=user.avatar_url)
    embed.set_footer(text=Translator.translate('requested_by', ctx, user=ctx.author.name), icon_url=ctx.author.avatar_url)
    embed.add_field(name=Translator.translate('name', ctx), value=f"{user.name}#{user.discriminator}", inline=True)
    embed.add_field(name=Translator.translate('id', ctx), value=user.id, inline=True)
    embed.add_field(name=Translator.translate('bot_account', ctx), value=user.bot, inline=True)
    embed.add_field(name=Translator.translate('animated_avatar', ctx), value=user.is_avatar_animated(), inline=True)
    # Guild-membership details only when we resolved an actual member.
    if member is not None:
        account_joined = member.joined_at.strftime("%d-%m-%Y")
        embed.add_field(name=Translator.translate('nickname', ctx), value=member.nick, inline=True)
        embed.add_field(name=Translator.translate('top_role', ctx), value=member.top_role.name, inline=True)
        embed.add_field(name=Translator.translate('joined_at', ctx),
                        value=f"{account_joined} ({(ctx.message.created_at - member.joined_at).days} days ago)",
                        inline=True)
    account_made = user.created_at.strftime("%d-%m-%Y")
    embed.add_field(name=Translator.translate('account_created_at', ctx),
                    value=f"{account_made} ({(ctx.message.created_at - user.created_at).days} days ago)",
                    inline=True)
    embed.add_field(name=Translator.translate('avatar_url', ctx), value=f"[{Translator.translate('avatar_url', ctx)}]({user.avatar_url})")
    await ctx.send(embed=embed)
@commands.command()
async def serverinfo(self, ctx):
    """Shows information about the current server."""
    guild_features = ", ".join(ctx.guild.features)
    # Note: removed a leftover debug print() of the feature list.
    # Empty feature list renders as None in the embed field.
    if guild_features == "":
        guild_features = None
    # Idiom fix: build the role-name list directly instead of indexing
    # with range(len(...)).
    role_list = [role.name for role in ctx.guild.roles]
    guild_made = ctx.guild.created_at.strftime("%d-%m-%Y")
    embed = discord.Embed(color=0x7289DA, timestamp=datetime.datetime.fromtimestamp(time.time()))
    embed.set_thumbnail(url=ctx.guild.icon_url)
    embed.set_footer(text=Translator.translate('requested_by', ctx, user=ctx.author), icon_url=ctx.author.avatar_url)
    embed.add_field(name=Translator.translate('name', ctx), value=ctx.guild.name, inline=True)
    embed.add_field(name=Translator.translate('id', ctx), value=ctx.guild.id, inline=True)
    embed.add_field(name=Translator.translate('owner', ctx), value=ctx.guild.owner, inline=True)
    embed.add_field(name=Translator.translate('members', ctx), value=ctx.guild.member_count, inline=True)
    embed.add_field(name=Translator.translate('text_channels', ctx), value=str(len(ctx.guild.text_channels)), inline=True)
    embed.add_field(name=Translator.translate('voice_channels', ctx), value=str(len(ctx.guild.voice_channels)), inline=True)
    embed.add_field(name=Translator.translate('total_channel', ctx), value=str(len(ctx.guild.text_channels) + len(ctx.guild.voice_channels)),
                    inline=True)
    embed.add_field(name=Translator.translate('created_at', ctx),
                    value=f"{guild_made} ({(ctx.message.created_at - ctx.guild.created_at).days} days ago)",
                    inline=True)
    embed.add_field(name=Translator.translate('vip_features', ctx), value=guild_features, inline=True)
    if ctx.guild.icon_url != "":
        embed.add_field(name=Translator.translate('server_icon', ctx), value=f"[{Translator.translate('server_icon', ctx)}]({ctx.guild.icon_url})", inline=True)
    embed.add_field(name=Translator.translate('all_roles', ctx), value=", ".join(role_list), inline=True) #todo paginate
    await ctx.send(embed=embed)
@commands.group()
@commands.bot_has_permissions(attach_files=True)
async def archive(self, ctx):
    # Group entry point: show a typing indicator while the chosen
    # subcommand gathers and ships the messages.
    await ctx.trigger_typing()
@archive.command()
async def channel(self, ctx, channel:discord.TextChannel=None, amount=100):
    """Archive the last <amount> logged messages of a channel to a file."""
    # Hard cap keeps archive files and DB queries bounded.
    if amount > 5000:
        await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('archive_too_much', ctx)}")
        return
    if channel is None:
        channel = ctx.message.channel
    if Configuration.get_var(ctx.guild.id, "EDIT_LOGS"):
        # Only archive what the requester could read themselves.
        permissions = channel.permissions_for(ctx.author)
        if permissions.read_messages and permissions.read_message_history:
            messages = LoggedMessage.select().where((LoggedMessage.server == ctx.guild.id) & (LoggedMessage.channel == channel.id)).order_by(LoggedMessage.messageid.desc()).limit(amount)
            await Archive.ship_messages(ctx, messages)
        else:
            # Bug fix: the original dropped the 'await' (the coroutine was
            # never sent) and omitted the translator's location argument
            # that every other translate call in this cog passes.
            await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('archive_denied_read_perms', ctx)}")
    else:
        await ctx.send("Not implemented, please enable edit logs to be able to use archiving")
@archive.command()
async def user(self, ctx, user:UserID, amount=100):
    # Hard cap keeps archive files and DB queries bounded.
    if amount > 5000:
        await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('archive_too_much', ctx)}")
        return
    if Configuration.get_var(ctx.guild.id, "EDIT_LOGS"):
        # Newest logged messages first, limited to the requested amount.
        messages = LoggedMessage.select().where(
            (LoggedMessage.server == ctx.guild.id) & (LoggedMessage.author == user)).order_by(LoggedMessage.messageid.desc()).limit(amount)
        await Archive.ship_messages(ctx, messages)
    else:
        await ctx.send("Please enable edit logs so i can archive users")
async def on_guild_channel_create(self, channel: discord.abc.GuildChannel):
    """Apply the mute-role overrides to newly created channels so muted
    members cannot talk/react (text) or speak/connect (voice) in them."""
    guild: discord.Guild = channel.guild
    roleid = Configuration.get_var(guild.id, "MUTE_ROLE")
    # Bug fix: '!=' instead of 'is not' — identity comparison with an int
    # literal relies on CPython small-int caching and is not guaranteed.
    if roleid != 0:
        role = discord.utils.get(guild.roles, id=roleid)
        if role is not None and channel.permissions_for(guild.me).manage_channels:
            if isinstance(channel, discord.TextChannel):
                await channel.set_permissions(role, reason=Translator.translate('mute_setup', guild.id), send_messages=False,
                                              add_reactions=False)
            else:
                await channel.set_permissions(role, reason=Translator.translate('mute_setup', guild.id), speak=False, connect=False)
async def on_member_join(self, member: discord.Member):
    """Re-apply the mute role when a muted member rejoins to evade it."""
    # Bug fix: the registry is keyed by stringified ids (see mute()), so the
    # member id must be stringified too — the original int lookup never matched.
    if str(member.guild.id) in self.mutes and str(member.id) in self.mutes[str(member.guild.id)]:
        roleid = Configuration.get_var(member.guild.id, "MUTE_ROLE")
        # Bug fix: '!=' instead of 'is not' for the int comparison.
        if roleid != 0:
            role = discord.utils.get(member.guild.roles, id=roleid)
            if role is not None:
                if member.guild.me.guild_permissions.manage_roles:
                    await member.add_roles(role, reason=Translator.translate('mute_reapply_reason', member.guild.id))
                    await GearbotLogging.log_to(member.guild.id, "MOD_ACTIONS",f"{Emoji.get_chat_emoji('MUTE')} {Translator.translate('mute_reapply_log', member.guild.id, user=Utils.clean_user(member), user_id=member.id)}")
                else:
                    # Bug fix: 'member.build.id' was a typo for 'member.guild.id'
                    # (raised AttributeError whenever this branch ran).
                    await GearbotLogging.log_to(member.guild.id, "MOD_ACTIONS", Translator.translate('mute_reapply_failed_log', member.guild.id))
async def on_guild_remove(self, guild: discord.Guild):
    """Drop the stored mutes of a guild the bot left and persist the registry."""
    # Bug fix: the registry is keyed by str(guild.id); the original tested the
    # int id (never matched, leaking stale entries). Also dropped the
    # redundant '.keys()'.
    if str(guild.id) in self.mutes:
        del self.mutes[str(guild.id)]
        Utils.saveToDisk("mutes", self.mutes)
def setup(bot):
    # discord.py extension entry point.
    bot.add_cog(Moderation(bot))
async def unmuteTask(modcog: Moderation):
    """Background loop: every 10s lift expired mutes and prune guilds whose
    mute role was unconfigured. A failing user id is added to ``skips`` so a
    single bad entry cannot wedge the loop; errors are reported to the bot log."""
    GearbotLogging.info("Started unmute background task")
    skips = []
    updated = False
    while modcog.running:
        # Track current ids so the error report below can name them.
        userid = 0
        guildid = 0
        try:
            guildstoremove = []
            # Note: renamed the original loop variables 'list' and 'id',
            # which shadowed builtins.
            for guildid, guild_mutes in modcog.mutes.items():
                guild: discord.Guild = modcog.bot.get_guild(int(guildid))
                toremove = []
                # Bug fix: '== 0' instead of 'is 0' (int identity check).
                if Configuration.get_var(int(guildid), "MUTE_ROLE") == 0:
                    guildstoremove.append(guildid)
                for userid, until in guild_mutes.items():
                    if time.time() > until and userid not in skips:
                        # NOTE(review): member is None if the user left the
                        # guild; the broad handler below then skips them — verify.
                        member = guild.get_member(int(userid))
                        role = discord.utils.get(guild.roles, id=Configuration.get_var(int(guildid), "MUTE_ROLE"))
                        if guild.me.guild_permissions.manage_roles:
                            await member.remove_roles(role, reason="Mute expired")
                            await GearbotLogging.log_to(guild.id, "MOD_ACTIONS",
                                                        f"<:gearInnocent:465177981287923712> {member.name}#{member.discriminator} (`{member.id}`) has automaticaly been unmuted")
                        else:
                            await GearbotLogging.log_to(guild.id, "MOD_ACTIONS",
                                                        f":no_entry: ERROR: {member.name}#{member.discriminator} (`{member.id}`) was muted earlier but I no longer have the permissions needed to unmute this person, please remove the role manually!")
                        updated = True
                        toremove.append(userid)
                for expired in toremove:
                    del guild_mutes[expired]
                # Yield to the event loop between guilds.
                await asyncio.sleep(0)
            if updated:
                Utils.saveToDisk("mutes", modcog.mutes)
                updated = False
            for guild_key in guildstoremove:
                del modcog.mutes[guild_key]
            await asyncio.sleep(10)
        except CancelledError:
            pass  # bot shutdown
        except Exception as ex:
            GearbotLogging.error("Something went wrong in the unmute task")
            GearbotLogging.error(traceback.format_exc())
            # Skip the offending user on future passes.
            skips.append(userid)
            embed = discord.Embed(colour=discord.Colour(0xff0000),
                                  timestamp=datetime.datetime.utcfromtimestamp(time.time()))
            embed.set_author(name="Something went wrong in the unmute task:")
            embed.add_field(name="Current guildid", value=guildid)
            embed.add_field(name="Current userid", value=userid)
            embed.add_field(name="Exception", value=ex)
            # Split the traceback into <=1024-char chunks (embed field limit).
            v = ""
            for line in traceback.format_exc().splitlines():
                if len(v) + len(line) > 1024:
                    embed.add_field(name="Stacktrace", value=v)
                    v = ""
                v = f"{v}\n{line}"
            if len(v) > 0:
                embed.add_field(name="Stacktrace", value=v)
            await GearbotLogging.bot_log(embed=embed)
            await asyncio.sleep(10)
    GearbotLogging.info("Unmute background task terminated")
import datetime
import time
import traceback
from concurrent.futures import CancelledError
import discord
from discord.ext import commands
from discord.ext.commands import BadArgument
from Util import Permissioncheckers, Configuration, Utils, GearbotLogging, Pages, InfractionUtils, Emoji, Translator, \
Archive
from Util.Converters import BannedMember, UserID, Reason
from database.DatabaseConnector import LoggedMessage
class Moderation:
permissions = {
"min": 2,
"max": 6,
"required": 2,
"commands": {
"userinfo": {"required": 2, "min": 0, "max": 6},
"serverinfo": {"required": 2, "min": 0, "max": 6},
"roles": {"required": 2, "min": 0, "max": 6},
}
}
def __init__(self, bot):
self.bot: commands.Bot = bot
bot.mutes = self.mutes = Utils.fetch_from_disk("mutes")
self.running = True
self.bot.loop.create_task(unmuteTask(self))
Pages.register("roles", self.roles_init, self.roles_update), []
def __unload(self):
Utils.saveToDisk("mutes", self.mutes)
self.running = False
Pages.unregister("roles")
async def __local_check(self, ctx):
return Permissioncheckers.check_permission(ctx)
async def roles_init(self, ctx):
pages = self.gen_roles_pages(ctx.guild)
page = pages[0]
return f"**{Translator.translate('roles', ctx.guild.id, server_name=ctx.guild.name, page_num=1, pages=len(pages))}**```\n{page}```", None, len(pages) > 1, []
async def roles_update(self, ctx, message, page_num, action, data):
pages = self.gen_roles_pages(message.guild)
page, page_num = Pages.basic_pages(pages, page_num, action)
return f"**{Translator.translate('roles', message.guild.id, server_name=ctx.guild.name, page_num=page_num + 1, pages=len(pages))}**```\n{page}```", None, page_num
@staticmethod
def gen_roles_pages(guild: discord.Guild):
role_list = dict()
longest_name = 1
for role in guild.roles:
role_list[f"{role.name} - {role.id}"] = role
longest_name = max(longest_name, len(role.name))
return Pages.paginate("\n".join(f"{role_list[r].name} {' ' * (longest_name - len(role_list[r].name))} - {role_list[r].id}" for r in sorted(role_list.keys())))
@commands.command()
@commands.guild_only()
async def roles(self, ctx: commands.Context):
"""Lists all roles on the server and their IDs, useful for configuring without having to ping that role"""
await Pages.create_new("roles", ctx)
@commands.command(aliases=["👢"])
@commands.guild_only()
@commands.bot_has_permissions(kick_members=True)
async def kick(self, ctx, user: discord.Member, *, reason:Reason=""):
"""kick_help"""
if reason == "":
reason = Translator.translate("no_reason", ctx.guild.id)
if (ctx.author != user and user != ctx.bot.user and ctx.author.top_role > user.top_role) or (ctx.guild.owner == ctx.author and ctx.author != user):
if ctx.me.top_role > user.top_role:
self.bot.data["forced_exits"].append(user.id)
await ctx.guild.kick(user,
reason=f"Moderator: {ctx.author.name}#{ctx.author.discriminator} ({ctx.author.id}) Reason: {reason}")
await ctx.send(
f"{Emoji.get_chat_emoji('YES')} {Translator.translate('kick_confirmation', ctx.guild.id, user=Utils.clean_user(user), user_id=user.id, reason=reason)}")
await GearbotLogging.log_to(ctx.guild.id, "MOD_ACTIONS",
f":boot: {Translator.translate('kick_log', ctx.guild.id, user=Utils.clean_user(user), user_id=user.id, moderator=Utils.clean_user(ctx.author), moderator_id=ctx.author.id, reason=reason)}")
InfractionUtils.add_infraction(ctx.guild.id, user.id, ctx.author.id, Translator.translate('kick', ctx.guild.id), reason)
else:
await ctx.send(Translator.translate('kick_unable',ctx.guild.id, user=Utils.clean_user(user)))
else:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('kick_not_allowed', ctx.guild.id, user=user)}")
@commands.command(aliases=["🚪"])
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
async def ban(self, ctx: commands.Context, user: discord.Member, *, reason:Reason=""):
"""ban_help"""
if reason == "":
reason = Translator.translate("no_reason", ctx.guild.id)
if (ctx.author != user and user != ctx.bot.user and ctx.author.top_role > user.top_role) or (ctx.guild.owner == ctx.author and ctx.author != user):
if ctx.me.top_role > user.top_role:
self.bot.data["forced_exits"].append(user.id)
await ctx.guild.ban(user, reason=f"Moderator: {ctx.author.name} ({ctx.author.id}) Reason: {reason}",
delete_message_days=0)
InfractionUtils.add_infraction(ctx.guild.id, user.id, ctx.author.id, "Ban", reason)
await ctx.send(
f"{Emoji.get_chat_emoji('YES')} {Translator.translate('ban_confirmation', ctx.guild.id, user=Utils.clean_user(user), user_id=user.id, reason=reason)}")
await GearbotLogging.log_to(ctx.guild.id, "MOD_ACTIONS",
f":door: {Translator.translate('ban_log', ctx.guild.id, user=Utils.clean_user(user), user_id=user.id, moderator=Utils.clean_user(ctx.author), moderator_id=ctx.author.id, reason=reason)}")
else:
await ctx.send(Translator.translate('ban_unable', ctx.guild.id, user=Utils.clean_user(user)))
else:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('ban_not_allowed', ctx.guild.id, user=user)}")
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
async def softban(self, ctx:commands.Context, user: discord.Member, *, reason:Reason=""):
"""softban_help"""
if reason == "":
reason = Translator.translate("no_reason", ctx.guild.id)
if (ctx.author != user and user != ctx.bot.user and ctx.author.top_role > user.top_role) or (ctx.guild.owner == ctx.author and ctx.author != user):
if ctx.me.top_role > user.top_role:
self.bot.data["forced_exits"].append(user.id)
self.bot.data["unbans"].append(user.id)
await ctx.guild.ban(user, reason=f"softban - Moderator: {ctx.author.name} ({ctx.author.id}) Reason: {reason}", delete_message_days=1)
await ctx.guild.unban(user)
await ctx.send(f"{Emoji.get_chat_emoji('YES')} {Translator.translate('softban_confirmation', ctx.guild.id, user=Utils.clean_user(user), user_id=user.id, reason=reason)}")
await GearbotLogging.log_to(ctx.guild.id, "MOD_ACTIONS", f":door: {Translator.translate('softban_log', ctx.guild.id, user=Utils.clean_user(user), user_id=user.id, moderator=Utils.clean_user(ctx.author), moderator_id=ctx.author.id, reason=reason)}")
InfractionUtils.add_infraction(ctx.guild.id, user.id, ctx.author.id, "Softban", reason)
else:
await ctx.send(Translator.translate('softban_unable', ctx.guild.id, user=Utils.clean_user(user)))
else:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('softban_not_allowed', ctx.guild.id, user=user)}")
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
async def forceban(self, ctx: commands.Context, user_id: UserID, *, reason:Reason=""):
"""forceban_help"""
if reason == "":
reason = Translator.translate("no_reason", ctx.guild.id)
try:
member = await commands.MemberConverter().convert(ctx, str(user_id))
except BadArgument:
user = await ctx.bot.get_user_info(user_id)
self.bot.data["forced_exits"].append(user.id)
await ctx.guild.ban(user, reason=f"Moderator: {ctx.author.name} ({ctx.author.id}) Reason: {reason}",
delete_message_days=0)
await ctx.send(
f"{Emoji.get_chat_emoji('YES')} {Translator.translate('forceban_confirmation', ctx.guild.id, user=Utils.clean_user(user), user_id=user_id, reason=reason)}")
await GearbotLogging.log_to(ctx.guild.id, "MOD_ACTIONS",
f":door: {Translator.translate('forceban_log', ctx.guild.id, user=Utils.clean_user(user), user_id=user_id, moderator=Utils.clean_user(ctx.author), moderator_id=ctx.author.id, reason=reason)}")
InfractionUtils.add_infraction(ctx.guild.id, user.id, ctx.author.id, Translator.translate('forced_ban', ctx.guild.id), reason)
else:
await ctx.send(f"{Emoji.get_chat_emoji('WARNING')} {Translator.translate('forceban_to_ban', ctx.guild.id, user=Utils.clean_user(member))}")
await ctx.invoke(self.ban, member, reason=reason)
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(manage_messages=True)
async def purge(self, ctx, msgs: int):
"""purge_help"""
if msgs < 1:
return await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('purge_too_small', ctx.guild.id)}")
if msgs > 1000:
return await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('purge_too_big', ctx.guild.id)}")
try:
deleted = await ctx.channel.purge(limit=msgs)
except discord.NotFound:
# sleep for a sec just in case the other bot is still purging so we don't get removed as well
await asyncio.sleep(1)
await ctx.send(f"{Emoji.get_chat_emoji('YES')} {Translator.translate('purge_fail_not_found', ctx.guild.id)}")
await ctx.send(f"{Emoji.get_chat_emoji('YES')} {Translator.translate('purge_confirmation', ctx.guild.id, count=len(deleted))}", delete_after=10)
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
async def unban(self, ctx, member: BannedMember, *, reason:Reason=""):
"""unban_help"""
if reason == "":
reason = Translator.translate("no_reason", ctx.guild.id)
self.bot.data["unbans"].append(member.user.id)
await ctx.guild.unban(member.user, reason=f"Moderator: {ctx.author.name} ({ctx.author.id}) Reason: {reason}")
InfractionUtils.add_infraction(ctx.guild.id, member.user.id, ctx.author.id, "Unban", reason)
await ctx.send(
f"{Emoji.get_chat_emoji('YES')} {Translator.translate('unban_confirmation', ctx.guild.id, user=Utils.clean_user(member.user), user_id=member.user.id, reason=reason)}")
await GearbotLogging.log_to(ctx.guild.id, "MOD_ACTIONS",
f"{Emoji.get_chat_emoji('INNOCENT')} {Translator.translate('unban_log', ctx.guild.id, user=Utils.clean_user(member.user), user_id=member.user.id, moderator=Utils.clean_user(ctx.author), moderator_id=ctx.author.id, reason=reason)}")
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(manage_roles=True)
async def mute(self, ctx: commands.Context, target: discord.Member, durationNumber: int, durationIdentifier: str, *,
reason:Reason=""):
"""mute_help"""
if reason == "":
reason = Translator.translate("no_reason", ctx.guild.id)
roleid = Configuration.get_var(ctx.guild.id, "MUTE_ROLE")
if roleid is 0:
await ctx.send(f"{Emoji.get_chat_emoji('WARNING')} {Translator.translate('mute_not_configured', ctx.guild.id, user=target.mention)}")
else:
role = discord.utils.get(ctx.guild.roles, id=roleid)
if role is None:
await ctx.send(f"{Emoji.get_chat_emoji('WARNING')} {Translator.translate('mute_role_missing', ctx.guild.id, user=target.mention)}")
else:
if (ctx.author != target and target != ctx.bot.user and ctx.author.top_role > target.top_role) or ctx.guild.owner == ctx.author:
duration = Utils.convertToSeconds(durationNumber, durationIdentifier)
if duration > 0:
until = time.time() + duration
await target.add_roles(role, reason=f"{reason}, as requested by {ctx.author.name}")
if not str(ctx.guild.id) in self.mutes:
self.mutes[str(ctx.guild.id)] = dict()
self.mutes[str(ctx.guild.id)][str(target.id)] = until
await ctx.send(f"{Emoji.get_chat_emoji('MUTE')} {Translator.translate('mute_confirmation', ctx.guild.id, user=Utils.clean_user(target), duration=f'{durationNumber} {durationIdentifier}')}")
Utils.saveToDisk("mutes", self.mutes)
await GearbotLogging.log_to(ctx.guild.id, "MOD_ACTIONS", f"{Emoji.get_chat_emoji('MUTE')} {Translator.translate('mute_log', ctx.guild.id, user=Utils.clean_user(target), user_id=target.id, moderator=Utils.clean_user(ctx.author), moderator_id=ctx.author.id, duration=f'{durationNumber} {durationIdentifier}', reason=reason)}")
InfractionUtils.add_infraction(ctx.guild.id, target.id, ctx.author.id, "Mute", reason)
else:
await ctx.send(f"{Emoji.get_chat_emoji('WHAT')} {Translator.translate('mute_negative_denied', ctx.guild.id, duration=f'{durationNumber} {durationIdentifier}')} {Emoji.get_chat_emoji('WHAT')}")
else:
await ctx.send(
f"{Emoji.get_chat_emoji('NO')} {Translator.translate('mute_not_allowed', ctx.guild.id, user=target)}")
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(manage_roles=True)
async def unmute(self, ctx: commands.Context, target: discord.Member, *, reason:Reason=""):
"""unmute_help"""
if reason == "":
reason = Translator.translate("no_reason", ctx.guild.id)
roleid = Configuration.get_var(ctx.guild.id, "MUTE_ROLE")
if roleid is 0:
await ctx.send(
f"{Emoji.get_chat_emoji('NO')} The mute feature has been disabled on this server, as such i cannot unmute that person")
else:
role = discord.utils.get(ctx.guild.roles, id=roleid)
if role is None:
await ctx.send(
f"{Emoji.get_chat_emoji('NO')} Unable to comply, the role i've been told to use for muting no longer exists")
else:
await target.remove_roles(role, reason=f"Unmuted by {ctx.author.name}, {reason}")
await ctx.send(f"{Emoji.get_chat_emoji('INNOCENT')} {target.display_name} has been unmuted")
await GearbotLogging.log_to(ctx.guild.id, "MOD_ACTIONS",
f"{Emoji.get_chat_emoji('INNOCENT')} {target.name}#{target.discriminator} (`{target.id}`) has been unmuted by {ctx.author.name}")
InfractionUtils.add_infraction(ctx.guild.id, target.id, ctx.author.id, "Unmute", reason)
@commands.command()
async def userinfo(self, ctx: commands.Context, *, userID:UserID):
"""Shows information about the chosen user"""
user = None
member = None
if userID is None:
user = ctx.author
if ctx.guild is not None:
member = ctx.guild.get_member(user.id)
elif ctx.guild is not None:
try:
user = member = ctx.guild.get_member(userID)
except BadArgument:
pass
if user is None:
user = await Utils.get_user(userID)
embed = discord.Embed(color=0x7289DA, timestamp=ctx.message.created_at)
embed.set_thumbnail(url=user.avatar_url)
embed.set_footer(text=Translator.translate('requested_by', ctx, user=ctx.author.name), icon_url=ctx.author.avatar_url)
embed.add_field(name=Translator.translate('name', ctx), value=f"{user.name}#{user.discriminator}", inline=True)
embed.add_field(name=Translator.translate('id', ctx), value=user.id, inline=True)
embed.add_field(name=Translator.translate('bot_account', ctx), value=user.bot, inline=True)
embed.add_field(name=Translator.translate('animated_avatar', ctx), value=user.is_avatar_animated(), inline=True)
if member is not None:
account_joined = member.joined_at.strftime("%d-%m-%Y")
embed.add_field(name=Translator.translate('nickname', ctx), value=member.nick, inline=True)
embed.add_field(name=Translator.translate('top_role', ctx), value=member.top_role.name, inline=True)
embed.add_field(name=Translator.translate('joined_at', ctx),
value=f"{account_joined} ({(ctx.message.created_at - member.joined_at).days} days ago)",
inline=True)
account_made = user.created_at.strftime("%d-%m-%Y")
embed.add_field(name=Translator.translate('account_created_at', ctx),
value=f"{account_made} ({(ctx.message.created_at - user.created_at).days} days ago)",
inline=True)
embed.add_field(name=Translator.translate('avatar_url', ctx), value=f"[{Translator.translate('avatar_url', ctx)}]({user.avatar_url})")
await ctx.send(embed=embed)
@commands.command()
async def serverinfo(self, ctx):
"""Shows information about the current server."""
guild_features = ", ".join(ctx.guild.features)
print(guild_features)
if guild_features == "":
guild_features = None
role_list = []
for i in range(len(ctx.guild.roles)):
role_list.append(ctx.guild.roles[i].name)
guild_made = ctx.guild.created_at.strftime("%d-%m-%Y")
embed = discord.Embed(color=0x7289DA, timestamp= datetime.datetime.fromtimestamp(time.time()))
embed.set_thumbnail(url=ctx.guild.icon_url)
embed.set_footer(text=Translator.translate('requested_by', ctx, user=ctx.author), icon_url=ctx.author.avatar_url)
embed.add_field(name=Translator.translate('name', ctx), value=ctx.guild.name, inline=True)
embed.add_field(name=Translator.translate('id', ctx), value=ctx.guild.id, inline=True)
embed.add_field(name=Translator.translate('owner', ctx), value=ctx.guild.owner, inline=True)
embed.add_field(name=Translator.translate('members', ctx), value=ctx.guild.member_count, inline=True)
embed.add_field(name=Translator.translate('text_channels', ctx), value=str(len(ctx.guild.text_channels)), inline=True)
embed.add_field(name=Translator.translate('voice_channels', ctx), value=str(len(ctx.guild.voice_channels)), inline=True)
embed.add_field(name=Translator.translate('total_channel', ctx), value=str(len(ctx.guild.text_channels) + len(ctx.guild.voice_channels)),
inline=True)
embed.add_field(name=Translator.translate('created_at', ctx),
value=f"{guild_made} ({(ctx.message.created_at - ctx.guild.created_at).days} days ago)",
inline=True)
embed.add_field(name=Translator.translate('vip_features', ctx), value=guild_features, inline=True)
if ctx.guild.icon_url != "":
embed.add_field(name=Translator.translate('server_icon', ctx), value=f"[{Translator.translate('server_icon', ctx)}]({ctx.guild.icon_url})", inline=True)
embed.add_field(name=Translator.translate('all_roles', ctx), value=", ".join(role_list), inline=True) #todo paginate
await ctx.send(embed=embed)
@commands.group()
@commands.bot_has_permissions(attach_files=True)
async def archive(self, ctx):
await ctx.trigger_typing()
@archive.command()
async def channel(self, ctx, channel:discord.TextChannel=None, amount=100):
if amount > 5000:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('archive_too_much', ctx)}")
return
if channel is None:
channel = ctx.message.channel
if Configuration.get_var(ctx.guild.id, "EDIT_LOGS"):
permissions = channel.permissions_for(ctx.author)
if permissions.read_messages and permissions.read_message_history:
messages = LoggedMessage.select().where((LoggedMessage.server == ctx.guild.id) & (LoggedMessage.channel == channel.id)).order_by(LoggedMessage.messageid.desc()).limit(amount)
await Archive.ship_messages(ctx, messages)
else:
ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('archive_denied_read_perms')}")
else:
await ctx.send("Not implemented, please enable edit logs to be able to use archiving")
@archive.command()
async def user(self, ctx, user:UserID, amount=100):
if amount > 5000:
await ctx.send(f"{Emoji.get_chat_emoji('NO')} {Translator.translate('archive_too_much', ctx)}")
return
if Configuration.get_var(ctx.guild.id, "EDIT_LOGS"):
messages = LoggedMessage.select().where(
(LoggedMessage.server == ctx.guild.id) & (LoggedMessage.author == user)).order_by(LoggedMessage.messageid.desc()).limit(amount)
await Archive.ship_messages(ctx, messages)
else:
await ctx.send("Please enable edit logs so i can archive users")
async def on_guild_channel_create(self, channel: discord.abc.GuildChannel):
guild: discord.Guild = channel.guild
roleid = Configuration.get_var(guild.id, "MUTE_ROLE")
if roleid is not 0:
role = discord.utils.get(guild.roles, id=roleid)
if role is not None and channel.permissions_for(guild.me).manage_channels:
if isinstance(channel, discord.TextChannel):
await channel.set_permissions(role, reason=Translator.translate('mute_setup', guild.id), send_messages=False,
add_reactions=False)
else:
await channel.set_permissions(role, reason=Translator.translate('mute_setup', guild.id), speak=False, connect=False)
async def on_member_join(self, member: discord.Member):
if str(member.guild.id) in self.mutes and member.id in self.mutes[str(member.guild.id)]:
roleid = Configuration.get_var(member.guild.id, "MUTE_ROLE")
if roleid is not 0:
role = discord.utils.get(member.guild.roles, id=roleid)
if role is not None:
if member.guild.me.guild_permissions.manage_roles:
await member.add_roles(role, reason=Translator.translate('mute_reapply_reason', member.guild.id))
await GearbotLogging.log_to(member.guild.id, "MOD_ACTIONS",f"{Emoji.get_chat_emoji('MUTE')} {Translator.translate('mute_reapply_log', member.guild.id, user=Utils.clean_user(member), user_id=member.id)}")
else:
await GearbotLogging.log_to(member.guild.id, "MOD_ACTIONS", Translator.translate('mute_reapply_failed_log', member.build.id))
async def on_guild_remove(self, guild: discord.Guild):
if guild.id in self.mutes.keys():
del self.mutes[guild.id]
Utils.saveToDisk("mutes", self.mutes)
def setup(bot):
bot.add_cog(Moderation(bot))
async def unmuteTask(modcog: Moderation):
GearbotLogging.info("Started unmute background task")
skips = []
updated = False
while modcog.running:
userid = 0
guildid = 0
try:
guildstoremove = []
for guildid, list in modcog.mutes.items():
guild: discord.Guild = modcog.bot.get_guild(int(guildid))
toremove = []
if Configuration.get_var(int(guildid), "MUTE_ROLE") is 0:
guildstoremove.append(guildid)
for userid, until in list.items():
if time.time() > until and userid not in skips:
member = guild.get_member(int(userid))
role = discord.utils.get(guild.roles, id=Configuration.get_var(int(guildid), "MUTE_ROLE"))
if guild.me.guild_permissions.manage_roles:
await member.remove_roles(role, reason="Mute expired")
await GearbotLogging.log_to(guild.id, "MOD_ACTIONS",
f"<:gearInnocent:465177981287923712> {member.name}#{member.discriminator} (`{member.id}`) has automaticaly been unmuted")
else:
await GearbotLogging.log_to(guild.id, "MOD_ACTIONS",
f":no_entry: ERROR: {member.name}#{member.discriminator} (`{member.id}`) was muted earlier but I no longer have the permissions needed to unmute this person, please remove the role manually!")
updated = True
toremove.append(userid)
for todo in toremove:
del list[todo]
await asyncio.sleep(0)
if updated:
Utils.saveToDisk("mutes", modcog.mutes)
updated = False
for id in guildstoremove:
del modcog.mutes[id]
await asyncio.sleep(10)
except CancelledError:
pass # bot shutdown
except Exception as ex:
GearbotLogging.error("Something went wrong in the unmute task")
GearbotLogging.error(traceback.format_exc())
skips.append(userid)
embed = discord.Embed(colour=discord.Colour(0xff0000),
timestamp=datetime.datetime.utcfromtimestamp(time.time()))
embed.set_author(name="Something went wrong in the unmute task:")
embed.add_field(name="Current guildid", value=guildid)
embed.add_field(name="Current userid", value=userid)
embed.add_field(name="Exception", value=ex)
v = ""
for line in traceback.format_exc().splitlines():
if len(v) + len(line) > 1024:
embed.add_field(name="Stacktrace", value=v)
v = ""
v = f"{v}\n{line}"
if len(v) > 0:
embed.add_field(name="Stacktrace", value=v)
await GearbotLogging.bot_log(embed=embed)
await asyncio.sleep(10)
GearbotLogging.info("Unmute background task terminated") | 0.453504 | 0.18704 |
class MovieWorld:
def __init__(self, name):
self.name = name
self.customers = []
self.dvds = []
def __repr__(self):
customers = '\n'.join(repr(customer) for customer in self.customers)
dvds = '\n'.join(repr(dvd) for dvd in self.dvds)
return customers + "\n" + dvds
@staticmethod
def dvd_capacity():
return 15
@staticmethod
def customer_capacity():
return 10
@staticmethod
def filter_object(obj_id, list_of_obj):
return [obj for obj in list_of_obj if obj.id == obj_id][0]
def add_customer(self, customer):
if len(self.customers) < self.customer_capacity():
self.customers.append(customer)
def add_dvd(self, dvd):
if len(self.dvds) < self.dvd_capacity():
self.dvds.append(dvd)
def rent_dvd(self, customer_id, dvd_id):
# filtered_customers = [customer for customer in self.customers if customer.id == customer_id]
# filtered_dvd = [dvd for dvd in self.dvds if dvd.id == dvd_id]
current_customer = self.filter_object(customer_id, self.customers)
current_dvd = self.filter_object(dvd_id, self.dvds)
if current_dvd in current_customer.rented_dvds:
return f"{current_customer.name} has already rented {current_dvd.name}"
if current_dvd.is_rented:
return "DVD is already rented"
if not current_dvd.age_restriction <= current_customer.age:
return f"{current_customer.name} should be at least {current_dvd.age_restriction} to rent this movie"
current_dvd.is_rented = True
current_customer.rented_dvds.append(current_dvd)
return f"{current_customer.name} has successfully rented {current_dvd.name}"
def return_dvd(self, customer_id, dvd_id):
current_customer = self.filter_object(customer_id, self.customers)
current_dvd = self.filter_object(dvd_id, self.dvds)
if current_dvd not in current_customer.rented_dvds:
return f"{current_customer.name} does not have that DVD"
current_customer.rented_dvds.remove(current_dvd)
current_dvd.is_rented = False
return f"{current_customer.name} has successfully returned {current_dvd.name}" | static_and_class_methods/movie_world/movie_world.py | class MovieWorld:
def __init__(self, name):
self.name = name
self.customers = []
self.dvds = []
def __repr__(self):
customers = '\n'.join(repr(customer) for customer in self.customers)
dvds = '\n'.join(repr(dvd) for dvd in self.dvds)
return customers + "\n" + dvds
@staticmethod
def dvd_capacity():
return 15
@staticmethod
def customer_capacity():
return 10
@staticmethod
def filter_object(obj_id, list_of_obj):
return [obj for obj in list_of_obj if obj.id == obj_id][0]
def add_customer(self, customer):
if len(self.customers) < self.customer_capacity():
self.customers.append(customer)
def add_dvd(self, dvd):
if len(self.dvds) < self.dvd_capacity():
self.dvds.append(dvd)
def rent_dvd(self, customer_id, dvd_id):
# filtered_customers = [customer for customer in self.customers if customer.id == customer_id]
# filtered_dvd = [dvd for dvd in self.dvds if dvd.id == dvd_id]
current_customer = self.filter_object(customer_id, self.customers)
current_dvd = self.filter_object(dvd_id, self.dvds)
if current_dvd in current_customer.rented_dvds:
return f"{current_customer.name} has already rented {current_dvd.name}"
if current_dvd.is_rented:
return "DVD is already rented"
if not current_dvd.age_restriction <= current_customer.age:
return f"{current_customer.name} should be at least {current_dvd.age_restriction} to rent this movie"
current_dvd.is_rented = True
current_customer.rented_dvds.append(current_dvd)
return f"{current_customer.name} has successfully rented {current_dvd.name}"
def return_dvd(self, customer_id, dvd_id):
current_customer = self.filter_object(customer_id, self.customers)
current_dvd = self.filter_object(dvd_id, self.dvds)
if current_dvd not in current_customer.rented_dvds:
return f"{current_customer.name} does not have that DVD"
current_customer.rented_dvds.remove(current_dvd)
current_dvd.is_rented = False
return f"{current_customer.name} has successfully returned {current_dvd.name}" | 0.458227 | 0.202089 |
import sys
import os.path
import json
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import models.models as database
from sqlalchemy.exc import IntegrityError
import uuid
from config.config import env
from config.mongo_adapter import mongo
from bot import reactbot, chatter
import time
class ChatCtrl(object):
@staticmethod
def sendResponse(db, bot, data, emit):
try:
## Get response from bot
response = chatter.Chatter.answer(bot, data['message'], data['_conversation_id'])
## FACTOR DE RETRASO
delay_factor = env['DELAY']
num_words = len(response.split(' '))
## tiempo calculado a responder
time_to_respond = num_words * delay_factor
emit('typing', {
'userName': 'ReactBot',
'type': 'valkiria'
},
room=data['room'])
## retrasar evento typing por el tiempo
## time_to_respond y activar animacion
time.sleep(time_to_respond)
## nueva conversacion
newMessage = database.Conversacion(
texto=data['message'],
usuarios_id=data['user_id'],
sesion_id=data['sesion_id'],
respuesta_bot=response
)
db.session.add(newMessage)
db.session.commit()
except Exception as e:
print(e)
db.session.rollback()
finally:
## enviar evento stop typing
emit('stop typing',{
'userName': 'ReactBot'
},
room=data['room'])
## retardar 0.4 segundos el evento stop typing
## para activar la animacion
time.sleep(0.4)
## emitir evento user_says:msg
emit('user_says:msg', {
'userName': 'ReactBot',
'message': response,
'type': 'valkiria'
}, room=data['room'])
@staticmethod
def getConversation(user_id, db, response):
try:
res = {
'success': False
}
user = database.Usuarios.query.get(user_id)
db_conversations = database.Conversacion.query.filter_by(usuarios_id=user_id).all()
user_conversation = []
for conversation in db_conversations:
# print(conversation.fecha_creacion)
c = {
'id': conversation.id,
'created_at': str(conversation.fecha_creacion),
'user_res': {
'userName': user.nombre_usuario,
'message': conversation.texto,
},
'bot_res': {
'userName': 'valkiria',
'message': conversation.respuesta_bot,
}
}
user_conversation.append(c)
res['success'] = True
res['conversation'] = user_conversation
except Exception as e:
print(e)
res['msg'] = 'Hubo un error al obtener la conversación'
finally:
print(res)
return response(json.dumps(res), mimetype='application/json')
@staticmethod
def getConversationText(user_id, db, response):
try:
res = {
'success': False
}
user = database.Usuarios.query.get(user_id)
db_conversations = database.Conversacion.query.filter_by(usuarios_id=user_id).all()
user_conversation = []
for conversation in db_conversations:
# print(conversation.fecha_creacion)
c = {
'id': conversation.id,
'created_at': str(conversation.fecha_creacion),
'message': conversation.texto,
}
user_conversation.append(c)
res['success'] = True
res['conversation'] = user_conversation
except Exception as e:
res['msg'] = 'Hubo un error al obtener la conversación'
finally:
return response(json.dumps(res), mimetype='application/json')
@staticmethod
def getConversations(db, response):
try:
res = {
'success': False
}
db_conversations = database.Conversacion.query.all()
user_conversation = []
for conversation in db_conversations:
c = {
'id': conversation.id,
'created_at': str(conversation.fecha_creacion),
'message': conversation.texto,
}
user_conversation.append(c)
res['success'] = True
res['conversations'] = user_conversation
except Exception as e:
res['msg'] = 'Hubo un error al obtener la conversación'
finally:
return response(json.dumps(res), mimetype='application/json') | controllers/chat_ctrl.py | import sys
import os.path
import json
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import models.models as database
from sqlalchemy.exc import IntegrityError
import uuid
from config.config import env
from config.mongo_adapter import mongo
from bot import reactbot, chatter
import time
class ChatCtrl(object):
@staticmethod
def sendResponse(db, bot, data, emit):
try:
## Get response from bot
response = chatter.Chatter.answer(bot, data['message'], data['_conversation_id'])
## FACTOR DE RETRASO
delay_factor = env['DELAY']
num_words = len(response.split(' '))
## tiempo calculado a responder
time_to_respond = num_words * delay_factor
emit('typing', {
'userName': 'ReactBot',
'type': 'valkiria'
},
room=data['room'])
## retrasar evento typing por el tiempo
## time_to_respond y activar animacion
time.sleep(time_to_respond)
## nueva conversacion
newMessage = database.Conversacion(
texto=data['message'],
usuarios_id=data['user_id'],
sesion_id=data['sesion_id'],
respuesta_bot=response
)
db.session.add(newMessage)
db.session.commit()
except Exception as e:
print(e)
db.session.rollback()
finally:
## enviar evento stop typing
emit('stop typing',{
'userName': 'ReactBot'
},
room=data['room'])
## retardar 0.4 segundos el evento stop typing
## para activar la animacion
time.sleep(0.4)
## emitir evento user_says:msg
emit('user_says:msg', {
'userName': 'ReactBot',
'message': response,
'type': 'valkiria'
}, room=data['room'])
@staticmethod
def getConversation(user_id, db, response):
try:
res = {
'success': False
}
user = database.Usuarios.query.get(user_id)
db_conversations = database.Conversacion.query.filter_by(usuarios_id=user_id).all()
user_conversation = []
for conversation in db_conversations:
# print(conversation.fecha_creacion)
c = {
'id': conversation.id,
'created_at': str(conversation.fecha_creacion),
'user_res': {
'userName': user.nombre_usuario,
'message': conversation.texto,
},
'bot_res': {
'userName': 'valkiria',
'message': conversation.respuesta_bot,
}
}
user_conversation.append(c)
res['success'] = True
res['conversation'] = user_conversation
except Exception as e:
print(e)
res['msg'] = 'Hubo un error al obtener la conversación'
finally:
print(res)
return response(json.dumps(res), mimetype='application/json')
@staticmethod
def getConversationText(user_id, db, response):
try:
res = {
'success': False
}
user = database.Usuarios.query.get(user_id)
db_conversations = database.Conversacion.query.filter_by(usuarios_id=user_id).all()
user_conversation = []
for conversation in db_conversations:
# print(conversation.fecha_creacion)
c = {
'id': conversation.id,
'created_at': str(conversation.fecha_creacion),
'message': conversation.texto,
}
user_conversation.append(c)
res['success'] = True
res['conversation'] = user_conversation
except Exception as e:
res['msg'] = 'Hubo un error al obtener la conversación'
finally:
return response(json.dumps(res), mimetype='application/json')
@staticmethod
def getConversations(db, response):
try:
res = {
'success': False
}
db_conversations = database.Conversacion.query.all()
user_conversation = []
for conversation in db_conversations:
c = {
'id': conversation.id,
'created_at': str(conversation.fecha_creacion),
'message': conversation.texto,
}
user_conversation.append(c)
res['success'] = True
res['conversations'] = user_conversation
except Exception as e:
res['msg'] = 'Hubo un error al obtener la conversación'
finally:
return response(json.dumps(res), mimetype='application/json') | 0.084278 | 0.072112 |
from typing import List
import pydantic
from fastapi.testclient import TestClient
from seisspark.pipeline_repository import PipelineInfo
from seisspark_service.routers.pipelines import CreateModuleRequest, CreatePipelineRequest, ModuleDescription, PipelineDescription
def test_seisspark_service_modules(seisspark_service_client: TestClient) -> None:
    """Verify the module-listing and module-schema endpoints.

    Checks that the well-known SU processing modules are advertised by
    ``GET /api/v1/modules`` and that the JSON schema returned for ``SUfilter``
    exactly matches the schema generated from the ``SUFilterParams`` model.
    """
    response = seisspark_service_client.get("/api/v1/modules")
    response.raise_for_status()
    module_types: List[str] = pydantic.parse_obj_as(List[str], response.json())
    # The service must expose at least these core SU processing modules.
    assert "SUfilter" in module_types and "SUsort" in module_types and "SUimp2d" in module_types

    response = seisspark_service_client.get("/api/v1/modules/SUfilter")
    response.raise_for_status()
    module_schema = response.json()
    # Exact-match the whole generated JSON schema so that any accidental change
    # to the SUFilterParams model (field names, defaults, nesting) is caught.
    assert module_schema == {
        "title": "SUFilterParams",
        "type": "object",
        "properties": {
            "filter": {
                "title": "Filter",
                "default": [{"f": 10.0, "a": 0.0}, {"f": 20.0, "a": 1.0}, {"f": 30.0, "a": 1.0}, {"f": 40.0, "a": 0.0}],
                "type": "array",
                "items": {"$ref": "#/definitions/SUFilterFA"},
            }
        },
        "definitions": {"SUFilterFA": {"title": "SUFilterFA", "type": "object", "properties": {"f": {"title": "F", "type": "number"}, "a": {"title": "A", "type": "number"}}, "required": ["f", "a"]}},
    }
def test_seisspark_service_pipelines(seisspark_service_client: TestClient) -> None:
    """Exercise the pipeline CRUD round-trip: list, create, fetch, delete."""
    client = seisspark_service_client

    def _list_pipelines() -> List[PipelineInfo]:
        # Helper: GET the pipeline listing and parse it into models.
        resp = client.get("/api/v1/pipelines")
        resp.raise_for_status()
        return pydantic.parse_obj_as(List[PipelineInfo], resp.json())

    # Repository starts empty.
    assert _list_pipelines() == []
    # Create a pipeline; the returned info must echo the requested name.
    new_name = "test_pipeline"
    resp = client.post("/api/v1/pipelines", json=CreatePipelineRequest(name=new_name).dict())
    resp.raise_for_status()
    created: PipelineInfo = pydantic.parse_obj_as(PipelineInfo, resp.json())
    assert created.name == new_name
    # Fetch by id: description matches and carries no modules yet.
    resp = client.get(f"/api/v1/pipelines/{created.id}")
    resp.raise_for_status()
    descr: PipelineDescription = pydantic.parse_obj_as(PipelineDescription, resp.json())
    assert descr.id == created.id and descr.name == new_name and descr.modules == []
    # Listing now contains exactly the new pipeline.
    assert _list_pipelines() == [PipelineInfo(id=created.id, name=new_name)]
    # Delete it and verify the repository is empty again.
    resp = client.delete(f"/api/v1/pipelines/{created.id}")
    resp.raise_for_status()
    assert _list_pipelines() == []
def test_seisspark_service_pipeline_module(seisspark_service_client: TestClient) -> None:
    """End-to-end module lifecycle inside a pipeline: create, inspect, delete.

    Fix: JSON container checks now use isinstance() instead of comparing
    ``type(x) == T`` — the standard Python idiom for type checks.
    """
    # Repository must start empty.
    response = seisspark_service_client.get("/api/v1/pipelines")
    response.raise_for_status()
    pipelines: List[PipelineInfo] = pydantic.parse_obj_as(List[PipelineInfo], response.json())
    assert pipelines == []
    # Create a pipeline to host the module.
    pipeline_name = "test_pipeline"
    response = seisspark_service_client.post("/api/v1/pipelines", json=CreatePipelineRequest(name=pipeline_name).dict())
    response.raise_for_status()
    pipeline_info: PipelineInfo = pydantic.parse_obj_as(PipelineInfo, response.json())
    assert pipeline_info.name == pipeline_name
    pipeline_id = pipeline_info.id
    # Add a synthetic-data input module to the pipeline.
    module_type = "SUimp2d"
    module_name = "input"
    response = seisspark_service_client.post(f"/api/v1/pipelines/{pipeline_id}/modules", json=CreateModuleRequest(module_type=module_type, name=module_name).dict())
    response.raise_for_status()
    module_descr: ModuleDescription = pydantic.parse_obj_as(ModuleDescription, response.json())
    assert module_descr.name == module_name
    module_id = module_descr.id
    # Parameters endpoint returns a JSON object.
    response = seisspark_service_client.get(f"/api/v1/pipelines/{pipeline_id}/modules/{module_id}/parameters")
    response.raise_for_status()
    json_parameters = response.json()
    assert isinstance(json_parameters, dict)
    # Data endpoint returns a 2-D array of float samples.
    response = seisspark_service_client.get(f"/api/v1/pipelines/{pipeline_id}/modules/{module_id}/data/0")
    response.raise_for_status()
    json_data = response.json()
    assert isinstance(json_data, list) and isinstance(json_data[0], list) and isinstance(json_data[0][0], float)
    # Clean up: delete the module, then the pipeline.
    response = seisspark_service_client.delete(f"/api/v1/pipelines/{pipeline_id}/modules/{module_id}")
    response.raise_for_status()
    response = seisspark_service_client.delete(f"/api/v1/pipelines/{pipeline_id}")
    response.raise_for_status()
import pydantic
from fastapi.testclient import TestClient
from seisspark.pipeline_repository import PipelineInfo
from seisspark_service.routers.pipelines import CreateModuleRequest, CreatePipelineRequest, ModuleDescription, PipelineDescription
def test_seisspark_service_modules(seisspark_service_client: TestClient) -> None:
response = seisspark_service_client.get("/api/v1/modules")
response.raise_for_status()
module_types: List[str] = pydantic.parse_obj_as(List[str], response.json())
assert "SUfilter" in module_types and "SUsort" in module_types and "SUimp2d" in module_types
response = seisspark_service_client.get("/api/v1/modules/SUfilter")
response.raise_for_status()
module_schema = response.json()
print(module_schema)
assert module_schema == {
"title": "SUFilterParams",
"type": "object",
"properties": {
"filter": {
"title": "Filter",
"default": [{"f": 10.0, "a": 0.0}, {"f": 20.0, "a": 1.0}, {"f": 30.0, "a": 1.0}, {"f": 40.0, "a": 0.0}],
"type": "array",
"items": {"$ref": "#/definitions/SUFilterFA"},
}
},
"definitions": {"SUFilterFA": {"title": "SUFilterFA", "type": "object", "properties": {"f": {"title": "F", "type": "number"}, "a": {"title": "A", "type": "number"}}, "required": ["f", "a"]}},
}
def test_seisspark_service_pipelines(seisspark_service_client: TestClient) -> None:
response = seisspark_service_client.get("/api/v1/pipelines")
response.raise_for_status()
pipelines: List[PipelineInfo] = pydantic.parse_obj_as(List[PipelineInfo], response.json())
assert pipelines == []
pipeline_name = "test_pipeline"
response = seisspark_service_client.post("/api/v1/pipelines", json=CreatePipelineRequest(name=pipeline_name).dict())
response.raise_for_status()
pipeline_info: PipelineInfo = pydantic.parse_obj_as(PipelineInfo, response.json())
assert pipeline_info.name == pipeline_name
pipeline_id = pipeline_info.id
response = seisspark_service_client.get(f"/api/v1/pipelines/{pipeline_id}")
response.raise_for_status()
pipeline_desc: PipelineDescription = pydantic.parse_obj_as(PipelineDescription, response.json())
assert pipeline_desc.id == pipeline_id and pipeline_desc.name == pipeline_name and pipeline_desc.modules == []
response = seisspark_service_client.get("/api/v1/pipelines")
response.raise_for_status()
pipelines = pydantic.parse_obj_as(List[PipelineInfo], response.json())
assert pipelines == [PipelineInfo(id=pipeline_id, name=pipeline_name)]
response = seisspark_service_client.delete(f"/api/v1/pipelines/{pipeline_id}")
response.raise_for_status()
response = seisspark_service_client.get("/api/v1/pipelines")
response.raise_for_status()
pipelines = pydantic.parse_obj_as(List[PipelineInfo], response.json())
assert pipelines == []
def test_seisspark_service_pipeline_module(seisspark_service_client: TestClient) -> None:
response = seisspark_service_client.get("/api/v1/pipelines")
response.raise_for_status()
pipelines: List[PipelineInfo] = pydantic.parse_obj_as(List[PipelineInfo], response.json())
assert pipelines == []
pipeline_name = "test_pipeline"
response = seisspark_service_client.post("/api/v1/pipelines", json=CreatePipelineRequest(name=pipeline_name).dict())
response.raise_for_status()
pipeline_info: PipelineInfo = pydantic.parse_obj_as(PipelineInfo, response.json())
assert pipeline_info.name == pipeline_name
pipeline_id = pipeline_info.id
module_type = "SUimp2d"
module_name = "input"
response = seisspark_service_client.post(f"/api/v1/pipelines/{pipeline_id}/modules", json=CreateModuleRequest(module_type=module_type, name=module_name).dict())
response.raise_for_status()
module_descr: ModuleDescription = pydantic.parse_obj_as(ModuleDescription, response.json())
assert module_descr.name == module_name
module_id = module_descr.id
response = seisspark_service_client.get(f"/api/v1/pipelines/{pipeline_id}/modules/{module_id}/parameters")
response.raise_for_status()
json_parameters = response.json()
assert type(json_parameters) == dict
response = seisspark_service_client.get(f"/api/v1/pipelines/{pipeline_id}/modules/{module_id}/data/0")
response.raise_for_status()
json_data = response.json()
assert type(json_data) == list and type(json_data[0]) == list and type(json_data[0][0]) == float
response = seisspark_service_client.delete(f"/api/v1/pipelines/{pipeline_id}/modules/{module_id}")
response.raise_for_status()
response = seisspark_service_client.delete(f"/api/v1/pipelines/{pipeline_id}")
response.raise_for_status() | 0.659624 | 0.349505 |
import serial
import struct
import binascii
import numpy as np
from time import sleep
from threading import Thread
from matplotlib import pyplot as plt
from matplotlib import animation
# NOTE: the original declared ``global ax1`` ... ``global finish_data`` here
# at module level; ``global`` is a no-op at module scope, so those statements
# have been removed — behavior is unchanged.

# Length (in hex characters) of one telemetry record, excluding the
# 'eeeeeeee' frame separator.
REPORT_DATA_LEN = 66
# Directory where the per-command log file and plot image are written.
DIR_FILE = './'

# Raw hex records, split per wheel, accumulated by the UART reader thread.
Right_Data = []
Left_Data = []

# Decoded right-wheel series: sample index, measured speed, PID error terms
# and encoder count.
R_xs = []
R_v_cur = []
R_err = []
R_err1 = []
R_err2 = []
R_count = []

# Decoded left-wheel series (same layout as the right wheel).
L_xs = []
L_v_cur = []
L_err = []
L_err1 = []
L_err2 = []
L_count = []
def bcc_off(serial):
    """Send the 'BCC off' command frame and block until it is acknowledged.

    Writes the fixed frame A3 3A 00 01 01 00 to *serial*, then polls the
    port, echoing every reply, until a line starting with
    A3 3A 00 01 00 01 (the acknowledgement) is seen.
    """
    global f  # per-command log file handle (opened elsewhere; unused here)
    serial.write(bytes.fromhex('A3 3A 00 01 01 00'))
    while True:
        flag = 0
        # Drain everything currently buffered on the port.
        while serial.inWaiting()>0:
            data = serial.readline()
            print(data,len(data))
            if data[:6] == b'\xA3\x3A\x00\x01\x00\x01':
                print("bcc off")
                flag = 1
                break
        if flag == 1:
            break
def recv(serial):
    """Read one line from *serial*, log it if non-empty, and return it.

    Fix: the original wrapped this body in ``while True`` but returned
    unconditionally at the end of the first iteration, so the loop never
    repeated; the misleading loop has been removed with identical behavior.
    """
    global f  # per-command log file handle, opened by Send_CMD
    data = serial.readline()
    if data != b'':
        # Log received traffic to the command log.
        print("rx: ", data, file=f)
    sleep(0.01)
    return data
def Clear_Buf():
    """Reset every decoded-series buffer to an empty list.

    Bug fix: the original ``global`` statement named only R_xs, R_v_cur,
    L_xs and L_v_cur, so the remaining assignments created function-local
    lists and the module-level error/count buffers were never cleared
    between commands.  All twelve buffers are now declared global.
    """
    global R_xs, R_v_cur, R_err, R_err1, R_err2, R_count
    global L_xs, L_v_cur, L_err, L_err1, L_err2, L_count
    R_xs = []
    R_v_cur = []
    R_err = []
    R_err1 = []
    R_err2 = []
    R_count = []
    L_xs = []
    L_v_cur = []
    L_err = []
    L_err1 = []
    L_err2 = []
    L_count = []
def Send_CMD():
    """Interactive command thread: read commands from stdin, encode them as
    binary frames and write them to the serial port.

    Commands (see the prompt): 'q v w' = linear + angular speed,
    'w v s' = linear speed + distance, 'd w deg' = angular speed + angle.
    Each issued command opens a fresh log file named after the command text
    and clears the telemetry buffers for the new run.
    """
    global f
    global Name_Str
    while True:
        tx_header = "A33A"  # fixed frame preamble
        tx_buf = tx_header
        indata = input("\r\nw [v] [s]: 线速度 距离\r\nd [w] [deg]:角速度 角度\r\nq [v] [w]: 线速度 角速度\r\ninput cmd:")
        cmd_datas = indata.split(" ")
        cmd_i = 0
        flag = 0
        Name_Str = indata  # used later as log/plot file basename
        for cmd_data in cmd_datas:
            print(cmd_data)
            if cmd_i == 0:
                if cmd_data == 'q': # linear speed + angular speed
                    tx_buf += 'A0'
                    tx_buf += '08'
                elif cmd_data == 'w': # linear speed + distance
                    tx_buf += "A1"
                    tx_buf += "08"
                elif cmd_data == 'd': # angular speed + degrees
                    tx_buf += 'A2'
                    tx_buf += '08'
            elif cmd_i == 1:
                bytes_hex1 = struct.pack('>l',int(cmd_data)) # big-endian int32
                str_data1 = str(binascii.b2a_hex(bytes_hex1))[2:-1]
                tx_buf += str_data1
            elif cmd_i == 2:
                bytes_hex2 = struct.pack('>l',int(cmd_data))
                str_data2 = str(binascii.b2a_hex(bytes_hex2))[2:-1]
                tx_buf += str_data2
                flag = 1  # a full 3-field command line was parsed
            cmd_i += 1
        if flag == 1:
            # One log file per issued command, named after the command text.
            f = open(DIR_FILE+Name_Str+'.txt','w')
            print(tx_buf,file = f)
            tx_buf_b = bytes().fromhex(tx_buf)
            serial.write(tx_buf_b)
            Clear_Buf()
def UART_Rx_Decode(data):
    """Split *data* on the 'eeeeeeee' frame separator and route each chunk.

    Complete REPORT_DATA_LEN-character records are appended to Right_Data
    ('01' prefix) or Left_Data ('02' prefix); malformed complete records and
    unrecognised fragments are logged.  An incomplete trailing record is
    returned so the caller can prepend it to the next read.
    """
    global f
    leftover = ''
    for chunk in data.split('eeeeeeee'):
        prefix = chunk[:2]
        if len(chunk) == REPORT_DATA_LEN:
            if prefix == "01":
                Right_Data.append(chunk)
            elif prefix == "02":
                Left_Data.append(chunk)
            else:
                print("error:", chunk, file=f)
        elif prefix == "01" or prefix == "02":
            # Partial record: keep it for the next call.
            leftover = chunk
        else:
            print("rx: ", chunk, file=f)
    return leftover
def UART_Handle():
    """Background reader thread: accumulate serial telemetry.

    Continuously reads lines from the module-global serial port, hex-encodes
    them and feeds them through UART_Rx_Decode, carrying any partial record
    over to the next read.  After 10 consecutive empty reads following data
    (~1 s at the 0.1 s poll interval) it sets finish_data = 1 to tell the
    plotting thread that a telemetry burst is complete.
    """
    global finish_data
    has_data = 0    # 1 once at least one non-empty read was seen
    count = 0       # consecutive empty reads since the last data
    last_data = ''  # partial record carried between reads
    while True:
        data = serial.readline()
        sleep(0.1)
        if data != b'':
            print("...")
            temp = str(binascii.b2a_hex(data))[2:-1] # bytes -> hex str
            last_data = UART_Rx_Decode(last_data+temp)
            has_data = 1
            count = 0
            #finish_data = 0
            #print(temp)
            #print("receive: ",temp)
            #serial.write(data)
        else:
            if 1==has_data:
                count = count+1
                if count > 9:
                    # Burst ended: signal the plotting thread.
                    finish_data = 1
                    has_data = 0
                    print("xx")
def Draw_Init():
    """Animation init callback — unused leftover from matplotlib.animation.

    NOTE(review): ``line1`` and ``line2`` are not defined anywhere in this
    module, so calling this function would raise NameError; it is never
    called.  Kept as-is pending removal.
    """
    line1.set_data([],[])
    line2.set_data([],[])
    return line1,line2,
def Draw_Plot():
    """Decode the accumulated wheel records and plot them once a burst ends.

    Waits for finish_data == 1 (set by the UART thread), unpacks every
    record in Right_Data/Left_Data (8 little-endian int32 fields each,
    after the 2-char wheel prefix), logs the values to the command log
    file, plots speed/error/count curves on the shared axes and saves the
    figure next to the log.
    """
    global ax1
    global ax2
    global ax3
    global ax4
    global ax5
    global ax6
    global f
    global R_xs
    global R_v_cur
    global finish_data
    Err_Count = []  # per-sample difference between the wheel encoder counts
    if finish_data == 1:
        r_len = len(Right_Data)
        l_len = len(Left_Data)
        # min_len = number of samples present for BOTH wheels.
        if r_len >= l_len:
            min_len = l_len
        else:
            min_len = r_len
        print('len:',r_len,l_len,min_len,file = f)
        for i in range(r_len):
            #print(Right_Data)
            # Record layout: num, v_dst, v_cur, err, err1, err2, inc, count
            # as 8 little-endian 32-bit ints; skip the 2-char '01' prefix.
            r_y_str = (Right_Data[i])[2:]
            r_y_hex = bytes.fromhex(r_y_str)
            r_num,r_v_dst,r_v_cur,r_err,r_err1,r_err2,r_inc,r_count = struct.unpack('<llllllll',bytes(r_y_hex))
            r_inc = r_inc/30000  # scale raw increment; '%d' below truncates it
            print("r:%5d %8d %8d %8d %8d %8d %8d %8d"%(r_num,r_v_dst,r_v_cur,r_err,r_err1,r_err2,r_inc,r_count),file = f)
            if r_num != 0:  # sample index 0 is treated as invalid/padding
                R_xs.append(r_num)
                R_v_cur.append(r_v_cur)
                R_err.append(r_err)
                R_err1.append(r_err1)
                R_err2.append(r_err2)
                R_count.append(r_count)
        for i in range(l_len):
            l_y_str = (Left_Data[i])[2:]
            l_y_hex = bytes.fromhex(l_y_str)
            l_num,l_v_dst,l_v_cur,l_err,l_err1,l_err2,l_inc,l_count = struct.unpack('<llllllll',bytes(l_y_hex))
            l_inc = l_inc/30000
            print('l:%5d %8d %8d %8d %8d %8d %8d %8d'%(l_num,l_v_dst,l_v_cur,l_err,l_err1,l_err2,l_inc,l_count),file = f)
            if l_num != 0:
                L_xs.append(l_num)
                L_v_cur.append(l_v_cur)
                L_err.append(l_err)
                L_err1.append(l_err1)
                L_err2.append(l_err2)
                L_count.append(l_count)
        # Drop the last 5 samples when comparing counts (tail is noisy).
        # NOTE(review): with fewer than 5 common samples this goes negative;
        # range() then yields nothing, so Err_Count just stays empty.
        # NOTE(review): R_count/L_count are filtered on num != 0 above, so
        # they may be shorter than min_len — an IndexError is possible here
        # if zero-num records occurred; confirm against device behavior.
        min_len = min_len-5
        for i in range(min_len):
            print(i,R_count[i], L_count[i],(R_count[i]-L_count[i]),file = f)
            Err_Count.append(R_count[i]-L_count[i])
        ax1.plot(R_xs,R_v_cur,'b-')
        ax3.plot(R_xs,R_err,'r-',label='err')
        ax3.plot(R_xs,R_err1,'g-',label='err1')
        ax3.plot(R_xs,R_err2,'b-',label='err2')
        ax5.plot(R_xs,R_count,'r*',label='r_count')
        ax2.plot(L_xs,L_v_cur,'b-')
        ax4.plot(L_xs,L_err,'r-',label='err')
        ax4.plot(L_xs,L_err1,'g-',label='err1')
        ax4.plot(L_xs,L_err2,'b-',label='err2')
        ax5.plot(L_xs,L_count,'g*',label='l_count')
        ax6.plot(range(min_len),Err_Count,'g.',label='err')
        # Close the command log before saving/showing the figure.
        # NOTE(review): f stays closed afterwards; a second burst without a
        # new Send_CMD command would fail on the closed handle.
        f.close()
        plt.savefig(DIR_FILE+Name_Str+'.png',dpi=100)
        plt.show()
        finish_data = 0
        print("show")
def DRAW_Handle():
    """Plotting thread: build the 3x2 figure/axes, then poll Draw_Plot.

    The six axes are published as module globals so Draw_Plot can draw on
    them.  Fix: the original called ax4.set_title('Left error') twice; the
    redundant duplicate call has been removed (no visible change).
    """
    global ax1, ax2, ax3, ax4, ax5, ax6
    fig = plt.figure()
    fig.set_size_inches(18, 10, forward=True)
    ax1 = fig.add_subplot(3, 2, 1)
    ax2 = fig.add_subplot(3, 2, 2)
    ax3 = fig.add_subplot(3, 2, 3)
    ax4 = fig.add_subplot(3, 2, 4)
    ax5 = fig.add_subplot(3, 2, 5)
    ax6 = fig.add_subplot(3, 2, 6)
    ax1.set_title('Right wheel')
    ax2.set_title('Left wheel')
    ax3.set_title('Right error')
    ax4.set_title('Left error')
    ax5.set_title('Count')
    ax6.set_title('Count error')
    # Show a grid on every subplot.
    for ax in (ax1, ax2, ax3, ax4, ax5, ax6):
        ax.grid(True)
    # Poll forever; Draw_Plot only acts when finish_data is set.
    while True:
        Draw_Plot()
        sleep(0.1)
if __name__ == '__main__':
    # NOTE(review): ``global`` at module level is a no-op; kept as written.
    global finish_data
    global Name_Str
    # Rebinds the name ``serial`` from the pyserial module to the opened
    # port object; every helper above uses this module-global handle.
    serial = serial.Serial('COM5', 115200, timeout=0.5)
    if serial.isOpen():
        print("success")
    else:
        print("failed")
    bcc_off(serial)
    finish_data = 0
    # Three worker threads: stdin command loop, serial reader, plotter.
    t1 = Thread(target=Send_CMD,args=())
    t2 = Thread(target=UART_Handle,args=())
    t3 = Thread(target=DRAW_Handle,args=())
    t1.start()
    t2.start()
    t3.start()
import serial
import struct
import binascii
import numpy as np
from time import sleep
from threading import Thread
from matplotlib import pyplot as plt
from matplotlib import animation
global ax1
global ax2
global ax3
global ax4
global ax5
global ax6
global f
global Name_Str
global finish_data
REPORT_DATA_LEN = 66
DIR_FILE = './'
Right_Data = []
Left_Data = []
R_xs = []
R_v_cur = []
R_err = []
R_err1 = []
R_err2 = []
R_count = []
L_xs = []
L_v_cur = []
L_err = []
L_err1 = []
L_err2 = []
L_count = []
def bcc_off(serial):
global f
serial.write(bytes.fromhex('A3 3A 00 01 01 00'))
while True:
flag = 0
while serial.inWaiting()>0:
data = serial.readline()
print(data,len(data))
if data[:6] == b'\xA3\x3A\x00\x01\x00\x01':
print("bcc off")
flag = 1
break
if flag == 1:
break
def recv(serial):
global f
while True:
data = serial.readline()
if data != b'':
print("rx: ",data,file = f)
sleep(0.01)
return data
def Clear_Buf():
global R_xs
global R_v_cur
global L_xs
global L_v_cur
R_xs = []
R_v_cur = []
R_err = []
R_err1 = []
R_err2 = []
R_count = []
L_xs = []
L_v_cur = []
L_err = []
L_err1 = []
L_err2 = []
L_count = []
def Send_CMD():
global f
global Name_Str
while True:
tx_header = "A33A"
tx_buf = tx_header
indata = input("\r\nw [v] [s]: 线速度 距离\r\nd [w] [deg]:角速度 角度\r\nq [v] [w]: 线速度 角速度\r\ninput cmd:")
cmd_datas = indata.split(" ")
cmd_i = 0
flag = 0
Name_Str = indata
for cmd_data in cmd_datas:
print(cmd_data)
if cmd_i == 0:
if cmd_data == 'q': #线速度 角速度
tx_buf += 'A0'
tx_buf += '08'
elif cmd_data == 'w': #线速度 距离
tx_buf += "A1"
tx_buf += "08"
elif cmd_data == 'd': #角速度 度
tx_buf += 'A2'
tx_buf += '08'
elif cmd_i == 1:
bytes_hex1 = struct.pack('>l',int(cmd_data))#大端
str_data1 = str(binascii.b2a_hex(bytes_hex1))[2:-1]
tx_buf += str_data1
elif cmd_i == 2:
bytes_hex2 = struct.pack('>l',int(cmd_data))
str_data2 = str(binascii.b2a_hex(bytes_hex2))[2:-1]
tx_buf += str_data2
flag = 1
cmd_i += 1
if flag == 1:
f = open(DIR_FILE+Name_Str+'.txt','w')
print(tx_buf,file = f)
tx_buf_b = bytes().fromhex(tx_buf)
serial.write(tx_buf_b)
Clear_Buf()
def UART_Rx_Decode(data):
global f
odd_data = ''
decode_datas = data.split('eeeeeeee')
for decode_data in decode_datas:
#print('x:%d ',len(decode_data),decode_data)
if len(decode_data) == REPORT_DATA_LEN:
if decode_data[:2] == "01": #Right_Data
#print('R:',decode_data)
Right_Data.append(decode_data)
elif decode_data[:2] == "02": #Left_Data
Left_Data.append(decode_data)
else:
print("error:",decode_data,file = f)
else:
if decode_data[:2] == "01":
odd_data = decode_data
elif decode_data[:2] == "02":
odd_data = decode_data
else:
print("rx: ",decode_data,file = f)
return odd_data
def UART_Handle():
global finish_data
has_data = 0
count = 0
last_data = ''
while True:
data = serial.readline()
sleep(0.1)
if data != b'':
print("...")
temp = str(binascii.b2a_hex(data))[2:-1] #str
last_data = UART_Rx_Decode(last_data+temp)
has_data = 1
count = 0
#finish_data = 0
#print(temp)
#print("receive: ",temp)
#serial.write(data)
else:
if 1==has_data:
count = count+1
if count > 9:
finish_data = 1
has_data = 0
print("xx")
def Draw_Init():
line1.set_data([],[])
line2.set_data([],[])
return line1,line2,
def Draw_Plot():
global ax1
global ax2
global ax3
global ax4
global ax5
global ax6
global f
global R_xs
global R_v_cur
global finish_data
Err_Count = []
if finish_data == 1:
r_len = len(Right_Data)
l_len = len(Left_Data)
if r_len >= l_len:
min_len = l_len
else:
min_len = r_len
print('len:',r_len,l_len,min_len,file = f)
for i in range(r_len):
#print(Right_Data)
r_y_str = (Right_Data[i])[2:]
r_y_hex = bytes.fromhex(r_y_str)
r_num,r_v_dst,r_v_cur,r_err,r_err1,r_err2,r_inc,r_count = struct.unpack('<llllllll',bytes(r_y_hex))
r_inc = r_inc/30000
print("r:%5d %8d %8d %8d %8d %8d %8d %8d"%(r_num,r_v_dst,r_v_cur,r_err,r_err1,r_err2,r_inc,r_count),file = f)
if r_num != 0:
R_xs.append(r_num)
R_v_cur.append(r_v_cur)
R_err.append(r_err)
R_err1.append(r_err1)
R_err2.append(r_err2)
R_count.append(r_count)
for i in range(l_len):
l_y_str = (Left_Data[i])[2:]
l_y_hex = bytes.fromhex(l_y_str)
l_num,l_v_dst,l_v_cur,l_err,l_err1,l_err2,l_inc,l_count = struct.unpack('<llllllll',bytes(l_y_hex))
l_inc = l_inc/30000
print('l:%5d %8d %8d %8d %8d %8d %8d %8d'%(l_num,l_v_dst,l_v_cur,l_err,l_err1,l_err2,l_inc,l_count),file = f)
if l_num != 0:
L_xs.append(l_num)
L_v_cur.append(l_v_cur)
L_err.append(l_err)
L_err1.append(l_err1)
L_err2.append(l_err2)
L_count.append(l_count)
min_len = min_len-5
for i in range(min_len):
print(i,R_count[i], L_count[i],(R_count[i]-L_count[i]),file = f)
Err_Count.append(R_count[i]-L_count[i])
ax1.plot(R_xs,R_v_cur,'b-')
ax3.plot(R_xs,R_err,'r-',label='err')
ax3.plot(R_xs,R_err1,'g-',label='err1')
ax3.plot(R_xs,R_err2,'b-',label='err2')
ax5.plot(R_xs,R_count,'r*',label='r_count')
ax2.plot(L_xs,L_v_cur,'b-')
ax4.plot(L_xs,L_err,'r-',label='err')
ax4.plot(L_xs,L_err1,'g-',label='err1')
ax4.plot(L_xs,L_err2,'b-',label='err2')
ax5.plot(L_xs,L_count,'g*',label='l_count')
ax6.plot(range(min_len),Err_Count,'g.',label='err')
f.close()
plt.savefig(DIR_FILE+Name_Str+'.png',dpi=100)
plt.show()
finish_data = 0
print("show")
def DRAW_Handle():
global ax1
global ax2
global ax3
global ax4
global ax5
global ax6
fig = plt.figure()
fig.set_size_inches(18,10,forward=True)
ax1 = fig.add_subplot(3,2,1)
ax2 = fig.add_subplot(3,2,2)
ax3 = fig.add_subplot(3,2,3)
ax4 = fig.add_subplot(3,2,4)
ax5 = fig.add_subplot(3,2,5)
ax6 = fig.add_subplot(3,2,6)
ax1.set_title('Right wheel')
ax2.set_title('Left wheel')
ax3.set_title('Right error')
ax4.set_title('Left error')
ax4.set_title('Left error')
ax5.set_title('Count')
ax6.set_title('Count error')
ax1.grid(True) #显示网格
ax2.grid(True)
ax3.grid(True)
ax4.grid(True)
ax5.grid(True)
ax6.grid(True)
while True:
Draw_Plot()
sleep(0.1)
if __name__ == '__main__':
global finish_data
global Name_Str
serial = serial.Serial('COM5', 115200, timeout=0.5)
if serial.isOpen():
print("success")
else:
print("failed")
bcc_off(serial)
finish_data = 0
t1 = Thread(target=Send_CMD,args=())
t2 = Thread(target=UART_Handle,args=())
t3 = Thread(target=DRAW_Handle,args=())
t1.start()
t2.start()
t3.start() | 0.051762 | 0.128006 |
# Python imports
import urllib
import logging
# AppEngine imports
from google.appengine.ext import db
from google.appengine.api import urlfetch
# Django imports
from django.utils import simplejson
# Local imports
import utils
import models
import hashlib
import settings
class LabelProvider(object):
    """ Provides methods to work with saving bookmarks and invites.

    Provides some convenience methods for working with label data structures.
    It may be desirable to refactor this class to be static methods on the
    Label data model sometime in the future.
    """

    def get_by_user(self, user):
        """ Get labels created by the specified user.

        Currently this method only returns the last 100 labels. This could be
        refactored to be more flexible and allow paging.

        Args:
          user: A models.User object.

        Returns:
          A list of models.Label objects, created by the specified user and
          sorted by last modified first.
        """
        query = models.Label.gql("where user=:1 order by updated desc", user)
        return query.fetch(100)

    def get_by_invitee(self, user):
        """ Get labels where the specified user has been marked as an invitee.

        Currently this method only returns the last 100 labels. This could be
        refactored to be more flexible and allow paging.

        Args:
          user: A models.User object.

        Returns:
          A list of models.Label objects where the specified user is in the
          invited user list, or None when the user has no guid.

        TODO: Sort this list by updated date.
        """
        if hasattr(user, "guid"):
            query = models.Label.gql("where invited_guids = :1", user.guid)
            return query.fetch(100)
        return None

    def get_by_key(self, key):
        """ Gets a label by its data store key.

        Returns:
          The data store entry corresponding with the specified key.
        """
        return db.get(key)

    def set(self, user, restaurant, text):
        """ Creates a new label if one does not already exist.

        If the restaurant passed in to this method has not been saved to the
        data store (only cached in memory), it will be stored to enforce the
        reference property on the resulting label.

        Args:
          user: The user who created this label.
          restaurant: The restaurant which is being labeled.
          text: A text description for the label.

        Returns:
          If a label with the same user, restaurant, and text exists, the data
          store entry for that label. Otherwise, the newly created label.
        """
        label = models.Label.gql("where user=:1 and restaurant=:2 and text=:3",
                                 user, restaurant, text).get()
        if label:
            return label
        # Deterministic key so concurrent saves converge on one entity.
        key_name = "label|%s|%s|%s" % (user.provider_id,
                                       restaurant.restaurant_id,
                                       hashlib.sha1(text).hexdigest())
        params = {
            "key_name" : key_name,
            "restaurant" : restaurant,
            "user" : user,
            "text" : text,
        }
        db.put(restaurant)  # May just be locally cached
        return models.Label.get_or_insert(**params)

    def move_user_data(self, from_user, to_user):
        """ Moves all label data from one user to another user account.

        Used in cases where two accounts merge, this migrates all label data
        from the first account to the second account. This includes changing
        the owner on any labels created by the first user, and updating any
        invites which specify the first user, to the second user.

        Args:
          from_user: The user where data is being migrated from.
          to_user: The user who data will be migrated to.
        """
        query = models.Label.gql("where user=:1", from_user)
        for label in query:
            # The new owner should not also appear as an invitee.  Fix: only
            # remove the guid when it is actually present — list.remove()
            # raises ValueError otherwise, which aborted the whole migration
            # part-way through.
            if label.invited_guids and to_user.guid in label.invited_guids:
                label.invited_guids.remove(to_user.guid)
            label.user = to_user
            label.put()
        query = models.Label.gql("where invited_guids = :1", from_user.guid)
        for label in query:
            # Matched by the query, so from_user.guid is present here.
            label.invited_guids.remove(from_user.guid)
            label.invited_guids.append(to_user.guid)
            label.put()
class RestaurantProvider(object):
    """ Provides data about restaurants from the YQL web service.

    The public interface from this class could be adopted to support other
    restaurant providers. Yahoo! local search was chosen because it provides
    rating data and has fairly permissive quota limits.
    """

    def __init__(self):
        """ Constructor.

        Sets which fields will be requested from the YQL query.
        """
        self.restaurant_fields = ", ".join([
            "id", "Title", "Address", "City", "State", "Rating.AverageRating",
            "Rating.TotalRatings", "BusinessUrl", "Categories", "Latitude",
            "Longitude"])

    def _cache_restaurant(self, restaurant):
        """ Stores a restaurant in the memory cache.

        Args:
          restaurant: The object to store in the cache. The restaurant id is
            used as the cache key.
        """
        utils.cache_set(restaurant, "restaurant", restaurant.restaurant_id)

    def _cache_get_restaurant(self, restaurant_id):
        """ Gets a restaurant from the cache.

        Args:
          restaurant_id: The id of the restaurant to retrieve from the cache.

        Returns:
          The data for the restaurant with the corresponding ID, if it existed
          in the cache, else None.
        """
        return utils.cache_get("restaurant", restaurant_id)

    def _convert_result_to_restaurant(self, result):
        """ Converts a YQL search result to a models.Restaurant object.

        Args:
          result: The dict returned by a YQL search.

        Returns:
          An initialized (but not saved) models.Restaurant instance with the
          appropriate properties set to the data from the search result.
        """
        # NOTE(review): only ValueError is handled — a result with no
        # "Rating" key would raise KeyError; confirm YQL always returns it.
        try:
            rating_average = float(result["Rating"]["AverageRating"])
        except ValueError:
            rating_average = -1.0
        try:
            rating_count = int(result["Rating"]["TotalRatings"])
        except ValueError:
            rating_count = 0
        location = db.GeoPt(result["Latitude"], result["Longitude"])
        # "Category" is a dict for a single category, a list for several.
        categories = []
        if isinstance(result["Categories"]["Category"], list):
            for category in result["Categories"]["Category"]:
                categories.append(category["content"])
        else:
            categories.append(result["Categories"]["Category"]["content"])
        params = {
            "key_name" : "restaurant|%s" % result["id"],
            "restaurant_id" : result["id"],
            "name" : result["Title"],
            "address" : result["Address"],
            "city" : result["City"],
            "state" : result["State"],
            "rating_average" : rating_average,
            "rating_count" : rating_count,
            "url" : result["BusinessUrl"],
            "location" : location,
            "categories" : categories,
        }
        return models.Restaurant(**params)

    def _yql_query(self, query):
        """ Performs a query on the YQL web service interface.

        Results are memoized in the cache, keyed by a hash of the query text.

        Args:
          query: A string query in the YQL query syntax.

        Returns:
          A list of result dicts (always a list, even for a single result),
          or None if the query produced no results.
        """
        query_hash = hashlib.sha1(query).hexdigest()
        result = utils.cache_get("yql_query", query_hash)
        if result is None:
            logging.info("Fetching yql query: %s" % query)
            query = urllib.quote_plus(query)
            url = "http://query.yahooapis.com/v1/public/yql?q=%s&format=json" % query
            response = simplejson.loads(urlfetch.fetch(url).content)["query"]
            if response is None or int(response["count"]) == 0:
                return None
            # Result set is inconsistent if there is only one result
            if int(response["count"]) == 1:
                result = [response["results"]["Result"]]
            else:
                result = response["results"]["Result"]
            utils.cache_set(result, "yql_query", query_hash)
        return result

    def _yql_restaurant_search(self, term, location):
        """ Performs a Yahoo! local search, limited to restaurants only.

        This uses the category code for "Restaurants" in the Yahoo! local
        search API to limit results, otherwise searching would return all
        kinds of local businesses.  The number of search results is limited
        to keep this code simple; a more sophisticated implementation would
        allow paging.

        Args:
          term: The search term to use.
          location: A string representing the location to search in.

        Returns:
          A list of models.Restaurant objects corresponding to the results of
          the query, or None if no restaurants were found.
        """
        query_params = {
            "query" : term,
            "location" : location,
            "category" : "96926236", # Code for 'Restaurants'
            "limit" : settings.SEARCH_RESULTS,
            "offset" : 0,
            "fields" : self.restaurant_fields,
        }
        query = " ".join([
            'select %(fields)s from local.search where',
            'query="%(query)s"',
            'and location="%(location)s"',
            'and category="%(category)s"',
            'limit %(limit)s',
            'offset %(offset)s',
        ]) % query_params
        result = self._yql_query(query)
        if result is None:
            restaurants = None
        else:
            restaurants = map(self._convert_result_to_restaurant, result)
        return restaurants

    def _yql_restaurant_get(self, restaurant_id):
        """ Gets a single restaurant's data from Yahoo! local search.

        Args:
          restaurant_id: The ID of the restaurant to fetch.

        Returns:
          The models.Restaurant object for the restaurant with the given ID,
          or None if no results were found.
        """
        params = {
            "id" : restaurant_id,
            "fields" : self.restaurant_fields,
        }
        query = 'select %(fields)s from local.search(1) where id=%(id)s' % params
        result = self._yql_query(query)
        if result:
            # Fix: _yql_query always returns a *list* of result dicts; the
            # original passed the whole list to _convert_result_to_restaurant,
            # which indexes it like a dict and raised TypeError.
            return self._convert_result_to_restaurant(result[0])
        return None

    def search(self, term, location):
        """ Performs a search for restaurants in a specific location.

        Results returned from this method will be models.Restaurant
        instances, but not necessarily written to the data store, to prevent
        the store being flooded with every search result. This way, we only
        store restaurants that have been bookmarked by at least one user.

        Args:
          term: The search term to use.
          location: A string representing the location to search in.

        Returns:
          A list of models.Restaurant objects corresponding to the results of
          the query, or None if no restaurants were found.
        """
        restaurants = self._yql_restaurant_search(term, location)
        if restaurants:
            for restaurant in restaurants:
                self._cache_restaurant(restaurant)
        return restaurants

    def get_restaurant(self, restaurant_id):
        """ Gets a single restaurant by ID, trying the cache first.

        Args:
          restaurant_id: The ID of the restaurant to fetch.

        Returns:
          The models.Restaurant object for the restaurant with the given ID,
          or None if no results were found.
        """
        restaurant = self._cache_get_restaurant(restaurant_id)
        if restaurant is None:
            restaurant = self._yql_restaurant_get(restaurant_id)
            if restaurant is not None:
                # Fix: was self.cache_restaurant — no such attribute, so every
                # cache miss raised AttributeError.  Also skip caching None.
                self._cache_restaurant(restaurant)
        return restaurant
import urllib
import logging
# AppEngine imports
from google.appengine.ext import db
from google.appengine.api import urlfetch
# Django imports
from django.utils import simplejson
# Local imports
import utils
import models
import hashlib
import settings
class LabelProvider(object):
""" Provides methods to work with saving bookmarks and invites.
Provides some convenience methods for working with label data structures.
It may be desirable to refactor this class to be static methods on the
Label data model sometime in the future.
"""
def get_by_user(self, user):
""" Get labels created by the specified user.
Currently this method only returns the last 100 labels. This could be
refactored to be more flexible and allow paging.
Args:
user: A models.User object.
Returns:
A list of models.Label objects, created by the specified user and sorted
by last modified first.
"""
query = models.Label.gql("where user=:1 order by updated desc", user)
return query.fetch(100)
def get_by_invitee(self, user):
""" Get labels where the specified user has been marked as an invitee.
Currently this method only returns the last 100 labels. This could be
refactored to be more flexible and allow paging.
Args:
user: A models.User object.
Returns:
A list of models.Label objects, where the specified user is in the
invited user list.
TODO: Sort this list by updated date.
"""
if hasattr(user, "guid"):
query = models.Label.gql("where invited_guids = :1", user.guid)
return query.fetch(100)
return None
def get_by_key(self, key):
""" Gets a label by its data store key.
Returns:
The data store entry corresponding with the specified key.
"""
return db.get(key)
def set(self, user, restaurant, text):
""" Creates a new label if one does not already exist.
If the restaurant passed in to this method has not been saved to the data
store (only cached in memory), it will be stored to enforce the reference
property on the resulting label.
Args:
user: The user who created this label.
restaurant: The restaurant which is being labeled.
text: A text description for the label.
Returns:
If a label with the same user, restaurant, and text exists, the data
store entry for that label. Otherwise, the newly created label.
"""
label = models.Label.gql("where user=:1 and restaurant=:2 and text=:3",
user, restaurant, text).get()
if label:
return label
key_name = "label|%s|%s|%s" % (user.provider_id, restaurant.restaurant_id,
hashlib.sha1(text).hexdigest())
params = {
"key_name" : key_name,
"restaurant" : restaurant,
"user" : user,
"text" : text,
}
db.put(restaurant) # May just be locally cached
return models.Label.get_or_insert(**params)
def move_user_data(self, from_user, to_user):
    """ Moves all label data from one user to another user account.

    Used in cases where two accounts merge, this migrates all label data
    from the first account to the second account. This includes changing
    the owner on any labels created by the first user, and updating any
    invites which specify the first user, to the second user.

    Args:
      from_user: The user where data is being migrated from.
      to_user: The user who data will be migrated to.
    """
    query = models.Label.gql("where user=:1", from_user)
    for label in query:
        # BUG FIX: list.remove raises ValueError when the element is absent;
        # the new owner is usually NOT in the invite list, so check first.
        # (The invitation becomes redundant once to_user owns the label.)
        if label.invited_guids and to_user.guid in label.invited_guids:
            label.invited_guids.remove(to_user.guid)
        label.user = to_user
        label.put()
    query = models.Label.gql("where invited_guids = :1", from_user.guid)
    for label in query:
        # The query guarantees from_user.guid is present here, so a plain
        # remove is safe; replace it with the merged account's guid.
        label.invited_guids.remove(from_user.guid)
        label.invited_guids.append(to_user.guid)
        label.put()
class RestaurantProvider(object):
    """ Provides data about restaurants from the YQL web service.

    The public interface from this class could be adopted to support other
    restaurant providers. Yahoo! local search was chosen because it provides
    rating data and has fairly permissive quota limits.
    """

    def __init__(self):
        """ Constructor.

        Sets which fields will be requested from the YQL query.
        """
        self.restaurant_fields = ", ".join([
            "id", "Title", "Address", "City", "State", "Rating.AverageRating",
            "Rating.TotalRatings", "BusinessUrl", "Categories", "Latitude",
            "Longitude"])

    def _cache_restaurant(self, restaurant):
        """ Stores a restaurant in the memory cache.

        Args:
          restaurant: The object to store in the cache. The restaurant id is
            used as a cache key.
        """
        utils.cache_set(restaurant, "restaurant", restaurant.restaurant_id)

    def _cache_get_restaurant(self, restaurant_id):
        """ Gets a restaurant from the cache.

        Args:
          restaurant_id: The id of the restaurant to retrieve from the cache.

        Returns:
          The data for the restaurant with the corresponding ID, if it existed
          in the cache, else None.
        """
        return utils.cache_get("restaurant", restaurant_id)

    def _convert_result_to_restaurant(self, result):
        """ Converts a YQL search result to a models.Restaurant object.

        Args:
          result: The dict returned by a YQL search.

        Returns:
          An initialized (but not saved) models.Restaurant instance with the
          appropriate properties set to the data from the search result.
        """
        # Rating fields may be missing entirely, be None, or hold non-numeric
        # text; fall back to sentinel values in all of those cases.
        try:
            rating_average = float(result["Rating"]["AverageRating"])
        except (KeyError, TypeError, ValueError):
            rating_average = -1.0
        try:
            rating_count = int(result["Rating"]["TotalRatings"])
        except (KeyError, TypeError, ValueError):
            rating_count = 0
        location = db.GeoPt(result["Latitude"], result["Longitude"])
        categories = []
        # YQL returns a dict for a single category and a list for several.
        if isinstance(result["Categories"]["Category"], list):
            for category in result["Categories"]["Category"]:
                categories.append(category["content"])
        else:
            categories.append(result["Categories"]["Category"]["content"])
        params = {
            "key_name" : "restaurant|%s" % result["id"],
            "restaurant_id" : result["id"],
            "name" : result["Title"],
            "address" : result["Address"],
            "city" : result["City"],
            "state" : result["State"],
            "rating_average" : rating_average,
            "rating_count" : rating_count,
            "url" : result["BusinessUrl"],
            "location" : location,
            "categories" : categories,
        }
        return models.Restaurant(**params)

    def _yql_query(self, query):
        """ Performs a query on the YQL web service interface.

        Args:
          query: A string query in the YQL query syntax.

        Returns:
          If a result was returned, a list of dicts representing the data
          structure which was passed back. None otherwise.
        """
        query_hash = hashlib.sha1(query).hexdigest()
        result = utils.cache_get("yql_query", query_hash)
        if result is None:
            logging.info("Fetching yql query: %s" % query)
            query = urllib.quote_plus(query)
            url = "http://query.yahooapis.com/v1/public/yql?q=%s&format=json" % query
            response = simplejson.loads(urlfetch.fetch(url).content)["query"]
            if response is None or int(response["count"]) == 0:
                return None
            # Result set is inconsistent if there is only one result
            if int(response["count"]) == 1:
                result = [response["results"]["Result"]]
            else:
                result = response["results"]["Result"]
            utils.cache_set(result, "yql_query", query_hash)
        return result

    def _yql_restaurant_search(self, term, location):
        """ Performs a Yahoo! local search, limiting results to restaurants only.

        This uses the category code for "Restaurants" in the Yahoo! local search
        API to limit results, otherwise searching would return all kinds of
        local businesses.

        The number of search results are limited to keep this code simple. A
        more sophisticated implementation would allow paging.

        Args:
          term: The search term to use.
          location: A string representing the location to search in.

        Returns:
          A list of models.Restaurant objects corresponding to the results of
          the query, or None if no restaurants were found.
        """
        query_params = {
            "query" : term,
            "location" : location,
            "category" : "96926236", # Code for 'Restaurants'
            "limit" : settings.SEARCH_RESULTS,
            "offset" : 0,
            "fields" : self.restaurant_fields,
        }
        query = " ".join([
            'select %(fields)s from local.search where',
            'query="%(query)s"',
            'and location="%(location)s"',
            'and category="%(category)s"',
            'limit %(limit)s',
            'offset %(offset)s',
        ]) % query_params
        result = self._yql_query(query)
        if result is None:
            restaurants = None
        else:
            restaurants = map(self._convert_result_to_restaurant, result)
        return restaurants

    def _yql_restaurant_get(self, restaurant_id):
        """ Gets a single restaurant's data.

        This method performs a query for a single restaurant, returning the
        results from the Yahoo! local search.

        Args:
          restaurant_id: The ID of the restaurant to fetch.

        Returns:
          The models.Restaurant object with the data corresponding to the
          restaurant with the given ID number, or None if no results were
          found.
        """
        params = {
            "id" : restaurant_id,
            "fields" : self.restaurant_fields,
        }
        query = 'select %(fields)s from local.search(1) where id=%(id)s' % params
        result = self._yql_query(query)
        if result:
            # BUG FIX: _yql_query always returns a *list* of result dicts;
            # the converter expects a single dict, so unwrap the only entry.
            return self._convert_result_to_restaurant(result[0])
        return None

    def search(self, term, location):
        """ Performs a search for restaurants in a specific location.

        Results returned from this method will be models.Restaurant instances,
        but not necessarily written to the data store, to prevent the store
        being flooded with every search result. This way, we only store
        restaurants that have been bookmarked by at least one user.

        Args:
          term: The search term to use.
          location: A string representing the location to search in.

        Returns:
          A list of models.Restaurant objects corresponding to the results of
          the query, or None if no restaurants were found.
        """
        restaurants = self._yql_restaurant_search(term, location)
        if restaurants:
            for restaurant in restaurants:
                self._cache_restaurant(restaurant)
        return restaurants

    def get_restaurant(self, restaurant_id):
        """ Gets a single restaurant by ID.

        Args:
          restaurant_id: The ID of the restaurant to fetch.

        Returns:
          The models.Restaurant object with the data corresponding to the
          restaurant with the given ID number, or None if no results were
          found.
        """
        restaurant = self._cache_get_restaurant(restaurant_id)
        if restaurant is None:
            restaurant = self._yql_restaurant_get(restaurant_id)
            if restaurant is not None:
                # BUG FIX: was self.cache_restaurant (no such attribute; the
                # method is _cache_restaurant), which raised AttributeError on
                # every cache miss. Also avoid caching a failed (None) lookup.
                self._cache_restaurant(restaurant)
        return restaurant
import unittest
import pandas as pd
from etl.helpers.field_mapping import common
from etl.helpers.field_mapping.common import FieldMapping
class TestCommonMethods(unittest.TestCase):
    """Tests for module-level helpers in field_mapping.common."""

    def test_get_field_mapping_filename(self):
        # The helper should join the directory and field name with a .csv suffix.
        result = common.get_field_mapping_filename("field_name", "random_dir/")
        self.assertEqual("random_dir/field_name.csv", result)
class TestFieldMapping(unittest.TestCase):
    """Round-trip tests for FieldMapping's dict and DataFrame representations."""

    def test_get_field_mapping_df(self):
        field_mapping = FieldMapping.from_dict(
            {
                "sample_input": ("sample_output", "yes"),
                "sample_input2": ("sample_output2", "no"),
            }
        )
        actual_field_mapping_df = field_mapping.get_field_mapping_df()
        expected_field_mapping_df = pd.DataFrame(
            data={
                "Input": ["sample_input", "sample_input2"],
                "Output": ["sample_output", "sample_output2"],
                "Approved": ["yes", "no"],
            }
        )
        # FIX: pd.util.testing was deprecated in pandas 1.0 and removed in 2.0;
        # the supported public API is pd.testing.assert_frame_equal.
        pd.testing.assert_frame_equal(
            expected_field_mapping_df, actual_field_mapping_df
        )

    def test_get_field_mapping_dict(self):
        field_mapping_df = pd.DataFrame(
            data={
                "Input": ["sample_input", "sample_input2"],
                "Output": ["sample_output", "sample_output2"],
                "Approved": ["yes", "no"],
            }
        )
        field_mapping = FieldMapping.from_dataframe(field_mapping_df)
        actual_field_mapping_dict = field_mapping.get_field_mapping_dict()
        expected_field_mapping_dict = {
            "sample_input": ("sample_output", "yes"),
            "sample_input2": ("sample_output2", "no"),
        }
        self.assertDictEqual(expected_field_mapping_dict, actual_field_mapping_dict)

    def test_if_empty_mapping_is_empty_should_be_True(self):
        field_mapping = FieldMapping.from_dict({})
        self.assertTrue(field_mapping.is_empty())

    def test_if_empty_mapping_is_not_empty_should_be_False(self):
        field_mapping = FieldMapping.from_dict({"input": ("output", "No")})
        self.assertFalse(field_mapping.is_empty())
if __name__ == "__main__":
    unittest.main()
import pandas as pd
from etl.helpers.field_mapping import common
from etl.helpers.field_mapping.common import FieldMapping
class TestCommonMethods(unittest.TestCase):
    """Tests for module-level helpers in field_mapping.common."""

    def test_get_field_mapping_filename(self):
        # The helper should join the directory and field name with a .csv suffix.
        result = common.get_field_mapping_filename("field_name", "random_dir/")
        self.assertEqual("random_dir/field_name.csv", result)
class TestFieldMapping(unittest.TestCase):
    """Round-trip tests for FieldMapping's dict and DataFrame representations."""

    def test_get_field_mapping_df(self):
        field_mapping = FieldMapping.from_dict(
            {
                "sample_input": ("sample_output", "yes"),
                "sample_input2": ("sample_output2", "no"),
            }
        )
        actual_field_mapping_df = field_mapping.get_field_mapping_df()
        expected_field_mapping_df = pd.DataFrame(
            data={
                "Input": ["sample_input", "sample_input2"],
                "Output": ["sample_output", "sample_output2"],
                "Approved": ["yes", "no"],
            }
        )
        # FIX: pd.util.testing was deprecated in pandas 1.0 and removed in 2.0;
        # the supported public API is pd.testing.assert_frame_equal.
        pd.testing.assert_frame_equal(
            expected_field_mapping_df, actual_field_mapping_df
        )

    def test_get_field_mapping_dict(self):
        field_mapping_df = pd.DataFrame(
            data={
                "Input": ["sample_input", "sample_input2"],
                "Output": ["sample_output", "sample_output2"],
                "Approved": ["yes", "no"],
            }
        )
        field_mapping = FieldMapping.from_dataframe(field_mapping_df)
        actual_field_mapping_dict = field_mapping.get_field_mapping_dict()
        expected_field_mapping_dict = {
            "sample_input": ("sample_output", "yes"),
            "sample_input2": ("sample_output2", "no"),
        }
        self.assertDictEqual(expected_field_mapping_dict, actual_field_mapping_dict)

    def test_if_empty_mapping_is_empty_should_be_True(self):
        field_mapping = FieldMapping.from_dict({})
        self.assertTrue(field_mapping.is_empty())

    def test_if_empty_mapping_is_not_empty_should_be_False(self):
        field_mapping = FieldMapping.from_dict({"input": ("output", "No")})
        self.assertFalse(field_mapping.is_empty())
if __name__ == "__main__":
    unittest.main()
"""Cache model."""
__all__ = ['CacheCell']
import mxnet as mx
from mxnet.gluon import HybridBlock
class CacheCell(HybridBlock):
    r"""Cache language model.

    We implement the neural cache language model proposed in the following work::

        @article{grave2016improving,
        title={Improving neural language models with a continuous cache},
        author={<NAME> <NAME>},
        journal={ICLR},
        year={2017}
        }

    Parameters
    ----------
    lm_model : gluonnlp.model.StandardRNN or gluonnlp.model.AWDRNN
        The type of RNN to use. Options are 'gluonnlp.model.StandardRNN', 'gluonnlp.model.AWDRNN'.
    vocab_size : int
        Size of the input vocabulary.
    window : int
        Size of cache window
    theta : float
        The scala controls the flatness of the cache distribution
        that predict the next word as shown below:

        .. math::

            p_{cache} \propto \sum_{i=1}^{t-1} \mathbb{1}_{w=x_{i+1}} exp(\theta {h_t}^T h_i)

        where :math:`p_{cache}` is the cache distribution, :math:`\mathbb{1}` is
        the identity function, and :math:`h_i` is the output of timestep i.
    lambdas : float
        Linear scalar between only cache and vocab distribution, the formulation is as below:

        .. math::

            p = (1 - \lambda) p_{vocab} + \lambda p_{cache}

        where :math:`p_{vocab}` is the vocabulary distribution and :math:`p_{cache}`
        is the cache distribution.
    """

    def __init__(self, lm_model, vocab_size, window, theta, lambdas, **kwargs):
        super(CacheCell, self).__init__(**kwargs)
        self._vocab_size = vocab_size
        self._window = window
        self._theta = theta
        self._lambdas = lambdas
        with self.name_scope():
            self.lm_model = lm_model

    def save_parameters(self, filename):
        """Save parameters to file.

        filename : str
            Path to file.
        """
        self.lm_model.save_parameters(filename)

    def load_parameters(self, filename, ctx=mx.cpu()):  # pylint: disable=arguments-differ
        """Load parameters from file.

        filename : str
            Path to parameter file.
        ctx : Context or list of Context, default cpu()
            Context(s) initialize loaded parameters on.
        """
        self.lm_model.load_parameters(filename, ctx=ctx)

    def begin_state(self, *args, **kwargs):
        """Initialize the hidden states.
        """
        return self.lm_model.begin_state(*args, **kwargs)

    def __call__(self, inputs, target, next_word_history, cache_history, begin_state=None):
        # pylint: disable=arguments-differ
        """Defines the forward computation for cache cell. Arguments can be either
        :py:class:`NDArray` or :py:class:`Symbol`.

        Parameters
        ----------
        inputs: NDArray or Symbol
            The input data
        target: NDArray or Symbol
            The label
        next_word_history: NDArray or Symbol
            The next word in memory
        cache_history: NDArray or Symbol
            The hidden state in cache history
        begin_state: list of NDArray or Symbol, optional
            The begin states.

        Returns
        --------
        out: NDArray or Symbol
            The linear interpolation of the cache language model
            with the regular word-level language model
        next_word_history: NDArray or Symbol
            The next words to be kept in the memory for look up
            (size is equal to the window size)
        cache_history: NDArray or Symbol
            The hidden states to be kept in the memory for look up
            (size is equal to the window size)
        """
        # XXX Temporary hack for hybridization as hybridblock does not support None inputs
        begin_state = [] if begin_state is None else begin_state
        return super(CacheCell, self).__call__(inputs, target, next_word_history,
                                               cache_history, begin_state)

    def hybrid_forward(self, F, inputs, target, next_word_history, cache_history, begin_state=None):
        # pylint: disable=arguments-differ
        """Defines the forward computation for cache cell. Arguments can be either
        :py:class:`NDArray` or :py:class:`Symbol`.

        Parameters
        ----------
        inputs: NDArray or Symbol
            The input data
        target: NDArray or Symbol
            The label
        next_word_history: NDArray or Symbol
            The next word in memory
        cache_history: NDArray or Symbol
            The hidden state in cache history
        begin_state: list of NDArray or Symbol, optional
            The begin states.

        Returns
        --------
        out: NDArray or Symbol
            The linear interpolation of the cache language model
            with the regular word-level language model
        next_word_history: NDArray or Symbol
            The next words to be kept in the memory for look up
            (size is equal to the window size)
        cache_history: NDArray or Symbol
            The hidden states to be kept in the memory for look up
            (size is equal to the window size)
        """
        # XXX Temporary hack for hybridization as hybridblock does not support None inputs
        if isinstance(begin_state, list) and len(begin_state) == 0:
            begin_state = None
        output, hidden, encoder_hs, _ = super(self.lm_model.__class__, self.lm_model).\
            hybrid_forward(F, inputs, begin_state)
        encoder_h = encoder_hs[-1].reshape(-3, -2)
        output = output.reshape(-1, self._vocab_size)

        start_idx = len(next_word_history) \
            if next_word_history is not None else 0
        # Append one-hot encodings of the targets (and the hidden states) to
        # the running cache; on the first call the history starts from scratch.
        next_word_history = F.concat(*[F.one_hot(t[0], self._vocab_size, on_value=1, off_value=0)
                                       for t in target], dim=0) if next_word_history is None \
            else F.concat(next_word_history,
                          F.concat(*[F.one_hot(t[0], self._vocab_size, on_value=1, off_value=0)
                                     for t in target], dim=0), dim=0)
        cache_history = encoder_h if cache_history is None \
            else F.concat(cache_history, encoder_h, dim=0)

        out = None
        softmax_output = F.softmax(output)
        for idx, vocab_L in enumerate(softmax_output):
            joint_p = vocab_L
            if start_idx + idx > self._window:
                # Attend over at most `window` previous (word, hidden) pairs.
                valid_next_word = next_word_history[start_idx + idx - self._window:start_idx + idx]
                valid_cache_history = cache_history[start_idx + idx - self._window:start_idx + idx]
                logits = F.dot(valid_cache_history, encoder_h[idx])
                cache_attn = F.softmax(self._theta * logits).reshape(-1, 1)
                cache_dist = (cache_attn.broadcast_to(valid_next_word.shape)
                              * valid_next_word).sum(axis=0)
                joint_p = self._lambdas * cache_dist + (1 - self._lambdas) * vocab_L
            out = joint_p[target[idx]] if out is None \
                else F.concat(out, joint_p[target[idx]], dim=0)
        # Keep only the most recent `window` entries for the next call.
        next_word_history = next_word_history[-self._window:]
        cache_history = cache_history[-self._window:]
        # FIX: the final return line was corrupted by fused dataset metadata
        # ("| src/gluonnlp/... |"), which made the module unparseable.
        return out, next_word_history, cache_history, hidden
__all__ = ['CacheCell']
import mxnet as mx
from mxnet.gluon import HybridBlock
class CacheCell(HybridBlock):
    r"""Cache language model.

    We implement the neural cache language model proposed in the following work::

        @article{grave2016improving,
        title={Improving neural language models with a continuous cache},
        author={<NAME> <NAME>},
        journal={ICLR},
        year={2017}
        }

    Parameters
    ----------
    lm_model : gluonnlp.model.StandardRNN or gluonnlp.model.AWDRNN
        The type of RNN to use. Options are 'gluonnlp.model.StandardRNN', 'gluonnlp.model.AWDRNN'.
    vocab_size : int
        Size of the input vocabulary.
    window : int
        Size of cache window
    theta : float
        The scala controls the flatness of the cache distribution
        that predict the next word as shown below:

        .. math::

            p_{cache} \propto \sum_{i=1}^{t-1} \mathbb{1}_{w=x_{i+1}} exp(\theta {h_t}^T h_i)

        where :math:`p_{cache}` is the cache distribution, :math:`\mathbb{1}` is
        the identity function, and :math:`h_i` is the output of timestep i.
    lambdas : float
        Linear scalar between only cache and vocab distribution, the formulation is as below:

        .. math::

            p = (1 - \lambda) p_{vocab} + \lambda p_{cache}

        where :math:`p_{vocab}` is the vocabulary distribution and :math:`p_{cache}`
        is the cache distribution.
    """

    def __init__(self, lm_model, vocab_size, window, theta, lambdas, **kwargs):
        super(CacheCell, self).__init__(**kwargs)
        self._vocab_size = vocab_size
        self._window = window
        self._theta = theta
        self._lambdas = lambdas
        with self.name_scope():
            self.lm_model = lm_model

    def save_parameters(self, filename):
        """Save parameters to file.

        filename : str
            Path to file.
        """
        self.lm_model.save_parameters(filename)

    def load_parameters(self, filename, ctx=mx.cpu()):  # pylint: disable=arguments-differ
        """Load parameters from file.

        filename : str
            Path to parameter file.
        ctx : Context or list of Context, default cpu()
            Context(s) initialize loaded parameters on.
        """
        self.lm_model.load_parameters(filename, ctx=ctx)

    def begin_state(self, *args, **kwargs):
        """Initialize the hidden states.
        """
        return self.lm_model.begin_state(*args, **kwargs)

    def __call__(self, inputs, target, next_word_history, cache_history, begin_state=None):
        # pylint: disable=arguments-differ
        """Defines the forward computation for cache cell. Arguments can be either
        :py:class:`NDArray` or :py:class:`Symbol`.

        Parameters
        ----------
        inputs: NDArray or Symbol
            The input data
        target: NDArray or Symbol
            The label
        next_word_history: NDArray or Symbol
            The next word in memory
        cache_history: NDArray or Symbol
            The hidden state in cache history
        begin_state: list of NDArray or Symbol, optional
            The begin states.

        Returns
        --------
        out: NDArray or Symbol
            The linear interpolation of the cache language model
            with the regular word-level language model
        next_word_history: NDArray or Symbol
            The next words to be kept in the memory for look up
            (size is equal to the window size)
        cache_history: NDArray or Symbol
            The hidden states to be kept in the memory for look up
            (size is equal to the window size)
        """
        # XXX Temporary hack for hybridization as hybridblock does not support None inputs
        begin_state = [] if begin_state is None else begin_state
        return super(CacheCell, self).__call__(inputs, target, next_word_history,
                                               cache_history, begin_state)

    def hybrid_forward(self, F, inputs, target, next_word_history, cache_history, begin_state=None):
        # pylint: disable=arguments-differ
        """Defines the forward computation for cache cell. Arguments can be either
        :py:class:`NDArray` or :py:class:`Symbol`.

        Parameters
        ----------
        inputs: NDArray or Symbol
            The input data
        target: NDArray or Symbol
            The label
        next_word_history: NDArray or Symbol
            The next word in memory
        cache_history: NDArray or Symbol
            The hidden state in cache history
        begin_state: list of NDArray or Symbol, optional
            The begin states.

        Returns
        --------
        out: NDArray or Symbol
            The linear interpolation of the cache language model
            with the regular word-level language model
        next_word_history: NDArray or Symbol
            The next words to be kept in the memory for look up
            (size is equal to the window size)
        cache_history: NDArray or Symbol
            The hidden states to be kept in the memory for look up
            (size is equal to the window size)
        """
        # XXX Temporary hack for hybridization as hybridblock does not support None inputs
        if isinstance(begin_state, list) and len(begin_state) == 0:
            begin_state = None
        output, hidden, encoder_hs, _ = super(self.lm_model.__class__, self.lm_model).\
            hybrid_forward(F, inputs, begin_state)
        encoder_h = encoder_hs[-1].reshape(-3, -2)
        output = output.reshape(-1, self._vocab_size)

        start_idx = len(next_word_history) \
            if next_word_history is not None else 0
        # Append one-hot encodings of the targets (and the hidden states) to
        # the running cache; on the first call the history starts from scratch.
        next_word_history = F.concat(*[F.one_hot(t[0], self._vocab_size, on_value=1, off_value=0)
                                       for t in target], dim=0) if next_word_history is None \
            else F.concat(next_word_history,
                          F.concat(*[F.one_hot(t[0], self._vocab_size, on_value=1, off_value=0)
                                     for t in target], dim=0), dim=0)
        cache_history = encoder_h if cache_history is None \
            else F.concat(cache_history, encoder_h, dim=0)

        out = None
        softmax_output = F.softmax(output)
        for idx, vocab_L in enumerate(softmax_output):
            joint_p = vocab_L
            if start_idx + idx > self._window:
                # Attend over at most `window` previous (word, hidden) pairs.
                valid_next_word = next_word_history[start_idx + idx - self._window:start_idx + idx]
                valid_cache_history = cache_history[start_idx + idx - self._window:start_idx + idx]
                logits = F.dot(valid_cache_history, encoder_h[idx])
                cache_attn = F.softmax(self._theta * logits).reshape(-1, 1)
                cache_dist = (cache_attn.broadcast_to(valid_next_word.shape)
                              * valid_next_word).sum(axis=0)
                joint_p = self._lambdas * cache_dist + (1 - self._lambdas) * vocab_L
            out = joint_p[target[idx]] if out is None \
                else F.concat(out, joint_p[target[idx]], dim=0)
        # Keep only the most recent `window` entries for the next call.
        next_word_history = next_word_history[-self._window:]
        cache_history = cache_history[-self._window:]
        # FIX: the final return line was corrupted by fused dataset metadata
        # ("| 0.92984 | ... |"), which made the module unparseable.
        return out, next_word_history, cache_history, hidden
import time
import warnings
from typing import Any, Dict, List, Optional, Sequence, Union
from googleapiclient.discovery import build
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class DatastoreHook(GoogleBaseHook):
    """
    Interact with Google Cloud Datastore. This hook uses the Google Cloud Platform connection.

    This object is not threads safe. If you want to make multiple requests
    simultaneously, you will need to create a hook per thread.

    :param api_version: The version of the API it is going to connect to.
    :type api_version: str
    """

    def __init__(
        self,
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        api_version: str = 'v1',
        datastore_conn_id: Optional[str] = None
    ) -> None:
        if datastore_conn_id:
            warnings.warn(
                "The datastore_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.", DeprecationWarning, stacklevel=2)
            gcp_conn_id = datastore_conn_id
        super().__init__(
            gcp_conn_id=gcp_conn_id,
            delegate_to=delegate_to,
            impersonation_chain=impersonation_chain,
        )
        # Lazily-built googleapiclient Resource, cached by get_conn().
        self.connection = None
        self.api_version = api_version

    def get_conn(self) -> Any:
        """
        Establishes a connection to the Google API.

        :return: a Google Cloud Datastore service object.
        :rtype: Resource
        """
        if not self.connection:
            http_authorized = self._authorize()
            self.connection = build('datastore', self.api_version, http=http_authorized,
                                    cache_discovery=False)
        return self.connection

    @GoogleBaseHook.fallback_to_default_project_id
    def allocate_ids(self, partial_keys: List, project_id: str) -> List:
        """
        Allocate IDs for incomplete keys.

        .. seealso::
            https://cloud.google.com/datastore/docs/reference/rest/v1/projects/allocateIds

        :param partial_keys: a list of partial keys.
        :type partial_keys: list
        :param project_id: Google Cloud Platform project ID against which to make the request.
        :type project_id: str
        :return: a list of full keys.
        :rtype: list
        """
        conn = self.get_conn()  # type: Any
        resp = (conn  # pylint: disable=no-member
                .projects()
                .allocateIds(projectId=project_id, body={'keys': partial_keys})
                .execute(num_retries=self.num_retries))
        return resp['keys']

    @GoogleBaseHook.fallback_to_default_project_id
    def begin_transaction(self, project_id: str, transaction_options: Dict[str, Any]) -> str:
        """
        Begins a new transaction.

        .. seealso::
            https://cloud.google.com/datastore/docs/reference/rest/v1/projects/beginTransaction

        :param project_id: Google Cloud Platform project ID against which to make the request.
        :type project_id: str
        :param transaction_options: Options for a new transaction.
        :type transaction_options: Dict[str, Any]
        :return: a transaction handle.
        :rtype: str
        """
        conn = self.get_conn()  # type: Any
        resp = (conn  # pylint: disable=no-member
                .projects()
                .beginTransaction(projectId=project_id, body={"transactionOptions": transaction_options})
                .execute(num_retries=self.num_retries))
        return resp['transaction']

    @GoogleBaseHook.fallback_to_default_project_id
    def commit(self, body: Dict, project_id: str) -> Dict:
        """
        Commit a transaction, optionally creating, deleting or modifying some entities.

        .. seealso::
            https://cloud.google.com/datastore/docs/reference/rest/v1/projects/commit

        :param body: the body of the commit request.
        :type body: dict
        :param project_id: Google Cloud Platform project ID against which to make the request.
        :type project_id: str
        :return: the response body of the commit request.
        :rtype: dict
        """
        conn = self.get_conn()  # type: Any
        resp = (conn  # pylint: disable=no-member
                .projects()
                .commit(projectId=project_id, body=body)
                .execute(num_retries=self.num_retries))
        return resp

    @GoogleBaseHook.fallback_to_default_project_id
    def lookup(
        self,
        keys: List,
        project_id: str,
        read_consistency: Optional[str] = None,
        transaction: Optional[str] = None,
    ) -> Dict:
        """
        Lookup some entities by key.

        .. seealso::
            https://cloud.google.com/datastore/docs/reference/rest/v1/projects/lookup

        :param keys: the keys to lookup.
        :type keys: list
        :param read_consistency: the read consistency to use. default, strong or eventual.
            Cannot be used with a transaction.
        :type read_consistency: str
        :param transaction: the transaction to use, if any.
        :type transaction: str
        :param project_id: Google Cloud Platform project ID against which to make the request.
        :type project_id: str
        :return: the response body of the lookup request.
        :rtype: dict
        """
        conn = self.get_conn()  # type: Any
        body = {'keys': keys}  # type: Dict[str, Any]
        if read_consistency:
            body['readConsistency'] = read_consistency
        if transaction:
            body['transaction'] = transaction
        resp = (conn  # pylint: disable=no-member
                .projects()
                .lookup(projectId=project_id, body=body)
                .execute(num_retries=self.num_retries))
        return resp

    @GoogleBaseHook.fallback_to_default_project_id
    def rollback(self, transaction: str, project_id: str) -> Any:
        """
        Roll back a transaction.

        .. seealso::
            https://cloud.google.com/datastore/docs/reference/rest/v1/projects/rollback

        :param transaction: the transaction to roll back.
        :type transaction: str
        :param project_id: Google Cloud Platform project ID against which to make the request.
        :type project_id: str
        """
        conn = self.get_conn()  # type: Any
        conn.projects().rollback(  # pylint: disable=no-member
            projectId=project_id, body={'transaction': transaction}
        ).execute(num_retries=self.num_retries)

    @GoogleBaseHook.fallback_to_default_project_id
    def run_query(self, body: Dict, project_id: str) -> Dict:
        """
        Run a query for entities.

        .. seealso::
            https://cloud.google.com/datastore/docs/reference/rest/v1/projects/runQuery

        :param body: the body of the query request.
        :type body: dict
        :param project_id: Google Cloud Platform project ID against which to make the request.
        :type project_id: str
        :return: the batch of query results.
        :rtype: dict
        """
        conn = self.get_conn()  # type: Any
        resp = (conn  # pylint: disable=no-member
                .projects()
                .runQuery(projectId=project_id, body=body)
                .execute(num_retries=self.num_retries))
        return resp['batch']

    def get_operation(self, name: str) -> Dict:
        """
        Gets the latest state of a long-running operation.

        .. seealso::
            https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/get

        :param name: the name of the operation resource.
        :type name: str
        :return: a resource operation instance.
        :rtype: dict
        """
        conn = self.get_conn()  # type: Any
        resp = (conn  # pylint: disable=no-member
                .projects()
                .operations()
                .get(name=name)
                .execute(num_retries=self.num_retries))
        return resp

    def delete_operation(self, name: str) -> Dict:
        """
        Deletes the long-running operation.

        .. seealso::
            https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/delete

        :param name: the name of the operation resource.
        :type name: str
        :return: none if successful.
        :rtype: dict
        """
        conn = self.get_conn()  # type: Any
        resp = (conn  # pylint: disable=no-member
                .projects()
                .operations()
                .delete(name=name)
                .execute(num_retries=self.num_retries))
        return resp

    def poll_operation_until_done(self, name: str, polling_interval_in_seconds: int) -> Dict:
        """
        Poll backup operation state until it's completed.

        :param name: the name of the operation resource
        :type name: str
        :param polling_interval_in_seconds: The number of seconds to wait before calling another request.
        :type polling_interval_in_seconds: int
        :return: a resource operation instance.
        :rtype: dict
        """
        while True:
            result = self.get_operation(name)  # type: Dict
            state = result['metadata']['common']['state']  # type: str
            if state == 'PROCESSING':
                self.log.info(
                    'Operation is processing. Re-polling state in %s seconds', polling_interval_in_seconds
                )
                time.sleep(polling_interval_in_seconds)
            else:
                return result

    @GoogleBaseHook.fallback_to_default_project_id
    def export_to_storage_bucket(
        self,
        bucket: str,
        project_id: str,
        namespace: Optional[str] = None,
        entity_filter: Optional[Dict] = None,
        labels: Optional[Dict[str, str]] = None,
    ) -> Dict:
        """
        Export entities from Cloud Datastore to Cloud Storage for backup.

        .. note::
            Keep in mind that this requests the Admin API not the Data API.

        .. seealso::
            https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/export

        :param bucket: The name of the Cloud Storage bucket.
        :type bucket: str
        :param namespace: The Cloud Storage namespace path.
        :type namespace: str
        :param entity_filter: Description of what data from the project is included in the export.
        :type entity_filter: dict
        :param labels: Client-assigned labels.
        :type labels: dict of str
        :param project_id: Google Cloud Platform project ID against which to make the request.
        :type project_id: str
        :return: a resource operation instance.
        :rtype: dict
        """
        admin_conn = self.get_conn()  # type: Any
        # filter(None, ...) drops an empty namespace so the URI has no '//'.
        output_uri_prefix = 'gs://' + '/'.join(filter(None, [bucket, namespace]))  # type: str
        if not entity_filter:
            entity_filter = {}
        if not labels:
            labels = {}
        body = {
            'outputUrlPrefix': output_uri_prefix,
            'entityFilter': entity_filter,
            'labels': labels,
        }  # type: Dict
        resp = (admin_conn  # pylint: disable=no-member
                .projects()
                .export(projectId=project_id, body=body)
                .execute(num_retries=self.num_retries))
        return resp

    @GoogleBaseHook.fallback_to_default_project_id
    def import_from_storage_bucket(
        self,
        bucket: str,
        file: str,
        project_id: str,
        namespace: Optional[str] = None,
        entity_filter: Optional[Dict] = None,
        labels: Optional[Union[Dict, str]] = None,
    ) -> Dict:
        """
        Import a backup from Cloud Storage to Cloud Datastore.

        .. note::
            Keep in mind that this requests the Admin API not the Data API.

        .. seealso::
            https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/import

        :param bucket: The name of the Cloud Storage bucket.
        :type bucket: str
        :param file: the metadata file written by the projects.export operation.
        :type file: str
        :param namespace: The Cloud Storage namespace path.
        :type namespace: str
        :param entity_filter: specify which kinds/namespaces are to be imported.
        :type entity_filter: dict
        :param labels: Client-assigned labels.
        :type labels: dict of str
        :param project_id: Google Cloud Platform project ID against which to make the request.
        :type project_id: str
        :return: a resource operation instance.
        :rtype: dict
        """
        admin_conn = self.get_conn()  # type: Any
        input_url = 'gs://' + '/'.join(filter(None, [bucket, namespace, file]))  # type: str
        if not entity_filter:
            entity_filter = {}
        if not labels:
            labels = {}
        body = {
            'inputUrl': input_url,
            'entityFilter': entity_filter,
            'labels': labels,
        }  # type: Dict
        resp = (admin_conn  # pylint: disable=no-member
                .projects()
                .import_(projectId=project_id, body=body)
                .execute(num_retries=self.num_retries))
        # FIX: the final return line was corrupted by fused dataset metadata
        # ("| airflow/providers/... | import time"), breaking the module.
        return resp
import warnings
from typing import Any, Dict, List, Optional, Sequence, Union
from googleapiclient.discovery import build
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class DatastoreHook(GoogleBaseHook):
"""
Interact with Google Cloud Datastore. This hook uses the Google Cloud Platform connection.
This object is not threads safe. If you want to make multiple requests
simultaneously, you will need to create a hook per thread.
:param api_version: The version of the API it is going to connect to.
:type api_version: str
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
api_version: str = 'v1',
datastore_conn_id: Optional[str] = None
) -> None:
if datastore_conn_id:
warnings.warn(
"The datastore_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.", DeprecationWarning, stacklevel=2)
gcp_conn_id = datastore_conn_id
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
self.connection = None
self.api_version = api_version
def get_conn(self) -> Any:
"""
Establishes a connection to the Google API.
:return: a Google Cloud Datastore service object.
:rtype: Resource
"""
if not self.connection:
http_authorized = self._authorize()
self.connection = build('datastore', self.api_version, http=http_authorized,
cache_discovery=False)
return self.connection
@GoogleBaseHook.fallback_to_default_project_id
def allocate_ids(self, partial_keys: List, project_id: str) -> List:
"""
Allocate IDs for incomplete keys.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/allocateIds
:param partial_keys: a list of partial keys.
:type partial_keys: list
:param project_id: Google Cloud Platform project ID against which to make the request.
:type project_id: str
:return: a list of full keys.
:rtype: list
"""
conn = self.get_conn() # type: Any
resp = (conn # pylint: disable=no-member
.projects()
.allocateIds(projectId=project_id, body={'keys': partial_keys})
.execute(num_retries=self.num_retries))
return resp['keys']
@GoogleBaseHook.fallback_to_default_project_id
def begin_transaction(self, project_id: str, transaction_options: Dict[str, Any]) -> str:
"""
Begins a new transaction.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/beginTransaction
:param project_id: Google Cloud Platform project ID against which to make the request.
:type project_id: str
:param transaction_options: Options for a new transaction.
:type transaction_options: Dict[str, Any]
:return: a transaction handle.
:rtype: str
"""
conn = self.get_conn() # type: Any
resp = (conn # pylint: disable=no-member
.projects()
.beginTransaction(projectId=project_id, body={"transactionOptions": transaction_options})
.execute(num_retries=self.num_retries))
return resp['transaction']
@GoogleBaseHook.fallback_to_default_project_id
def commit(self, body: Dict, project_id: str) -> Dict:
"""
Commit a transaction, optionally creating, deleting or modifying some entities.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/commit
:param body: the body of the commit request.
:type body: dict
:param project_id: Google Cloud Platform project ID against which to make the request.
:type project_id: str
:return: the response body of the commit request.
:rtype: dict
"""
conn = self.get_conn() # type: Any
resp = (conn # pylint: disable=no-member
.projects()
.commit(projectId=project_id, body=body)
.execute(num_retries=self.num_retries))
return resp
@GoogleBaseHook.fallback_to_default_project_id
def lookup(
self,
keys: List,
project_id: str,
read_consistency: Optional[str] = None,
transaction: Optional[str] = None,
) -> Dict:
"""
Lookup some entities by key.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/lookup
:param keys: the keys to lookup.
:type keys: list
:param read_consistency: the read consistency to use. default, strong or eventual.
Cannot be used with a transaction.
:type read_consistency: str
:param transaction: the transaction to use, if any.
:type transaction: str
:param project_id: Google Cloud Platform project ID against which to make the request.
:type project_id: str
:return: the response body of the lookup request.
:rtype: dict
"""
conn = self.get_conn() # type: Any
body = {'keys': keys} # type: Dict[str, Any]
if read_consistency:
body['readConsistency'] = read_consistency
if transaction:
body['transaction'] = transaction
resp = (conn # pylint: disable=no-member
.projects()
.lookup(projectId=project_id, body=body)
.execute(num_retries=self.num_retries))
return resp
@GoogleBaseHook.fallback_to_default_project_id
def rollback(self, transaction: str, project_id: str) -> Any:
"""
Roll back a transaction.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/rollback
:param transaction: the transaction to roll back.
:type transaction: str
:param project_id: Google Cloud Platform project ID against which to make the request.
:type project_id: str
"""
conn = self.get_conn() # type: Any
conn.projects().rollback( # pylint: disable=no-member
projectId=project_id, body={'transaction': transaction}
).execute(num_retries=self.num_retries)
@GoogleBaseHook.fallback_to_default_project_id
def run_query(self, body: Dict, project_id: str) -> Dict:
"""
Run a query for entities.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/runQuery
:param body: the body of the query request.
:type body: dict
:param project_id: Google Cloud Platform project ID against which to make the request.
:type project_id: str
:return: the batch of query results.
:rtype: dict
"""
conn = self.get_conn() # type: Any
resp = (conn # pylint: disable=no-member
.projects()
.runQuery(projectId=project_id, body=body)
.execute(num_retries=self.num_retries))
return resp['batch']
def get_operation(self, name: str) -> Dict:
"""
Gets the latest state of a long-running operation.
.. seealso::
https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/get
:param name: the name of the operation resource.
:type name: str
:return: a resource operation instance.
:rtype: dict
"""
conn = self.get_conn() # type: Any
resp = (conn # pylint: disable=no-member
.projects()
.operations()
.get(name=name)
.execute(num_retries=self.num_retries))
return resp
def delete_operation(self, name: str) -> Dict:
"""
Deletes the long-running operation.
.. seealso::
https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/delete
:param name: the name of the operation resource.
:type name: str
:return: none if successful.
:rtype: dict
"""
conn = self.get_conn() # type: Any
resp = (conn # pylint: disable=no-member
.projects()
.operations()
.delete(name=name)
.execute(num_retries=self.num_retries))
return resp
def poll_operation_until_done(self, name: str, polling_interval_in_seconds: int) -> Dict:
"""
Poll backup operation state until it's completed.
:param name: the name of the operation resource
:type name: str
:param polling_interval_in_seconds: The number of seconds to wait before calling another request.
:type polling_interval_in_seconds: int
:return: a resource operation instance.
:rtype: dict
"""
while True:
result = self.get_operation(name) # type: Dict
state = result['metadata']['common']['state'] # type: str
if state == 'PROCESSING':
self.log.info(
'Operation is processing. Re-polling state in %s seconds', polling_interval_in_seconds
)
time.sleep(polling_interval_in_seconds)
else:
return result
@GoogleBaseHook.fallback_to_default_project_id
def export_to_storage_bucket(
self,
bucket: str,
project_id: str,
namespace: Optional[str] = None,
entity_filter: Optional[Dict] = None,
labels: Optional[Dict[str, str]] = None,
) -> Dict:
"""
Export entities from Cloud Datastore to Cloud Storage for backup.
.. note::
Keep in mind that this requests the Admin API not the Data API.
.. seealso::
https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/export
:param bucket: The name of the Cloud Storage bucket.
:type bucket: str
:param namespace: The Cloud Storage namespace path.
:type namespace: str
:param entity_filter: Description of what data from the project is included in the export.
:type entity_filter: dict
:param labels: Client-assigned labels.
:type labels: dict of str
:param project_id: Google Cloud Platform project ID against which to make the request.
:type project_id: str
:return: a resource operation instance.
:rtype: dict
"""
admin_conn = self.get_conn() # type: Any
output_uri_prefix = 'gs://' + '/'.join(filter(None, [bucket, namespace])) # type: str
if not entity_filter:
entity_filter = {}
if not labels:
labels = {}
body = {
'outputUrlPrefix': output_uri_prefix,
'entityFilter': entity_filter,
'labels': labels,
} # type: Dict
resp = (admin_conn # pylint: disable=no-member
.projects()
.export(projectId=project_id, body=body)
.execute(num_retries=self.num_retries))
return resp
@GoogleBaseHook.fallback_to_default_project_id
def import_from_storage_bucket(
self,
bucket: str,
file: str,
project_id: str,
namespace: Optional[str] = None,
entity_filter: Optional[Dict] = None,
labels: Optional[Union[Dict, str]] = None,
) -> Dict:
"""
Import a backup from Cloud Storage to Cloud Datastore.
.. note::
Keep in mind that this requests the Admin API not the Data API.
.. seealso::
https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/import
:param bucket: The name of the Cloud Storage bucket.
:type bucket: str
:param file: the metadata file written by the projects.export operation.
:type file: str
:param namespace: The Cloud Storage namespace path.
:type namespace: str
:param entity_filter: specify which kinds/namespaces are to be imported.
:type entity_filter: dict
:param labels: Client-assigned labels.
:type labels: dict of str
:param project_id: Google Cloud Platform project ID against which to make the request.
:type project_id: str
:return: a resource operation instance.
:rtype: dict
"""
admin_conn = self.get_conn() # type: Any
input_url = 'gs://' + '/'.join(filter(None, [bucket, namespace, file])) # type: str
if not entity_filter:
entity_filter = {}
if not labels:
labels = {}
body = {
'inputUrl': input_url,
'entityFilter': entity_filter,
'labels': labels,
} # type: Dict
resp = (admin_conn # pylint: disable=no-member
.projects()
.import_(projectId=project_id, body=body)
.execute(num_retries=self.num_retries))
return resp | 0.874734 | 0.251073 |
import os
import re
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
class TedliumReleaseConfig(tfds.core.BuilderConfig):
"""BuilderConfig for a release of the TED-LIUM dataset."""
def __init__(self, *, url, download_url, split_paths, citation, **kwargs):
super(TedliumReleaseConfig,
self).__init__(version=tfds.core.Version("1.0.1"), **kwargs)
self.url = url
self.download_url = download_url
# List of split, path pairs containing the relative path within the
# extracted tarball to the data for each split.
self.split_paths = split_paths
self.citation = citation
def _make_builder_configs():
"""Creates builder configs for all supported Tedlium dataset releases."""
release1 = TedliumReleaseConfig(
name="release1",
description="""\
The TED-LIUM corpus is English-language TED talks, with transcriptions,
sampled at 16kHz. It contains about 118 hours of speech.
This is the TED-LIUM corpus release 1,
licensed under Creative Commons BY-NC-ND 3.0
(http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en).
""",
citation="""\
@inproceedings{rousseau2012tedlium,
title={TED-LIUM: an Automatic Speech Recognition dedicated corpus},
author={<NAME> Del{\\'e}<NAME> and Est{\\`e}ve, Yannick},
booktitle={Conference on Language Resources and Evaluation (LREC)},
pages={125--129},
year={2012}
}
""",
url="https://www.openslr.org/7/",
download_url="http://www.openslr.org/resources/7/TEDLIUM_release1.tar.gz",
split_paths=[(tfds.Split.TRAIN, os.path.join("TEDLIUM_release1",
"train")),
(tfds.Split.VALIDATION,
os.path.join("TEDLIUM_release1", "dev")),
(tfds.Split.TEST, os.path.join("TEDLIUM_release1", "test"))])
release2 = TedliumReleaseConfig(
name="release2",
description="""\
This is the TED-LIUM corpus release 2,
licensed under Creative Commons BY-NC-ND 3.0
(http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en).
All talks and text are property of TED Conferences LLC.
The TED-LIUM corpus was made from audio talks and their transcriptions
available on the TED website. We have prepared and filtered these data
in order to train acoustic models to participate to the International
Workshop on Spoken Language Translation 2011 (the LIUM English/French
SLT system reached the first rank in the SLT task).
Contains 1495 talks and transcripts.
""",
citation="""\
@inproceedings{rousseau2014tedlium2,
title={Enhancing the {TED-LIUM} Corpus with Selected Data for Language Modeling and More {TED} Talks},
author={<NAME> and Del{\\'e}<NAME> and Est{\\`e}<NAME>},
booktitle={Conference on Language Resources and Evaluation (LREC)},
year={2014}
}
""",
url="https://www.openslr.org/19/",
download_url="http://www.openslr.org/resources/19/TEDLIUM_release2.tar.gz",
split_paths=[(tfds.Split.TRAIN, os.path.join("TEDLIUM_release2",
"train")),
(tfds.Split.VALIDATION,
os.path.join("TEDLIUM_release2", "dev")),
(tfds.Split.TEST, os.path.join("TEDLIUM_release2", "test"))])
release3 = TedliumReleaseConfig(
name="release3",
description="""\
This is the TED-LIUM corpus release 3, licensed under Creative Commons
BY-NC-ND 3.0.
All talks and text are property of TED Conferences LLC.
This new TED-LIUM release was made through a collaboration between the
Ubiqus company and the LIUM (University of Le Mans, France)
Contents:
- 2351 audio talks in NIST sphere format (SPH), including talks from
TED-LIUM 2: be careful, same talks but not same audio files (only
these audio file must be used with the TED-LIUM 3 STM files)
- 452 hours of audio
- 2351 aligned automatic transcripts in STM format
- TEDLIUM 2 dev and test data: 19 TED talks in SPH format with
corresponding manual transcriptions (cf. 'legacy' distribution below).
- Dictionary with pronunciations (159848 entries), same file as the one
included in TED-LIUM 2
- Selected monolingual data for language modeling from WMT12 publicly
available corpora: these files come from the TED-LIUM 2 release, but
have been modified to get a tokenization more relevant for English
language
Two corpus distributions:
- the legacy one, on which the dev and test datasets are the same as in
TED-LIUM 2 (and TED-LIUM 1).
- the 'speaker adaptation' one, especially designed for experiments on
speaker adaptation.
""",
citation="""\
@inproceedings{hernandez2018tedlium3,
title={TED-LIUM 3: twice as much data and corpus repartition for experiments on speaker adaptation},
author={<NAME>{\\c{c}}ois and Nguyen, Vincent and Ghannay, Sahar and Tomashenko, Natalia and Est{\\`e}<NAME>},
booktitle={International Conference on Speech and Computer},
pages={198--208},
year={2018},
organization={Springer}
}
""",
url="https://www.openslr.org/51/",
download_url="http://www.openslr.org/resources/51/TEDLIUM_release-3.tgz",
split_paths=[
(tfds.Split.VALIDATION,
os.path.join("TEDLIUM_release-3", "legacy", "dev")),
(tfds.Split.TEST, os.path.join("TEDLIUM_release-3", "legacy",
"test")),
# The legacy/train directory contains symlinks to "data",
# which are skipped by extraction (see above).
# Work around this by manually dereferencing the links here.
(tfds.Split.TRAIN, os.path.join("TEDLIUM_release-3", "data"))
])
return [release1, release2, release3]
class Tedlium(tfds.core.BeamBasedBuilder):
"""TED-LIUM speech recognition dataset."""
BUILDER_CONFIGS = _make_builder_configs()
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=self.builder_config.description,
features=tfds.features.FeaturesDict({
"speech":
tfds.features.Audio(sample_rate=16000),
"text":
tfds.features.Text(),
"speaker_id":
tf.string,
"gender":
tfds.features.ClassLabel(names=["unknown", "female", "male"]),
"id":
tf.string,
}),
supervised_keys=("speech", "text"),
homepage=self.builder_config.url,
citation=self.builder_config.citation,
metadata=tfds.core.MetadataDict(sample_rate=16000,),
)
def _split_generators(self, dl_manager):
extracted_dir = dl_manager.download_and_extract(
self.builder_config.download_url)
splits = []
for split, path in self.builder_config.split_paths:
kwargs = {"directory": os.path.join(extracted_dir, path)}
splits.append(tfds.core.SplitGenerator(name=split, gen_kwargs=kwargs))
return splits
def _build_pcollection(self, pipeline, directory):
beam = tfds.core.lazy_imports.apache_beam
stm_files = tf.io.gfile.glob(os.path.join(directory, "stm", "*stm"))
return (pipeline
| beam.Create(stm_files)
| beam.FlatMap(_generate_examples_from_stm_file))
def _generate_examples_from_stm_file(stm_path):
"""Generate examples from a TED-LIUM stm file."""
stm_dir = os.path.dirname(stm_path)
sph_dir = os.path.join(os.path.dirname(stm_dir), "sph")
with tf.io.gfile.GFile(stm_path) as f:
for line in f:
line = line.strip()
fn, channel, speaker, start, end, label, transcript = line.split(" ", 6)
transcript = _maybe_trim_suffix(transcript)
audio_file = "%s.sph" % fn
samples = _extract_audio_segment(
os.path.join(sph_dir, audio_file), int(channel), float(start),
float(end))
key = "-".join([speaker, start, end, label])
example = {
"speech": samples,
"text": transcript,
"speaker_id": speaker,
"gender": _parse_gender(label),
"id": key,
}
yield key, example
def _maybe_trim_suffix(transcript):
# stm files for the TEDLIUM release 1 train split contain a key (enclosed in
# parens) at the end.
splits = transcript.rsplit(" ", 1)
transcript = splits[0]
if len(splits) > 1:
suffix = splits[-1]
if not suffix.startswith("("):
transcript += " " + suffix
return transcript
def _parse_gender(label_str):
"""Parse gender string from STM "<label>" field."""
gender = re.split(",|_", label_str)[-1][:-1]
# Fix inconsistencies in the data.
if not gender:
gender = -1 # Missing label.
elif gender == "<NA": # In TEDLIUM release 3 training data.
gender = -1 # Missing label.
elif gender == "F":
gender = "female"
elif gender == "M":
gender = "male"
return gender
def _extract_audio_segment(sph_path, channel, start_sec, end_sec):
"""Extracts segment of audio samples (as an ndarray) from the given path."""
with tf.io.gfile.GFile(sph_path, "rb") as f:
segment = tfds.core.lazy_imports.pydub.AudioSegment.from_file(
f, format="nistsphere")
# The dataset only contains mono audio.
assert segment.channels == 1
assert channel == 1
start_ms = int(start_sec * 1000)
end_ms = int(end_sec * 1000)
segment = segment[start_ms:end_ms]
samples = np.array(segment.get_array_of_samples())
return samples | tensorflow_datasets/audio/tedlium.py | import os
import re
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
class TedliumReleaseConfig(tfds.core.BuilderConfig):
"""BuilderConfig for a release of the TED-LIUM dataset."""
def __init__(self, *, url, download_url, split_paths, citation, **kwargs):
super(TedliumReleaseConfig,
self).__init__(version=tfds.core.Version("1.0.1"), **kwargs)
self.url = url
self.download_url = download_url
# List of split, path pairs containing the relative path within the
# extracted tarball to the data for each split.
self.split_paths = split_paths
self.citation = citation
def _make_builder_configs():
"""Creates builder configs for all supported Tedlium dataset releases."""
release1 = TedliumReleaseConfig(
name="release1",
description="""\
The TED-LIUM corpus is English-language TED talks, with transcriptions,
sampled at 16kHz. It contains about 118 hours of speech.
This is the TED-LIUM corpus release 1,
licensed under Creative Commons BY-NC-ND 3.0
(http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en).
""",
citation="""\
@inproceedings{rousseau2012tedlium,
title={TED-LIUM: an Automatic Speech Recognition dedicated corpus},
author={<NAME> Del{\\'e}<NAME> and Est{\\`e}ve, Yannick},
booktitle={Conference on Language Resources and Evaluation (LREC)},
pages={125--129},
year={2012}
}
""",
url="https://www.openslr.org/7/",
download_url="http://www.openslr.org/resources/7/TEDLIUM_release1.tar.gz",
split_paths=[(tfds.Split.TRAIN, os.path.join("TEDLIUM_release1",
"train")),
(tfds.Split.VALIDATION,
os.path.join("TEDLIUM_release1", "dev")),
(tfds.Split.TEST, os.path.join("TEDLIUM_release1", "test"))])
release2 = TedliumReleaseConfig(
name="release2",
description="""\
This is the TED-LIUM corpus release 2,
licensed under Creative Commons BY-NC-ND 3.0
(http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en).
All talks and text are property of TED Conferences LLC.
The TED-LIUM corpus was made from audio talks and their transcriptions
available on the TED website. We have prepared and filtered these data
in order to train acoustic models to participate to the International
Workshop on Spoken Language Translation 2011 (the LIUM English/French
SLT system reached the first rank in the SLT task).
Contains 1495 talks and transcripts.
""",
citation="""\
@inproceedings{rousseau2014tedlium2,
title={Enhancing the {TED-LIUM} Corpus with Selected Data for Language Modeling and More {TED} Talks},
author={<NAME> and Del{\\'e}<NAME> and Est{\\`e}<NAME>},
booktitle={Conference on Language Resources and Evaluation (LREC)},
year={2014}
}
""",
url="https://www.openslr.org/19/",
download_url="http://www.openslr.org/resources/19/TEDLIUM_release2.tar.gz",
split_paths=[(tfds.Split.TRAIN, os.path.join("TEDLIUM_release2",
"train")),
(tfds.Split.VALIDATION,
os.path.join("TEDLIUM_release2", "dev")),
(tfds.Split.TEST, os.path.join("TEDLIUM_release2", "test"))])
release3 = TedliumReleaseConfig(
name="release3",
description="""\
This is the TED-LIUM corpus release 3, licensed under Creative Commons
BY-NC-ND 3.0.
All talks and text are property of TED Conferences LLC.
This new TED-LIUM release was made through a collaboration between the
Ubiqus company and the LIUM (University of Le Mans, France)
Contents:
- 2351 audio talks in NIST sphere format (SPH), including talks from
TED-LIUM 2: be careful, same talks but not same audio files (only
these audio file must be used with the TED-LIUM 3 STM files)
- 452 hours of audio
- 2351 aligned automatic transcripts in STM format
- TEDLIUM 2 dev and test data: 19 TED talks in SPH format with
corresponding manual transcriptions (cf. 'legacy' distribution below).
- Dictionary with pronunciations (159848 entries), same file as the one
included in TED-LIUM 2
- Selected monolingual data for language modeling from WMT12 publicly
available corpora: these files come from the TED-LIUM 2 release, but
have been modified to get a tokenization more relevant for English
language
Two corpus distributions:
- the legacy one, on which the dev and test datasets are the same as in
TED-LIUM 2 (and TED-LIUM 1).
- the 'speaker adaptation' one, especially designed for experiments on
speaker adaptation.
""",
citation="""\
@inproceedings{hernandez2018tedlium3,
title={TED-LIUM 3: twice as much data and corpus repartition for experiments on speaker adaptation},
author={<NAME>{\\c{c}}ois and Nguyen, Vincent and Ghannay, Sahar and Tomashenko, Natalia and Est{\\`e}<NAME>},
booktitle={International Conference on Speech and Computer},
pages={198--208},
year={2018},
organization={Springer}
}
""",
url="https://www.openslr.org/51/",
download_url="http://www.openslr.org/resources/51/TEDLIUM_release-3.tgz",
split_paths=[
(tfds.Split.VALIDATION,
os.path.join("TEDLIUM_release-3", "legacy", "dev")),
(tfds.Split.TEST, os.path.join("TEDLIUM_release-3", "legacy",
"test")),
# The legacy/train directory contains symlinks to "data",
# which are skipped by extraction (see above).
# Work around this by manually dereferencing the links here.
(tfds.Split.TRAIN, os.path.join("TEDLIUM_release-3", "data"))
])
return [release1, release2, release3]
class Tedlium(tfds.core.BeamBasedBuilder):
"""TED-LIUM speech recognition dataset."""
BUILDER_CONFIGS = _make_builder_configs()
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=self.builder_config.description,
features=tfds.features.FeaturesDict({
"speech":
tfds.features.Audio(sample_rate=16000),
"text":
tfds.features.Text(),
"speaker_id":
tf.string,
"gender":
tfds.features.ClassLabel(names=["unknown", "female", "male"]),
"id":
tf.string,
}),
supervised_keys=("speech", "text"),
homepage=self.builder_config.url,
citation=self.builder_config.citation,
metadata=tfds.core.MetadataDict(sample_rate=16000,),
)
def _split_generators(self, dl_manager):
extracted_dir = dl_manager.download_and_extract(
self.builder_config.download_url)
splits = []
for split, path in self.builder_config.split_paths:
kwargs = {"directory": os.path.join(extracted_dir, path)}
splits.append(tfds.core.SplitGenerator(name=split, gen_kwargs=kwargs))
return splits
def _build_pcollection(self, pipeline, directory):
beam = tfds.core.lazy_imports.apache_beam
stm_files = tf.io.gfile.glob(os.path.join(directory, "stm", "*stm"))
return (pipeline
| beam.Create(stm_files)
| beam.FlatMap(_generate_examples_from_stm_file))
def _generate_examples_from_stm_file(stm_path):
"""Generate examples from a TED-LIUM stm file."""
stm_dir = os.path.dirname(stm_path)
sph_dir = os.path.join(os.path.dirname(stm_dir), "sph")
with tf.io.gfile.GFile(stm_path) as f:
for line in f:
line = line.strip()
fn, channel, speaker, start, end, label, transcript = line.split(" ", 6)
transcript = _maybe_trim_suffix(transcript)
audio_file = "%s.sph" % fn
samples = _extract_audio_segment(
os.path.join(sph_dir, audio_file), int(channel), float(start),
float(end))
key = "-".join([speaker, start, end, label])
example = {
"speech": samples,
"text": transcript,
"speaker_id": speaker,
"gender": _parse_gender(label),
"id": key,
}
yield key, example
def _maybe_trim_suffix(transcript):
# stm files for the TEDLIUM release 1 train split contain a key (enclosed in
# parens) at the end.
splits = transcript.rsplit(" ", 1)
transcript = splits[0]
if len(splits) > 1:
suffix = splits[-1]
if not suffix.startswith("("):
transcript += " " + suffix
return transcript
def _parse_gender(label_str):
"""Parse gender string from STM "<label>" field."""
gender = re.split(",|_", label_str)[-1][:-1]
# Fix inconsistencies in the data.
if not gender:
gender = -1 # Missing label.
elif gender == "<NA": # In TEDLIUM release 3 training data.
gender = -1 # Missing label.
elif gender == "F":
gender = "female"
elif gender == "M":
gender = "male"
return gender
def _extract_audio_segment(sph_path, channel, start_sec, end_sec):
"""Extracts segment of audio samples (as an ndarray) from the given path."""
with tf.io.gfile.GFile(sph_path, "rb") as f:
segment = tfds.core.lazy_imports.pydub.AudioSegment.from_file(
f, format="nistsphere")
# The dataset only contains mono audio.
assert segment.channels == 1
assert channel == 1
start_ms = int(start_sec * 1000)
end_ms = int(end_sec * 1000)
segment = segment[start_ms:end_ms]
samples = np.array(segment.get_array_of_samples())
return samples | 0.788339 | 0.350394 |
import unittest
from user import User
from credentials import Credentials
class TestLocker(unittest.TestCase):
    """Unit tests for the User and Credentials classes.

    Exercises creation, storage, lookup and deletion of user accounts
    and their saved credentials through the class-level registries
    ``User.user_list`` and ``Credentials.credentials_list``.
    """

    def setUp(self):
        """Prepare fixture objects: one user and one saved credential."""
        self.new_account = Credentials("GitHub", "JamesMutahi", "12139**")
        self.new_user = User("JamesMutahi", "12139lenana**")

    def tearDown(self):
        """Reset the class-level registries so each test starts clean."""
        Credentials.credentials_list = []
        User.user_list = []

    def test_init(self):
        """Objects built in setUp carry exactly the attributes they were given."""
        self.assertEqual("JamesMutahi", self.new_user.user_name)
        self.assertEqual("<PASSWORD>**", self.new_user.password)
        self.assertEqual("GitHub", self.new_account.account_name)
        self.assertEqual("JamesMutahi", self.new_account.account_user_name)
        self.assertEqual("<PASSWORD>**", self.new_account.account_password)

    def test_create_user(self):
        """create_user() appends the user to User.user_list."""
        self.new_user.create_user()
        self.assertEqual(1, len(User.user_list))

    def test_add_credentials(self):
        """add_credentials() appends the account to Credentials.credentials_list."""
        self.new_account.add_credentials()
        self.assertEqual(1, len(Credentials.credentials_list))

    def test_create_multiple_users(self):
        """Several distinct users can be stored in User.user_list."""
        self.new_user.create_user()
        other_user = User("John", "Doe")
        other_user.create_user()
        self.assertEqual(2, len(User.user_list))

    def test_add_multiple_credentials(self):
        """Several distinct accounts can be stored in Credentials.credentials_list."""
        self.new_account.add_credentials()
        other_account = Credentials("Github", "John", "Doe")
        other_account.add_credentials()
        self.assertEqual(2, len(Credentials.credentials_list))

    def test_delete_credentials(self):
        """delete_credentials() removes the matching account from the list."""
        self.new_account.add_credentials()
        other_account = Credentials("Github", "John", "Doe")
        other_account.add_credentials()
        self.new_account.delete_credentials("Github")
        self.assertEqual(1, len(Credentials.credentials_list))

    def test_find_by_account(self):
        """find_by_account() locates a saved account by its account name."""
        self.new_account.add_credentials()
        other_account = Credentials("Github", "John", "Doe")
        other_account.add_credentials()
        found = Credentials.find_by_account("Github")
        self.assertEqual(other_account.account_name, found.account_name)

    def test_display_all_credentials(self):
        """display_all_accounts() returns the full credentials list."""
        self.assertEqual(
            Credentials.display_all_accounts(), Credentials.credentials_list
        )
# Run the test suite when this file is executed directly.
# (Fixes extraction residue that had fused "| test.py | import unittest"
# onto the unittest.main() line, breaking the script.)
if __name__ == '__main__':
    unittest.main()
from user import User
from credentials import Credentials
class TestLocker(unittest.TestCase):
    """
    Test class that defines test cases for the user and credentials class
    behaviours.

    Args:
        unittest.TestCase: TestCase class that helps in creating test cases
    """

    def setUp(self):
        '''
        Set up method to run before each test case.
        '''
        self.new_user = User("JamesMutahi", "12139lenana**")
        self.new_account = Credentials("GitHub", "JamesMutahi", "12139**")

    def tearDown(self):
        '''
        Does clean up / destroys entries after each test case has run.
        '''
        User.user_list = []
        Credentials.credentials_list = []

    def test_init(self):
        '''
        test_init test case to test if the object is initialized properly
        '''
        self.assertEqual(self.new_user.user_name, "JamesMutahi")
        # Fix: assert the actual values passed in setUp. The previous
        # "<PASSWORD>**" placeholders (an anonymisation artifact) could never
        # equal the stored passwords, so this test always failed.
        self.assertEqual(self.new_user.password, "12139lenana**")
        self.assertEqual(self.new_account.account_name, "GitHub")
        self.assertEqual(self.new_account.account_user_name, "JamesMutahi")
        self.assertEqual(self.new_account.account_password, "12139**")

    def test_create_user(self):
        '''
        test_create_user test case to test if the user object is saved into the user list
        '''
        self.new_user.create_user()
        self.assertEqual(len(User.user_list), 1)

    def test_add_credentials(self):
        '''
        test_add_credentials test case to test if the credentials object is saved into
        the credentials list
        '''
        self.new_account.add_credentials()
        self.assertEqual(len(Credentials.credentials_list), 1)

    def test_create_multiple_users(self):
        '''
        test_create_multiple_users to check if we can create multiple user
        objects to our user list
        '''
        self.new_user.create_user()
        test_user = User("John", "Doe")
        test_user.create_user()
        self.assertEqual(len(User.user_list), 2)

    def test_add_multiple_credentials(self):
        '''
        test_add_multiple_credentials to check if we can create multiple credentials
        objects to our credentials list
        '''
        self.new_account.add_credentials()
        test_credentials = Credentials("Github", "John", "Doe")
        test_credentials.add_credentials()
        self.assertEqual(len(Credentials.credentials_list), 2)

    def test_delete_credentials(self):
        '''
        test_delete_credentials to test if we can remove an account's credentials from our credentials list
        '''
        self.new_account.add_credentials()
        test_credentials = Credentials("Github", "John", "Doe")
        test_credentials.add_credentials()
        self.new_account.delete_credentials("Github")
        self.assertEqual(len(Credentials.credentials_list), 1)

    def test_find_by_account(self):
        '''
        test_find_credentials_by_account to check if we can find an account's credentials by account name and display information
        '''
        self.new_account.add_credentials()
        test_credentials = Credentials("Github", "John", "Doe")
        test_credentials.add_credentials()
        searched_account = Credentials.find_by_account("Github")
        self.assertEqual(searched_account.account_name, test_credentials.account_name)

    def test_display_all_credentials(self):
        '''
        returns a list of all credentials saved
        '''
        self.assertEqual(Credentials.display_all_accounts(), Credentials.credentials_list)
if __name__ == '__main__':
unittest.main() | 0.546012 | 0.398348 |
from enum import IntEnum
import dataclasses
from typing import Optional
class InstructionClass(IntEnum):
    """eBPF instruction class: dispatch key used by Instruction.memory_string
    and Instruction.normal_string."""
    LD = 0x00     # rendered as a load ('ld')
    LDX = 0x01    # rendered as a load ('ldx')
    ST = 0x02     # rendered as a store ('st')
    STX = 0x03    # rendered as a store ('stx')
    ALU = 0x04    # ALU op (ALU64 is the 64-bit variant)
    JMP = 0x05    # jump
    JMP32 = 0x06  # jump, 32-bit operand variant -- rendered like JMP
    ALU64 = 0x07  # 64-bit ALU op -- rendered like ALU
@dataclasses.dataclass
class NormalOpcodeEncoding:
    """Decoded opcode for ALU/JMP-class instructions."""
    opcode: IntEnum               # an ALUOpcode or JMPOpcode member
    inst_class: InstructionClass
    # True -> second operand is the source register, False -> the immediate
    # (see Instruction.src_string call sites). For ALUOpcode.END it selects
    # big-endian ('htobe') vs little-endian ('htole') conversion.
    source: bool
class MemoryMode(IntEnum):
    """Addressing mode of a load/store instruction."""
    IMM = 0x00        # load of an immediate (rendered with 'll' suffix)
    ABS = 0x20        # absolute packet access via sk_buff
    IND = 0x40        # NOTE(review): not handled by Instruction.load_inst_string
    MEM = 0x60        # register + offset memory access
    RESERVED1 = 0x80
    RESERVED2 = 0xa0
    XADD = 0xc0       # atomic add (rendered with a 'lock' prefix)
class MemorySize(IntEnum):
    """Access width of a load/store (see Instruction.size_string)."""
    W = 0x00   # word, rendered as u32
    H = 0x08   # half-word, rendered as u16
    B = 0x10   # byte, rendered as u8
    DW = 0x18  # double word, rendered as u64
@dataclasses.dataclass
class MemoryOpcodeEncoding:
    """Decoded opcode for load/store-class instructions."""
    mode: MemoryMode              # addressing mode
    size: MemorySize              # access width
    inst_class: InstructionClass  # LD / LDX / ST / STX
class ALUOpcode(IntEnum):
    """ALU operation selector (rendered by Instruction.normal_alu_string)."""
    ADD = 0x00   # +
    SUB = 0x10   # -
    MUL = 0x20   # *
    DIV = 0x30   # /
    OR = 0x40    # |
    AND = 0x50   # &
    LSH = 0x60   # <<
    RSH = 0x70   # >> (logical)
    NEG = 0x80   # unary -
    MOD = 0x90   # %
    XOR = 0xa0   # ^
    MOV = 0xb0   # plain move
    ARSH = 0xc0  # >> (arithmetic)
    END = 0xd0   # endianness conversion
class JMPOpcode(IntEnum):
    """Jump operation selector (rendered by Instruction.normal_jmp_string)."""
    JA = 0x00    # unconditional goto
    JEQ = 0x10   # ==
    JGT = 0x20   # >
    JGE = 0x30   # >=
    JSET = 0x40  # & (bit test)
    JNE = 0x50   # !=
    JSGT = 0x60  # > (signed)
    JSGE = 0x70  # >= (signed)
    CALL = 0x80  # rendered as 'call <imm>'
    EXIT = 0x90  # rendered as 'exit'
    JLT = 0xa0   # <
    JLE = 0xb0   # <=
    JSLT = 0xc0  # < (signed)
    JSLE = 0xd0  # <= (signed)
class Instruction:
    """A decoded eBPF instruction.

    Holds the opcode encoding, source/destination registers, 16-bit offset
    and 32-bit immediate, and renders the instruction as human-readable
    pseudo-assembly via string().
    """

    def __init__(self, encoding, regs, offset: int, immediate: int):
        # encoding is a NormalOpcodeEncoding or a MemoryOpcodeEncoding.
        self.encoding = encoding
        self.src_reg, self.dst_reg = regs
        self.offset = offset
        self.immediate = immediate

    def string(self) -> str:
        """Dispatch on the encoding kind and return the rendered text."""
        # isinstance instead of type(...) == ... (idiomatic and subclass-safe).
        if isinstance(self.encoding, NormalOpcodeEncoding):
            return self.normal_string()
        if isinstance(self.encoding, MemoryOpcodeEncoding):
            return self.memory_string()
        return "unreachable"

    def memory_string(self) -> str:
        """Render a load/store instruction according to its class."""
        handlers = {
            InstructionClass.LD: (self.load_inst_string, 'ld'),
            InstructionClass.LDX: (self.load_inst_string, 'ldx'),
            InstructionClass.ST: (self.store_inst_string, 'st'),
            InstructionClass.STX: (self.store_inst_string, 'stx'),
        }
        entry = handlers.get(self.encoding.inst_class)
        if entry is None:
            return "unreachable"
        handler, mnemonic = entry
        return handler(mnemonic)

    def load_inst_string(self, mnemonic) -> str:
        """Render a load; *mnemonic* is kept for interface compatibility."""
        mode = self.encoding.mode
        if mode == MemoryMode.IMM:
            return f"{self.dst_string()} <- {self.src_string(True)} ll"
        if mode == MemoryMode.ABS:
            return f"{self.dst_string()} <- *({self.size_string()} *)sk_buff[{self.immediate + self.offset}]"
        if mode == MemoryMode.MEM:
            return f"{self.dst_string()} <- *({self.size_string()} *)({self.src_string(False)} + {self.offset})"
        # NOTE(review): MemoryMode.IND is not handled and falls through here.
        return "unreachable"

    def store_inst_string(self, mnemonic) -> str:
        """Render a store; *mnemonic* is kept for interface compatibility."""
        mode = self.encoding.mode
        if mode == MemoryMode.XADD:
            return f"lock *({self.size_string()} *)({self.dst_string()} + {self.offset}) <- {self.dst_string()} + {self.src_string(False)}"
        if mode == MemoryMode.MEM:
            return f"*({self.size_string()} *)({self.dst_string()} + {self.offset}) <- {self.src_string(False)}"
        return "unreachable"

    def normal_string(self) -> str:
        """Render an ALU/JMP instruction according to its class."""
        if self.encoding.inst_class in (InstructionClass.ALU, InstructionClass.ALU64):
            return self.normal_alu_string()
        if self.encoding.inst_class in (InstructionClass.JMP, InstructionClass.JMP32):
            return self.normal_jmp_string()
        return "unreachable"

    def normal_alu_string(self) -> str:
        """Render an ALU instruction via an opcode -> operator table."""
        binary_ops = {
            ALUOpcode.ADD: '+', ALUOpcode.SUB: '-', ALUOpcode.MUL: '*',
            ALUOpcode.DIV: '/', ALUOpcode.MOD: '%', ALUOpcode.AND: '&',
            ALUOpcode.OR: '|', ALUOpcode.XOR: '^', ALUOpcode.LSH: '<<',
            ALUOpcode.RSH: '>>(logic)', ALUOpcode.ARSH: '>>(arith)',
        }
        opcode = self.encoding.opcode
        if opcode in binary_ops:
            return self.binary_operands_inst_string(binary_ops[opcode])
        if opcode == ALUOpcode.MOV:
            return self.unary_operand_inst_string('')
        if opcode == ALUOpcode.NEG:
            return self.unary_operand_inst_string('-')
        if opcode == ALUOpcode.END:
            return self.endian_conversion_inst_string()
        return "unreachable!"

    def normal_jmp_string(self) -> str:
        """Render a jump instruction via an opcode -> comparison table."""
        jmp_ops = {
            JMPOpcode.JA: '', JMPOpcode.JEQ: '==', JMPOpcode.JNE: '!=',
            JMPOpcode.JGT: '>', JMPOpcode.JGE: '>=', JMPOpcode.JLT: '<',
            JMPOpcode.JLE: '<=', JMPOpcode.JSGT: '>(signed)',
            JMPOpcode.JSGE: '>=(signed)', JMPOpcode.JSLT: '<(signed)',
            JMPOpcode.JSLE: '<=(signed)', JMPOpcode.JSET: '&',
        }
        opcode = self.encoding.opcode
        if opcode == JMPOpcode.EXIT:
            return "exit"
        if opcode == JMPOpcode.CALL:
            return f"call {self.immediate}"
        if opcode in jmp_ops:
            return self.jmp_inst_string(jmp_ops[opcode])
        return "unreachable!"

    def jmp_inst_string(self, operator: str) -> str:
        """Render 'goto +off' or 'goto +off if dst OP src'."""
        plus_sign = '' if self.offset < 0 else '+'
        if operator == '':
            return f"goto {plus_sign}{self.offset}"
        return f"goto {plus_sign}{self.offset} if {self.dst_string()} {operator} {self.src_string(not self.encoding.source)}"

    def unary_operand_inst_string(self, operator: str) -> str:
        return f"{self.dst_string()} <- {operator}{self.src_string(not self.encoding.source)}"

    def binary_operands_inst_string(self, operator: str) -> str:
        return f"{self.dst_string()} <- {self.dst_string()} {operator} {self.src_string(not self.encoding.source)}"

    def endian_conversion_inst_string(self) -> str:
        # source bit selects big- vs little-endian; immediate is the width.
        conv_f = 'htobe' if self.encoding.source else 'htole'
        return self.unary_operand_inst_string(conv_f + str(self.immediate))

    def src_string(self, is_imm: bool) -> str:
        # Fix: return a string in both branches, as the annotation promises
        # (f-string callers rendered the int identically, but direct callers
        # previously received an int).
        if is_imm:
            return str(self.immediate)
        return self.num_to_reg(self.src_reg)

    def dst_string(self) -> str:
        return self.num_to_reg(self.dst_reg)

    def size_string(self) -> str:
        """Map the memory access size to a C-like type name."""
        sizes = {
            MemorySize.B: "u8", MemorySize.H: "u16",
            MemorySize.W: "u32", MemorySize.DW: "u64",
        }
        return sizes.get(self.encoding.size, "unknown")

    def num_to_reg(self, number: int) -> str:
        return f"r{number}"
from enum import IntEnum
import dataclasses
from typing import Optional
class InstructionClass(IntEnum):
    """eBPF instruction class: dispatch key used by Instruction.memory_string
    and Instruction.normal_string."""
    LD = 0x00     # rendered as a load ('ld')
    LDX = 0x01    # rendered as a load ('ldx')
    ST = 0x02     # rendered as a store ('st')
    STX = 0x03    # rendered as a store ('stx')
    ALU = 0x04    # ALU op (ALU64 is the 64-bit variant)
    JMP = 0x05    # jump
    JMP32 = 0x06  # jump, 32-bit operand variant -- rendered like JMP
    ALU64 = 0x07  # 64-bit ALU op -- rendered like ALU
@dataclasses.dataclass
class NormalOpcodeEncoding:
    """Decoded opcode for ALU/JMP-class instructions."""
    opcode: IntEnum               # an ALUOpcode or JMPOpcode member
    inst_class: InstructionClass
    # True -> second operand is the source register, False -> the immediate
    # (see Instruction.src_string call sites). For ALUOpcode.END it selects
    # big-endian ('htobe') vs little-endian ('htole') conversion.
    source: bool
class MemoryMode(IntEnum):
    """Addressing mode of a load/store instruction."""
    IMM = 0x00        # load of an immediate (rendered with 'll' suffix)
    ABS = 0x20        # absolute packet access via sk_buff
    IND = 0x40        # NOTE(review): not handled by Instruction.load_inst_string
    MEM = 0x60        # register + offset memory access
    RESERVED1 = 0x80
    RESERVED2 = 0xa0
    XADD = 0xc0       # atomic add (rendered with a 'lock' prefix)
class MemorySize(IntEnum):
    """Access width of a load/store (see Instruction.size_string)."""
    W = 0x00   # word, rendered as u32
    H = 0x08   # half-word, rendered as u16
    B = 0x10   # byte, rendered as u8
    DW = 0x18  # double word, rendered as u64
@dataclasses.dataclass
class MemoryOpcodeEncoding:
    """Decoded opcode for load/store-class instructions."""
    mode: MemoryMode              # addressing mode
    size: MemorySize              # access width
    inst_class: InstructionClass  # LD / LDX / ST / STX
class ALUOpcode(IntEnum):
    """ALU operation selector (rendered by Instruction.normal_alu_string)."""
    ADD = 0x00   # +
    SUB = 0x10   # -
    MUL = 0x20   # *
    DIV = 0x30   # /
    OR = 0x40    # |
    AND = 0x50   # &
    LSH = 0x60   # <<
    RSH = 0x70   # >> (logical)
    NEG = 0x80   # unary -
    MOD = 0x90   # %
    XOR = 0xa0   # ^
    MOV = 0xb0   # plain move
    ARSH = 0xc0  # >> (arithmetic)
    END = 0xd0   # endianness conversion
class JMPOpcode(IntEnum):
    """Jump operation selector (rendered by Instruction.normal_jmp_string)."""
    JA = 0x00    # unconditional goto
    JEQ = 0x10   # ==
    JGT = 0x20   # >
    JGE = 0x30   # >=
    JSET = 0x40  # & (bit test)
    JNE = 0x50   # !=
    JSGT = 0x60  # > (signed)
    JSGE = 0x70  # >= (signed)
    CALL = 0x80  # rendered as 'call <imm>'
    EXIT = 0x90  # rendered as 'exit'
    JLT = 0xa0   # <
    JLE = 0xb0   # <=
    JSLT = 0xc0  # < (signed)
    JSLE = 0xd0  # <= (signed)
class Instruction:
    """A decoded eBPF instruction.

    Holds the opcode encoding, source/destination registers, 16-bit offset
    and 32-bit immediate, and renders the instruction as human-readable
    pseudo-assembly via string().
    """

    def __init__(self, encoding, regs, offset: int, immediate: int):
        # encoding is a NormalOpcodeEncoding or a MemoryOpcodeEncoding.
        self.encoding = encoding
        self.src_reg, self.dst_reg = regs
        self.offset = offset
        self.immediate = immediate

    def string(self) -> str:
        """Dispatch on the encoding kind and return the rendered text."""
        # isinstance instead of type(...) == ... (idiomatic and subclass-safe).
        if isinstance(self.encoding, NormalOpcodeEncoding):
            return self.normal_string()
        if isinstance(self.encoding, MemoryOpcodeEncoding):
            return self.memory_string()
        return "unreachable"

    def memory_string(self) -> str:
        """Render a load/store instruction according to its class."""
        handlers = {
            InstructionClass.LD: (self.load_inst_string, 'ld'),
            InstructionClass.LDX: (self.load_inst_string, 'ldx'),
            InstructionClass.ST: (self.store_inst_string, 'st'),
            InstructionClass.STX: (self.store_inst_string, 'stx'),
        }
        entry = handlers.get(self.encoding.inst_class)
        if entry is None:
            return "unreachable"
        handler, mnemonic = entry
        return handler(mnemonic)

    def load_inst_string(self, mnemonic) -> str:
        """Render a load; *mnemonic* is kept for interface compatibility."""
        mode = self.encoding.mode
        if mode == MemoryMode.IMM:
            return f"{self.dst_string()} <- {self.src_string(True)} ll"
        if mode == MemoryMode.ABS:
            return f"{self.dst_string()} <- *({self.size_string()} *)sk_buff[{self.immediate + self.offset}]"
        if mode == MemoryMode.MEM:
            return f"{self.dst_string()} <- *({self.size_string()} *)({self.src_string(False)} + {self.offset})"
        # NOTE(review): MemoryMode.IND is not handled and falls through here.
        return "unreachable"

    def store_inst_string(self, mnemonic) -> str:
        """Render a store; *mnemonic* is kept for interface compatibility."""
        mode = self.encoding.mode
        if mode == MemoryMode.XADD:
            return f"lock *({self.size_string()} *)({self.dst_string()} + {self.offset}) <- {self.dst_string()} + {self.src_string(False)}"
        if mode == MemoryMode.MEM:
            return f"*({self.size_string()} *)({self.dst_string()} + {self.offset}) <- {self.src_string(False)}"
        return "unreachable"

    def normal_string(self) -> str:
        """Render an ALU/JMP instruction according to its class."""
        if self.encoding.inst_class in (InstructionClass.ALU, InstructionClass.ALU64):
            return self.normal_alu_string()
        if self.encoding.inst_class in (InstructionClass.JMP, InstructionClass.JMP32):
            return self.normal_jmp_string()
        return "unreachable"

    def normal_alu_string(self) -> str:
        """Render an ALU instruction via an opcode -> operator table."""
        binary_ops = {
            ALUOpcode.ADD: '+', ALUOpcode.SUB: '-', ALUOpcode.MUL: '*',
            ALUOpcode.DIV: '/', ALUOpcode.MOD: '%', ALUOpcode.AND: '&',
            ALUOpcode.OR: '|', ALUOpcode.XOR: '^', ALUOpcode.LSH: '<<',
            ALUOpcode.RSH: '>>(logic)', ALUOpcode.ARSH: '>>(arith)',
        }
        opcode = self.encoding.opcode
        if opcode in binary_ops:
            return self.binary_operands_inst_string(binary_ops[opcode])
        if opcode == ALUOpcode.MOV:
            return self.unary_operand_inst_string('')
        if opcode == ALUOpcode.NEG:
            return self.unary_operand_inst_string('-')
        if opcode == ALUOpcode.END:
            return self.endian_conversion_inst_string()
        return "unreachable!"

    def normal_jmp_string(self) -> str:
        """Render a jump instruction via an opcode -> comparison table."""
        jmp_ops = {
            JMPOpcode.JA: '', JMPOpcode.JEQ: '==', JMPOpcode.JNE: '!=',
            JMPOpcode.JGT: '>', JMPOpcode.JGE: '>=', JMPOpcode.JLT: '<',
            JMPOpcode.JLE: '<=', JMPOpcode.JSGT: '>(signed)',
            JMPOpcode.JSGE: '>=(signed)', JMPOpcode.JSLT: '<(signed)',
            JMPOpcode.JSLE: '<=(signed)', JMPOpcode.JSET: '&',
        }
        opcode = self.encoding.opcode
        if opcode == JMPOpcode.EXIT:
            return "exit"
        if opcode == JMPOpcode.CALL:
            return f"call {self.immediate}"
        if opcode in jmp_ops:
            return self.jmp_inst_string(jmp_ops[opcode])
        return "unreachable!"

    def jmp_inst_string(self, operator: str) -> str:
        """Render 'goto +off' or 'goto +off if dst OP src'."""
        plus_sign = '' if self.offset < 0 else '+'
        if operator == '':
            return f"goto {plus_sign}{self.offset}"
        return f"goto {plus_sign}{self.offset} if {self.dst_string()} {operator} {self.src_string(not self.encoding.source)}"

    def unary_operand_inst_string(self, operator: str) -> str:
        return f"{self.dst_string()} <- {operator}{self.src_string(not self.encoding.source)}"

    def binary_operands_inst_string(self, operator: str) -> str:
        return f"{self.dst_string()} <- {self.dst_string()} {operator} {self.src_string(not self.encoding.source)}"

    def endian_conversion_inst_string(self) -> str:
        # source bit selects big- vs little-endian; immediate is the width.
        conv_f = 'htobe' if self.encoding.source else 'htole'
        return self.unary_operand_inst_string(conv_f + str(self.immediate))

    def src_string(self, is_imm: bool) -> str:
        # Fix: return a string in both branches, as the annotation promises
        # (f-string callers rendered the int identically, but direct callers
        # previously received an int).
        if is_imm:
            return str(self.immediate)
        return self.num_to_reg(self.src_reg)

    def dst_string(self) -> str:
        return self.num_to_reg(self.dst_reg)

    def size_string(self) -> str:
        """Map the memory access size to a C-like type name."""
        sizes = {
            MemorySize.B: "u8", MemorySize.H: "u16",
            MemorySize.W: "u32", MemorySize.DW: "u64",
        }
        return sizes.get(self.encoding.size, "unknown")

    def num_to_reg(self, number: int) -> str:
        return f"r{number}"
from arkfbp.executer import Executer
# pylint: disable=line-too-long
from arkfbp.node import FunctionNode
from ...modeling import get_api_config, collect_model_mapping, get_serializer_node, set_flow_debug
# Editor your node here.
class SerializerCore(FunctionNode):
    """
    Generates all serializer nodes and field nodes dynamically, then returns
    the result to the siteapi flow.
    """

    def run(self, *args, **kwargs):
        """
        Initialize a master serializer node, run it to validate the request
        params, and finally run the handler function to contact the DB.

        Returns the serializer outputs on validation failure, the handler's
        result on success, or a 500 shutdown response on any exception.
        """
        try:
            print('Initializing master serializer node...')
            _, api_detail = get_api_config(self.inputs.method, self.flow.api_config)
            self.intervene(api_detail)
            # api_detail => {"name":"create_user", "type":"create", "request":{}, "response":{}}
            serializer_node = get_serializer_node(api_detail['request'], self.flow.config)
            print('Done!')
            # Validate the request data.
            print('Running master serializer node...')
            outputs = Executer.start_node(serializer_node, self.flow)
            print('Done!')
            if not self.flow.valid_status():
                return outputs
            print('Running handle function...')
            # Collect the class list of every native model involved.
            model_mapping = collect_model_mapping(api_detail['request'], self.flow.config)
            # TODO: take each model's field values from the corresponding
            # validated_data, then run the handler for every model.
            ret = serializer_node.handle(api_detail, model_mapping, *args, **kwargs)
            print('Done!')
            return ret
        # pylint: disable=broad-except
        except Exception as exception:
            # Fix: str(exception) instead of the non-idiomatic exception.__str__().
            return self.flow.shutdown(str(exception), response_status=500)

    def intervene(self, api_detail):
        """
        Change some properties of flow to interfere with the flow direction.
        """
        # debug or not
        set_flow_debug(self.flow, api_detail)
from arkfbp.executer import Executer
# pylint: disable=line-too-long
from arkfbp.node import FunctionNode
from ...modeling import get_api_config, collect_model_mapping, get_serializer_node, set_flow_debug
# Editor your node here.
class SerializerCore(FunctionNode):
    """
    Generates all serializer nodes and field nodes dynamically, then returns
    the result to the siteapi flow.
    """

    def run(self, *args, **kwargs):
        """
        Initialize a master serializer node, run it to validate the request
        params, and finally run the handler function to contact the DB.

        Returns the serializer outputs on validation failure, the handler's
        result on success, or a 500 shutdown response on any exception.
        """
        try:
            print('Initializing master serializer node...')
            _, api_detail = get_api_config(self.inputs.method, self.flow.api_config)
            self.intervene(api_detail)
            # api_detail => {"name":"create_user", "type":"create", "request":{}, "response":{}}
            serializer_node = get_serializer_node(api_detail['request'], self.flow.config)
            print('Done!')
            # Validate the request data.
            print('Running master serializer node...')
            outputs = Executer.start_node(serializer_node, self.flow)
            print('Done!')
            if not self.flow.valid_status():
                return outputs
            print('Running handle function...')
            # Collect the class list of every native model involved.
            model_mapping = collect_model_mapping(api_detail['request'], self.flow.config)
            # TODO: take each model's field values from the corresponding
            # validated_data, then run the handler for every model.
            ret = serializer_node.handle(api_detail, model_mapping, *args, **kwargs)
            print('Done!')
            return ret
        # pylint: disable=broad-except
        except Exception as exception:
            # Fix: str(exception) instead of the non-idiomatic exception.__str__().
            return self.flow.shutdown(str(exception), response_status=500)

    def intervene(self, api_detail):
        """
        Change some properties of flow to interfere with the flow direction.
        """
        # debug or not
        set_flow_debug(self.flow, api_detail)
import frappe
from frappe import _
from frappe.custom.doctype.custom_field.custom_field import create_custom_fields
def get_setup_stages(args=None):
    """Return the setup-wizard stage list that creates the Shipstation masters."""
    steps = [
        (create_customer_group, _("Failed to create Shipstation Customer Group")),
        (create_price_list, _("Failed to create Shipstation Price List")),
        (setup_custom_fields, _("Failed to create Shipstation custom fields")),
    ]
    return [{
        'status': _('Creating shipstation masters'),
        'fail_msg': _('Failed to create shipstation masters'),
        'tasks': [
            {'fn': fn, 'args': args, 'fail_msg': fail_msg}
            for fn, fail_msg in steps
        ],
    }]
def setup_shipstation():
    """
    Development helper to create the masters and custom fields in one call.
    """
    for step in (create_customer_group, create_price_list, setup_custom_fields):
        step()
def create_customer_group(args=None):
    """Create the 'ShipStation' Customer Group unless it already exists."""
    exists = frappe.db.get_value('Customer Group',
                                 {'customer_group_name': 'ShipStation'})
    if exists:
        return
    group = frappe.new_doc("Customer Group")
    group.customer_group_name = 'ShipStation'
    group.parent_customer_group = 'All Customer Groups'
    group.save()
def create_price_list(args=None):
    """Create the selling 'ShipStation' Price List unless it already exists."""
    exists = frappe.db.get_value('Price List', {'price_list_name': 'ShipStation'})
    if exists:
        return
    shipstation_list = frappe.new_doc("Price List")
    shipstation_list.price_list_name = 'ShipStation'
    shipstation_list.selling = True
    shipstation_list.save()
def setup_custom_fields(args=None):
    """Create the Shipstation custom fields on Sales Order, Sales Invoice
    and Delivery Note."""

    def data(fieldname, label, insert_after):
        # Read-only, non-translatable Data field.
        return dict(fieldtype="Data", fieldname=fieldname, read_only=1,
                    label=label, insert_after=insert_after, translatable=0)

    def section(fieldname, label, insert_after):
        return dict(fieldtype="Section Break", fieldname=fieldname,
                    label=label, insert_after=insert_after)

    # Fields shared by all three doctypes, placed under the Shipstation section.
    common = [
        data("shipstation_order_id", "Shipstation Order ID", "sb_shipstation"),
        dict(fieldtype="Column Break", fieldname="cb_shipstation",
             insert_after="shipstation_order_id"),
        data("marketplace", "Marketplace", "cb_shipstation"),
        data("marketplace_order_id", "Marketplace Order ID", "marketplace"),
        dict(fieldtype="Check", fieldname="has_pii", hidden=1,
             label="Has PII", insert_after="marketplace_order_id"),
    ]
    sales_order_fields = (
        [section("sb_shipstation", "Shipstation", "tax_id")] + common
    )
    sales_invoice_fields = (
        [section("sb_shipstation", "Shipstation", "amended_from")]
        + common
        + [data("shipstation_shipment_id", "Shipstation Shipment ID",
                "shipstation_order_id")]
    )
    delivery_note_fields = (
        [section("sb_shipstation", "Shipstation", "return_against")]
        + common
        + [
            data("shipstation_shipment_id", "Shipstation Shipment ID",
                 "shipstation_order_id"),
            section("shipment_details", "Shipment Details", "has_pii"),
            data("carrier", "Carrier", "shipment_details"),
            data("tracking_number", "Tracking Number", "carrier"),
            dict(fieldtype="Column Break", fieldname="columnbreak91",
                 insert_after="tracking_number"),
            data("carrier_service", "Carrier Service", "columnbreak91"),
        ]
    )
    create_custom_fields({
        "Sales Order": sales_order_fields,
        "Sales Invoice": sales_invoice_fields,
        "Delivery Note": delivery_note_fields,
    })
import frappe
from frappe import _
from frappe.custom.doctype.custom_field.custom_field import create_custom_fields
def get_setup_stages(args=None):
    """Return the setup-wizard stage list that creates the Shipstation masters."""
    steps = [
        (create_customer_group, _("Failed to create Shipstation Customer Group")),
        (create_price_list, _("Failed to create Shipstation Price List")),
        (setup_custom_fields, _("Failed to create Shipstation custom fields")),
    ]
    return [{
        'status': _('Creating shipstation masters'),
        'fail_msg': _('Failed to create shipstation masters'),
        'tasks': [
            {'fn': fn, 'args': args, 'fail_msg': fail_msg}
            for fn, fail_msg in steps
        ],
    }]
def setup_shipstation():
    """
    Development helper to create the masters and custom fields in one call.
    """
    for step in (create_customer_group, create_price_list, setup_custom_fields):
        step()
def create_customer_group(args=None):
    """Create the 'ShipStation' Customer Group unless it already exists."""
    exists = frappe.db.get_value('Customer Group',
                                 {'customer_group_name': 'ShipStation'})
    if exists:
        return
    group = frappe.new_doc("Customer Group")
    group.customer_group_name = 'ShipStation'
    group.parent_customer_group = 'All Customer Groups'
    group.save()
def create_price_list(args=None):
    """Create the selling 'ShipStation' Price List unless it already exists."""
    exists = frappe.db.get_value('Price List', {'price_list_name': 'ShipStation'})
    if exists:
        return
    shipstation_list = frappe.new_doc("Price List")
    shipstation_list.price_list_name = 'ShipStation'
    shipstation_list.selling = True
    shipstation_list.save()
def setup_custom_fields(args=None):
    """Create the Shipstation custom fields on Sales Order, Sales Invoice
    and Delivery Note."""

    def data(fieldname, label, insert_after):
        # Read-only, non-translatable Data field.
        return dict(fieldtype="Data", fieldname=fieldname, read_only=1,
                    label=label, insert_after=insert_after, translatable=0)

    def section(fieldname, label, insert_after):
        return dict(fieldtype="Section Break", fieldname=fieldname,
                    label=label, insert_after=insert_after)

    # Fields shared by all three doctypes, placed under the Shipstation section.
    common = [
        data("shipstation_order_id", "Shipstation Order ID", "sb_shipstation"),
        dict(fieldtype="Column Break", fieldname="cb_shipstation",
             insert_after="shipstation_order_id"),
        data("marketplace", "Marketplace", "cb_shipstation"),
        data("marketplace_order_id", "Marketplace Order ID", "marketplace"),
        dict(fieldtype="Check", fieldname="has_pii", hidden=1,
             label="Has PII", insert_after="marketplace_order_id"),
    ]
    sales_order_fields = (
        [section("sb_shipstation", "Shipstation", "tax_id")] + common
    )
    sales_invoice_fields = (
        [section("sb_shipstation", "Shipstation", "amended_from")]
        + common
        + [data("shipstation_shipment_id", "Shipstation Shipment ID",
                "shipstation_order_id")]
    )
    delivery_note_fields = (
        [section("sb_shipstation", "Shipstation", "return_against")]
        + common
        + [
            data("shipstation_shipment_id", "Shipstation Shipment ID",
                 "shipstation_order_id"),
            section("shipment_details", "Shipment Details", "has_pii"),
            data("carrier", "Carrier", "shipment_details"),
            data("tracking_number", "Tracking Number", "carrier"),
            dict(fieldtype="Column Break", fieldname="columnbreak91",
                 insert_after="tracking_number"),
            data("carrier_service", "Carrier Service", "columnbreak91"),
        ]
    )
    create_custom_fields({
        "Sales Order": sales_order_fields,
        "Sales Invoice": sales_invoice_fields,
        "Delivery Note": delivery_note_fields,
    })
from __future__ import print_function
from utils import *
import os
import json
import shutil
import time
import uuid
import yaml
def build_image(path_sample, config):
    """Build the Docker image described by *config* from directory *path_sample*.

    Skips the build when <image_name>:<tag_name> already exists locally.
    Streams the build output to the console and asserts the build succeeded.
    """
    global log
    service_name = config["service_name"]
    image_name = config["image_name"]
    tag_name = config["tag_name"]
    image_tag = "{}:{}".format(image_name, tag_name)
    try:
        docker_client.images.get(image_tag)
        log.info("[{}] Image has been built, skip it".format(service_name))
        return
    except Exception:
        # Image not present locally -- fall through and build it.
        pass
    log.info("[{}] Start to build image".format(service_name))
    response = docker_api_client.build(
        path=path_sample, rm=True, tag=image_tag, forcerm=True, nocache=False
    )
    is_success = False
    for line in response:
        log_stream = json.loads(line)
        # Fix: dict views are not indexable on Python 3; take the first
        # key explicitly (works on Python 2 as well).
        first_key = next(iter(log_stream), None)
        if first_key in ('stream', 'error'):
            value = log_stream[first_key].strip()
            if value:
                try:
                    os.system("clear")
                    print(value)
                except Exception:
                    pass
        # Fix: the low-level build API yields bytes on Python 3; decode
        # before the substring check (a bytes/str 'in' mix raises TypeError).
        if isinstance(line, bytes):
            line = line.decode("utf-8", errors="replace")
        if "Successfully built" in line:
            is_success = True
    assert is_success
def checkpoint_container(config, settings):
    """Run a container from the service image, wait until it logs its
    readiness marker, then commit it as an image and create an on-disk
    checkpoint.

    NOTE(review): relies on ``docker_client``, ``log``, ``service_port_host``
    and ``tries_max`` coming from the ``utils`` star import -- confirm they
    are defined there.
    """
    global log
    service_name = config["service_name"]
    image_name = config["image_name"]
    tag_name = config["tag_name"]
    checkpoint_tag_name = config["checkpoint_tag_name"]
    checkpoint_name = config["checkpoint_name"]
    service_port = config["service_port"]
    start_cmd = config["start_cmd"]
    msg_checkpoint = config["msg_checkpoint"]  # log line that signals readiness
    path_checkpoint_parent = settings["checkpoint_dir"]
    log.info("[{}] Start to create container".format(service_name))
    # Unique container name so repeated runs never collide.
    container_name = "{}-{}-{}".format(image_name, tag_name, str(uuid.uuid4()))
    command_create_checkpoint = "docker checkpoint create {} --checkpoint-dir={} {}".format(container_name,
                                                                                           path_checkpoint_parent,
                                                                                           checkpoint_name)
    path_checkpoint_current = os.path.join(path_checkpoint_parent, checkpoint_name)
    if os.path.exists(path_checkpoint_current):
        # A stale checkpoint directory would clash with the new one.
        log.info("[{}] Checkpoint {} exists, remove it".format(service_name,path_checkpoint_current))
        shutil.rmtree(path_checkpoint_current)
    docker_client.containers.run(
        image="{}:{}".format(image_name, tag_name),
        command=start_cmd,
        detach=True,
        user="root",
        ports={service_port: service_port_host},  # service_port_host: presumably from utils -- TODO confirm
        name=container_name,
        security_opt=["seccomp=unconfined"]  # presumably needed for checkpointing -- TODO confirm
    )
    try_count = 1
    while True:
        if try_count > tries_max:  # tries_max: presumably from utils -- TODO confirm
            log.error("[{}] create checkpoint failed: max tries reached".format(service_name))
            break
        time.sleep(3)
        docker_logs = docker_client.containers.get(container_name).logs()
        if msg_checkpoint in docker_logs:
            log.info("[{}] Start to checkpoint".format(service_name))
            # Persist the warmed-up container as <image_name>:<checkpoint_tag_name>,
            # then create the on-disk checkpoint via the docker CLI.
            docker_client.containers.get(container_name).commit(repository=image_name, tag=checkpoint_tag_name)
            log.info("[{}] Container is committed as: {}:{}".format(service_name,image_name,checkpoint_tag_name))
            os.system(command_create_checkpoint)
            docker_client.containers.get(container_name).remove(force=True)
            log.info("[{}] Checkpoint finished".format(service_name))
            break
        else:
            try_count += 1
            log.info("[{}] Waitting for container ready".format(service_name))
def main():
    """Iterate every service directory under code_dir, build its image and
    create a runtime checkpoint unless one already exists."""
    global log
    log, settings = init()
    code_dir = settings["code_dir"]
    for entry in os.listdir(code_dir):
        path_code_dir = os.path.join(code_dir, entry)
        path_dockerfile = os.path.join(path_code_dir, 'dockerfile')
        assert os.path.exists(path_dockerfile)
        path_yaml = os.path.join(path_code_dir, 'config.yaml')
        assert os.path.exists(path_yaml)
        # Fix: close the config file (the original leaked the handle).
        with open(path_yaml) as config_file:
            config = yaml.load(config_file.read(), Loader=yaml.FullLoader)
        if os.path.exists(os.path.join(settings["checkpoint_dir"], config["checkpoint_name"], "config.json")):
            log.info("[checkpoint_manager] Service {} checkpoint exists, skip it".format(path_code_dir))
            continue
        log.info("[checkpoint_manager] Start to process service {}".format(path_code_dir))
        build_image(path_code_dir, config)
        checkpoint_container(config, settings)
if __name__ == "__main__":
main() | checkpoint_manager.py | from __future__ import print_function
from utils import *
import os
import json
import shutil
import time
import uuid
import yaml
def build_image(path_sample, config):
    """Build the Docker image described by *config* from directory *path_sample*.

    Skips the build when <image_name>:<tag_name> already exists locally.
    Streams the build output to the console and asserts the build succeeded.
    """
    global log
    service_name = config["service_name"]
    image_name = config["image_name"]
    tag_name = config["tag_name"]
    image_tag = "{}:{}".format(image_name, tag_name)
    try:
        docker_client.images.get(image_tag)
        log.info("[{}] Image has been built, skip it".format(service_name))
        return
    except Exception:
        # Image not present locally -- fall through and build it.
        pass
    log.info("[{}] Start to build image".format(service_name))
    response = docker_api_client.build(
        path=path_sample, rm=True, tag=image_tag, forcerm=True, nocache=False
    )
    is_success = False
    for line in response:
        log_stream = json.loads(line)
        # Fix: dict views are not indexable on Python 3; take the first
        # key explicitly (works on Python 2 as well).
        first_key = next(iter(log_stream), None)
        if first_key in ('stream', 'error'):
            value = log_stream[first_key].strip()
            if value:
                try:
                    os.system("clear")
                    print(value)
                except Exception:
                    pass
        # Fix: the low-level build API yields bytes on Python 3; decode
        # before the substring check (a bytes/str 'in' mix raises TypeError).
        if isinstance(line, bytes):
            line = line.decode("utf-8", errors="replace")
        if "Successfully built" in line:
            is_success = True
    assert is_success
def checkpoint_container(config, settings):
    """Run a container from the service image and create a docker checkpoint.

    Starts the container, polls its logs until the configured readiness
    message appears, then commits the container as ``image:checkpoint_tag``
    and creates a CRIU checkpoint in ``settings["checkpoint_dir"]``.

    Args:
        config (dict): service configuration (names, tags, port, start
            command, readiness message).
        settings (dict): global settings; only ``checkpoint_dir`` is used.
    """
    global log
    service_name = config["service_name"]
    image_name = config["image_name"]
    tag_name = config["tag_name"]
    checkpoint_tag_name = config["checkpoint_tag_name"]
    checkpoint_name = config["checkpoint_name"]
    service_port = config["service_port"]
    start_cmd = config["start_cmd"]
    msg_checkpoint = config["msg_checkpoint"]
    path_checkpoint_parent = settings["checkpoint_dir"]
    log.info("[{}] Start to create container".format(service_name))
    # Unique container name so repeated runs never collide.
    container_name = "{}-{}-{}".format(image_name, tag_name, str(uuid.uuid4()))
    command_create_checkpoint = "docker checkpoint create {} --checkpoint-dir={} {}".format(
        container_name, path_checkpoint_parent, checkpoint_name)
    path_checkpoint_current = os.path.join(path_checkpoint_parent, checkpoint_name)
    if os.path.exists(path_checkpoint_current):
        log.info("[{}] Checkpoint {} exists, remove it".format(service_name, path_checkpoint_current))
        shutil.rmtree(path_checkpoint_current)
    # NOTE(review): service_port_host and tries_max are not defined in this
    # module; presumably provided by ``from utils import *`` -- confirm.
    docker_client.containers.run(
        image="{}:{}".format(image_name, tag_name),
        command=start_cmd,
        detach=True,
        user="root",
        ports={service_port: service_port_host},
        name=container_name,
        security_opt=["seccomp=unconfined"]
    )
    try_count = 1
    while True:
        if try_count > tries_max:
            log.error("[{}] create checkpoint failed: max tries reached".format(service_name))
            break
        time.sleep(3)
        docker_logs = docker_client.containers.get(container_name).logs()
        # container.logs() returns bytes on Python 3; decode before the
        # substring test (str-in-bytes raises TypeError).
        if isinstance(docker_logs, bytes):
            docker_logs = docker_logs.decode("utf-8", errors="replace")
        if msg_checkpoint in docker_logs:
            log.info("[{}] Start to checkpoint".format(service_name))
            docker_client.containers.get(container_name).commit(repository=image_name, tag=checkpoint_tag_name)
            log.info("[{}] Container is committed as: {}:{}".format(service_name, image_name, checkpoint_tag_name))
            os.system(command_create_checkpoint)
            docker_client.containers.get(container_name).remove(force=True)
            log.info("[{}] Checkpoint finished".format(service_name))
            break
        else:
            try_count += 1
            log.info("[{}] Waitting for container ready".format(service_name))
def main():
    """Build and checkpoint every service found under the configured code dir.

    For each subdirectory of ``settings["code_dir"]`` that contains a
    ``dockerfile`` and a ``config.yaml``, build its docker image and create a
    container checkpoint, skipping services whose checkpoint already exists.
    """
    global log
    log, settings = init()
    code_dir = settings["code_dir"]
    for entry in os.listdir(code_dir):
        path_code_dir = os.path.join(code_dir, entry)
        path_dockerfile = os.path.join(path_code_dir, 'dockerfile')
        assert os.path.exists(path_dockerfile)
        path_yaml = os.path.join(path_code_dir, 'config.yaml')
        assert os.path.exists(path_yaml)
        # Context manager closes the config file promptly; the original
        # open(...).read() left the handle open until garbage collection.
        with open(path_yaml) as f:
            config = yaml.load(f.read(), Loader=yaml.FullLoader)
        if os.path.exists(os.path.join(settings["checkpoint_dir"], config["checkpoint_name"], "config.json")):
            log.info("[checkpoint_manager] Service {} checkpoint exists, skip it".format(path_code_dir))
            continue
        log.info("[checkpoint_manager] Start to process service {}".format(path_code_dir))
        build_image(path_code_dir, config)
        checkpoint_container(config, settings)
if __name__ == "__main__":
main() | 0.273963 | 0.081593 |
from ad_api.base import Client, sp_endpoint, fill_query_params, ApiResponse
class BidRecommendations(Client):
    """Amazon Advertising API for Sponsored Display: Bid Recommendations.

    Documentation: https://advertising.amazon.com/API/docs/en-us/sponsored-display/3-0/openapi#/Bid%20Recommendations

    Programmatic access to bid recommendations for Sponsored Display
    campaigns. For functional details see the Sponsored Display Support
    Center; for onboarding see the Advertising API account-setup topic.
    """

    @sp_endpoint('/sd/targets/bid/recommendations', method='POST')
    def list_targets_bid_recommendations(self, **kwargs) -> ApiResponse:
        r"""Return bid recommendations for a list of targeting clauses.

        For each targeting clause in the request a corresponding bid
        recommendation is returned. Up to 100 targeting clauses are accepted.
        Recommended bids are derived from the last 7 days of winning auction
        bids for the related targeting clause.

        body: SDTargetingBidRecommendationsRequestV32 | REQUIRED
            | '**products**': *SDGoalProduct* list -- products to tailor
              recommendations for (minItems: 1, maxItems: 10000).
            | '**bidOptimization**': *string*, what the recommended bids are
              optimized for; Enum: [ clicks, conversions, reach ]
              ("reach" is not currently supported).
            | '**costType**': *string*, performance metric the recommendations
              are optimized for; Enum: [ cpc, vcpm ]
              ("vcpm" is not currently supported).
            | '**targetingClauses**': list of SDTargetingClauseV31, each with:
              | '**expressionType**': *string*, Enum: [ manual, auto ]
                (tactic T00020 ad groups only allow manual targeting).
              | '**expression**': SDTargetingExpressionV31 -- one of:
                TargetingPredicate (product targeting, T00020) with
                '**type**' in [ asinSameAs, asinCategorySameAs,
                asinBrandSameAs, asinPriceBetween, asinPriceGreaterThan,
                asinPriceLessThan, asinReviewRatingLessThan,
                asinReviewRatingGreaterThan, asinReviewRatingBetween,
                asinIsPrimeShippingEligible, asinAgeRangeSameAs,
                asinGenreSameAs ] and '**value**' (e.g. B0123456789); or
                TargetingPredicateNested (audience targeting, T00030) with
                '**type**' in [ views, audience, purchases ] and '**value**'
                a list of TargetingPredicateBase entries ('lookback' value one
                of 7/14/30/60/90/180/365; 'audienceSameAs' for Amazon
                Audiences).

        Returns:
            ApiResponse
        """
        content_type = 'application/vnd.sdtargetingrecommendations.v3.2+json'
        headers = {'Content-Type': content_type}
        return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs, headers=headers)
class BidRecommendations(Client):
    """Amazon Advertising API for Sponsored Display: Bid Recommendations.

    Documentation: https://advertising.amazon.com/API/docs/en-us/sponsored-display/3-0/openapi#/Bid%20Recommendations

    Programmatic access to bid recommendations for Sponsored Display
    campaigns. For functional details see the Sponsored Display Support
    Center; for onboarding see the Advertising API account-setup topic.
    """

    @sp_endpoint('/sd/targets/bid/recommendations', method='POST')
    def list_targets_bid_recommendations(self, **kwargs) -> ApiResponse:
        r"""Return bid recommendations for a list of targeting clauses.

        For each targeting clause in the request a corresponding bid
        recommendation is returned. Up to 100 targeting clauses are accepted.
        Recommended bids are derived from the last 7 days of winning auction
        bids for the related targeting clause.

        body: SDTargetingBidRecommendationsRequestV32 | REQUIRED
            | '**products**': *SDGoalProduct* list -- products to tailor
              recommendations for (minItems: 1, maxItems: 10000).
            | '**bidOptimization**': *string*, what the recommended bids are
              optimized for; Enum: [ clicks, conversions, reach ]
              ("reach" is not currently supported).
            | '**costType**': *string*, performance metric the recommendations
              are optimized for; Enum: [ cpc, vcpm ]
              ("vcpm" is not currently supported).
            | '**targetingClauses**': list of SDTargetingClauseV31, each with:
              | '**expressionType**': *string*, Enum: [ manual, auto ]
                (tactic T00020 ad groups only allow manual targeting).
              | '**expression**': SDTargetingExpressionV31 -- one of:
                TargetingPredicate (product targeting, T00020) with
                '**type**' in [ asinSameAs, asinCategorySameAs,
                asinBrandSameAs, asinPriceBetween, asinPriceGreaterThan,
                asinPriceLessThan, asinReviewRatingLessThan,
                asinReviewRatingGreaterThan, asinReviewRatingBetween,
                asinIsPrimeShippingEligible, asinAgeRangeSameAs,
                asinGenreSameAs ] and '**value**' (e.g. B0123456789); or
                TargetingPredicateNested (audience targeting, T00030) with
                '**type**' in [ views, audience, purchases ] and '**value**'
                a list of TargetingPredicateBase entries ('lookback' value one
                of 7/14/30/60/90/180/365; 'audienceSameAs' for Amazon
                Audiences).

        Returns:
            ApiResponse
        """
        content_type = 'application/vnd.sdtargetingrecommendations.v3.2+json'
        headers = {'Content-Type': content_type}
        return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs, headers=headers)
import torch
import torch.distributions as dist
from torch import nn
import os
from im2mesh.encoder import encoder_dict, encoder_temporal_dict
from im2mesh.oflow import models, training, generation
from im2mesh import data
def get_decoder(cfg, device, dim=3, c_dim=0, z_dim=0):
    ''' Returns a decoder instance, or None if the config disables it.

    Args:
        cfg (yaml config): yaml config object
        device (device): PyTorch device
        dim (int): points dimension
        c_dim (int): dimension of conditioned code c
        z_dim (int): dimension of latent code z
    '''
    name = cfg['model']['decoder']
    dec_kwargs = cfg['model']['decoder_kwargs']
    # A falsy decoder entry means no decoder is used.
    if not name:
        return None
    return models.decoder_dict[name](
        dim=dim, z_dim=z_dim, c_dim=c_dim, **dec_kwargs).to(device)
def get_velocity_field(cfg, device, dim=3, c_dim=0, z_dim=0):
    ''' Returns a velocity field instance, or None if the config disables it.

    Args:
        cfg (yaml config): yaml config object
        device (device): PyTorch device
        dim (int): points dimension
        c_dim (int): dimension of conditioned code c
        z_dim (int): dimension of latent code z
    '''
    name = cfg['model']['velocity_field']
    vf_kwargs = cfg['model']['velocity_field_kwargs']
    # A falsy velocity_field entry means no velocity field is used.
    if not name:
        return None
    return models.velocity_field_dict[name](
        out_dim=dim, z_dim=z_dim, c_dim=c_dim, **vf_kwargs).to(device)
def get_encoder(cfg, device, dataset=None, c_dim=0):
    ''' Returns an encoder instance.

    If the input type is 'idx', the encoder consists of an embedding layer.

    Args:
        cfg (yaml config): yaml config object
        device (device): PyTorch device
        dataset (dataset): dataset (only used for the 'idx' embedding size)
        c_dim (int): dimension of conditioned code c
    '''
    name = cfg['model']['encoder']
    enc_kwargs = cfg['model']['encoder_kwargs']
    if name == 'idx':
        # One embedding vector per dataset item, optionally followed by a
        # learned linear projection to c_dim.
        if cfg['model']['learn_embedding']:
            return nn.Sequential(
                nn.Embedding(len(dataset), 128),
                nn.Linear(128, c_dim)).to(device)
        return nn.Embedding(len(dataset), c_dim).to(device)
    if name is None:
        return None
    return encoder_dict[name](c_dim=c_dim, **enc_kwargs).to(device)
def get_encoder_latent_temporal(cfg, device, c_dim=0, z_dim=0):
    ''' Returns a temporal latent encoder instance, or None if disabled.

    Args:
        cfg (yaml config): yaml config object
        device (device): PyTorch device
        c_dim (int): dimension of conditioned code c
        z_dim (int): dimension of latent code z
    '''
    lt_kwargs = cfg['model']['encoder_latent_temporal_kwargs']
    name = cfg['model']['encoder_latent_temporal']
    if not name:
        return None
    constructor = models.encoder_latent_dict[name]
    return constructor(z_dim=z_dim, c_dim=c_dim, **lt_kwargs).to(device)
def get_encoder_latent(cfg, device, c_dim=0, z_dim=0):
    ''' Returns a latent encoder instance, or None if disabled.

    Args:
        cfg (yaml config): yaml config object
        device (device): PyTorch device
        c_dim (int): dimension of conditioned code c
        z_dim (int): dimension of latent code z
    '''
    latent_kwargs = cfg['model']['encoder_latent_kwargs']
    name = cfg['model']['encoder_latent']
    if not name:
        return None
    constructor = models.encoder_latent_dict[name]
    return constructor(z_dim=z_dim, c_dim=c_dim, **latent_kwargs).to(device)
def get_encoder_temporal(cfg, device, dataset=None, c_dim=0, z_dim=0):
    ''' Returns a temporal encoder instance, or None if disabled.

    Args:
        cfg (yaml config): yaml config object
        device (device): PyTorch device
        dataset (dataset): dataset (only used for the 'idx' embedding size)
        c_dim (int): dimension of conditioned code c
        z_dim (int): dimension of latent code z
    '''
    name = cfg['model']['encoder_temporal']
    temp_kwargs = cfg['model']['encoder_temporal_kwargs']
    if not name:
        return None
    if name == 'idx':
        # Index-based encoder: one embedding per dataset item, optionally
        # projected to c_dim with a learned linear layer.
        if cfg['model']['learn_embedding']:
            return nn.Sequential(
                nn.Embedding(len(dataset), 128),
                nn.Linear(128, c_dim)).to(device)
        return nn.Embedding(len(dataset), c_dim).to(device)
    return encoder_temporal_dict[name](c_dim=c_dim, **temp_kwargs).to(device)
def get_model(cfg, device=None, dataset=None, **kwargs):
    ''' Returns an OFlow model instance.

    Depending on the experimental setup, it consists of encoders,
    latent encoders, a velocity field, and a decoder instance.

    Args:
        cfg (yaml config): yaml config object
        device (device): PyTorch device
        dataset (dataset): dataset (used by index-based encoders)
    '''
    model_cfg = cfg['model']
    dim = cfg['data']['dim']
    input_type = cfg['data']['input_type']
    z_dim = model_cfg['z_dim']
    c_dim = model_cfg['c_dim']
    # Build the individual components; each helper may return None
    # depending on the configuration. Construction order is kept stable.
    decoder = get_decoder(cfg, device, dim, c_dim, z_dim)
    velocity_field = get_velocity_field(cfg, device, dim, c_dim, z_dim)
    encoder = get_encoder(cfg, device, dataset, c_dim)
    encoder_latent = get_encoder_latent(cfg, device, c_dim, z_dim)
    encoder_latent_temporal = get_encoder_latent_temporal(
        cfg, device, c_dim, z_dim)
    encoder_temporal = get_encoder_temporal(cfg, device, dataset, c_dim, z_dim)
    p0_z = get_prior_z(cfg, device)
    # Assemble the full Occupancy Flow model.
    return models.OccupancyFlow(
        decoder=decoder, encoder=encoder, encoder_latent=encoder_latent,
        encoder_latent_temporal=encoder_latent_temporal,
        encoder_temporal=encoder_temporal, vector_field=velocity_field,
        ode_step_size=model_cfg['ode_step_size'],
        use_adjoint=model_cfg['use_adjoint'],
        rtol=model_cfg['rtol'], atol=model_cfg['atol'],
        ode_solver=model_cfg['ode_solver'],
        p0_z=p0_z, device=device, input_type=input_type)
def get_trainer(model, optimizer, cfg, device, **kwargs):
    ''' Returns an OFlow trainer instance.

    Args:
        model (nn.Module): OFlow model
        optimizer (optimizer): PyTorch optimizer
        cfg (yaml config): yaml config object
        device (device): PyTorch device
    '''
    model_cfg = cfg['model']
    train_cfg = cfg['training']
    # Visualizations are written to <out_dir>/vis.
    vis_dir = os.path.join(train_cfg['out_dir'], 'vis')
    return training.Trainer(
        model, optimizer, device=device,
        input_type=cfg['data']['input_type'],
        vis_dir=vis_dir, threshold=cfg['test']['threshold'],
        eval_sample=train_cfg['eval_sample'],
        loss_corr=model_cfg['loss_corr'],
        loss_recon=model_cfg['loss_recon'],
        loss_corr_bw=model_cfg['loss_corr_bw'],
        vae_beta=model_cfg['vae_beta'])
def get_generator(model, cfg, device, **kwargs):
    ''' Returns an OFlow generator instance.

    It provides methods to extract the final meshes from the OFlow
    representation.

    Args:
        model (nn.Module): OFlow model
        cfg (yaml config): yaml config object
        device (device): PyTorch device
    '''
    gen_cfg = cfg['generation']
    return generation.Generator3D(
        model,
        device=device,
        threshold=cfg['test']['threshold'],
        resolution0=gen_cfg['resolution_0'],
        upsampling_steps=gen_cfg['upsampling_steps'],
        padding=gen_cfg['padding'],
        sample=gen_cfg['use_sampling'],
        refinement_step=gen_cfg['refinement_step'],
        simplify_nfaces=gen_cfg['simplify_nfaces'],
        n_time_steps=gen_cfg['n_time_steps'],
        mesh_color=gen_cfg['mesh_color'],
        only_end_time_points=gen_cfg['only_end_time_points'],
        interpolate=gen_cfg['interpolate'],
        fix_z=gen_cfg['fix_z'],
        fix_zt=gen_cfg['fix_zt'],
    )
def get_prior_z(cfg, device, **kwargs):
    ''' Returns the prior distribution of latent code z.

    The prior is a standard normal of dimension ``cfg['model']['z_dim']``.

    Args:
        cfg (yaml config): yaml config object
        device (device): PyTorch device
    '''
    z_dim = cfg['model']['z_dim']
    loc = torch.zeros(z_dim, device=device)
    scale = torch.ones(z_dim, device=device)
    return dist.Normal(loc, scale)
def get_transforms(cfg):
    ''' Returns transform objects for training and evaluation.

    Args:
        cfg (yaml config): yaml config object
    '''
    n_pcl = cfg['data']['n_training_pcl_points']
    n_pt = cfg['data']['n_training_points']
    n_pt_eval = cfg['training']['n_eval_points']
    # Training subsampling is random; evaluation subsampling is deterministic.
    pt_train = data.SubsamplePoints(n_pt)
    pt_val = data.SubsamplePointsSeq(n_pt_eval, random=False)
    pcl_val = data.SubsamplePointcloudSeq(n_pt_eval, random=False)
    pcl_train = data.SubsamplePointcloudSeq(n_pcl, connected_samples=True)
    return pt_train, pt_val, pcl_train, pcl_val
def get_data_fields(mode, cfg):
    ''' Returns data fields for the given mode.

    Args:
        mode (str): mode (train|val|test)
        cfg (yaml config): yaml config object
    '''
    fields = {}
    seq_len = cfg['data']['length_sequence']
    p_folder = cfg['data']['points_iou_seq_folder']
    pcl_folder = cfg['data']['pointcloud_seq_folder']
    mesh_folder = cfg['data']['mesh_seq_folder']
    generate_interpolate = cfg['generation']['interpolate']
    correspondence = cfg['generation']['correspondence']
    unpackbits = cfg['data']['points_unpackbits']
    # Transformation
    transf_pt, transf_pt_val, transf_pcl, transf_pcl_val = get_transforms(cfg)
    # Fields
    pts_iou_field = data.PointsSubseqField
    pts_corr_field = data.PointCloudSubseqField
    if mode == 'train':
        if cfg['model']['loss_recon']:
            # Occupancy points at the fixed first time step plus occupancy
            # points over the whole sequence.
            fields['points'] = pts_iou_field(p_folder, transform=transf_pt,
                                             seq_len=seq_len,
                                             fixed_time_step=0,
                                             unpackbits=unpackbits)
            fields['points_t'] = pts_iou_field(p_folder,
                                               transform=transf_pt,
                                               seq_len=seq_len,
                                               unpackbits=unpackbits)
        # Connectivity (correspondence) loss needs point-cloud sequences.
        if cfg['model']['loss_corr']:
            fields['pointcloud'] = pts_corr_field(pcl_folder,
                                                  transform=transf_pcl,
                                                  seq_len=seq_len)
    elif mode == 'val':
        fields['points'] = pts_iou_field(p_folder, transform=transf_pt_val,
                                         all_steps=True, seq_len=seq_len,
                                         unpackbits=unpackbits)
        fields['points_mesh'] = pts_corr_field(pcl_folder,
                                               transform=transf_pcl_val,
                                               seq_len=seq_len)
    elif mode == 'test' and (generate_interpolate or correspondence):
        # Meshes are only needed for interpolation / correspondence
        # evaluation at test time.
        fields['mesh'] = data.MeshSubseqField(mesh_folder, seq_len=seq_len,
                                              only_end_points=True)
    return fields
import torch.distributions as dist
from torch import nn
import os
from im2mesh.encoder import encoder_dict, encoder_temporal_dict
from im2mesh.oflow import models, training, generation
from im2mesh import data
def get_decoder(cfg, device, dim=3, c_dim=0, z_dim=0):
    ''' Returns a decoder instance, or None if the config disables it.

    Args:
        cfg (yaml config): yaml config object
        device (device): PyTorch device
        dim (int): points dimension
        c_dim (int): dimension of conditioned code c
        z_dim (int): dimension of latent code z
    '''
    name = cfg['model']['decoder']
    dec_kwargs = cfg['model']['decoder_kwargs']
    # A falsy decoder entry means no decoder is used.
    if not name:
        return None
    return models.decoder_dict[name](
        dim=dim, z_dim=z_dim, c_dim=c_dim, **dec_kwargs).to(device)
def get_velocity_field(cfg, device, dim=3, c_dim=0, z_dim=0):
    ''' Returns a velocity field instance, or None if the config disables it.

    Args:
        cfg (yaml config): yaml config object
        device (device): PyTorch device
        dim (int): points dimension
        c_dim (int): dimension of conditioned code c
        z_dim (int): dimension of latent code z
    '''
    name = cfg['model']['velocity_field']
    vf_kwargs = cfg['model']['velocity_field_kwargs']
    # A falsy velocity_field entry means no velocity field is used.
    if not name:
        return None
    return models.velocity_field_dict[name](
        out_dim=dim, z_dim=z_dim, c_dim=c_dim, **vf_kwargs).to(device)
def get_encoder(cfg, device, dataset=None, c_dim=0):
    ''' Returns an encoder instance.

    If the input type is 'idx', the encoder consists of an embedding layer.

    Args:
        cfg (yaml config): yaml config object
        device (device): PyTorch device
        dataset (dataset): dataset (only used for the 'idx' embedding size)
        c_dim (int): dimension of conditioned code c
    '''
    name = cfg['model']['encoder']
    enc_kwargs = cfg['model']['encoder_kwargs']
    if name == 'idx':
        # One embedding vector per dataset item, optionally followed by a
        # learned linear projection to c_dim.
        if cfg['model']['learn_embedding']:
            return nn.Sequential(
                nn.Embedding(len(dataset), 128),
                nn.Linear(128, c_dim)).to(device)
        return nn.Embedding(len(dataset), c_dim).to(device)
    if name is None:
        return None
    return encoder_dict[name](c_dim=c_dim, **enc_kwargs).to(device)
def get_encoder_latent_temporal(cfg, device, c_dim=0, z_dim=0):
    ''' Returns a temporal latent encoder instance, or None if disabled.

    Args:
        cfg (yaml config): yaml config object
        device (device): PyTorch device
        c_dim (int): dimension of conditioned code c
        z_dim (int): dimension of latent code z
    '''
    lt_kwargs = cfg['model']['encoder_latent_temporal_kwargs']
    name = cfg['model']['encoder_latent_temporal']
    if not name:
        return None
    constructor = models.encoder_latent_dict[name]
    return constructor(z_dim=z_dim, c_dim=c_dim, **lt_kwargs).to(device)
def get_encoder_latent(cfg, device, c_dim=0, z_dim=0):
    ''' Returns a latent encoder instance, or None if disabled.

    Args:
        cfg (yaml config): yaml config object
        device (device): PyTorch device
        c_dim (int): dimension of conditioned code c
        z_dim (int): dimension of latent code z
    '''
    latent_kwargs = cfg['model']['encoder_latent_kwargs']
    name = cfg['model']['encoder_latent']
    if not name:
        return None
    constructor = models.encoder_latent_dict[name]
    return constructor(z_dim=z_dim, c_dim=c_dim, **latent_kwargs).to(device)
def get_encoder_temporal(cfg, device, dataset=None, c_dim=0, z_dim=0):
    ''' Returns a temporal encoder instance, or None if disabled.

    Args:
        cfg (yaml config): yaml config object
        device (device): PyTorch device
        dataset (dataset): dataset (only used for the 'idx' embedding size)
        c_dim (int): dimension of conditioned code c
        z_dim (int): dimension of latent code z
    '''
    name = cfg['model']['encoder_temporal']
    temp_kwargs = cfg['model']['encoder_temporal_kwargs']
    if not name:
        return None
    if name == 'idx':
        # Index-based encoder: one embedding per dataset item, optionally
        # projected to c_dim with a learned linear layer.
        if cfg['model']['learn_embedding']:
            return nn.Sequential(
                nn.Embedding(len(dataset), 128),
                nn.Linear(128, c_dim)).to(device)
        return nn.Embedding(len(dataset), c_dim).to(device)
    return encoder_temporal_dict[name](c_dim=c_dim, **temp_kwargs).to(device)
def get_model(cfg, device=None, dataset=None, **kwargs):
    ''' Returns an OFlow model instance.

    Depending on the experimental setup, it consists of encoders,
    latent encoders, a velocity field, and a decoder instance.

    Args:
        cfg (yaml config): yaml config object
        device (device): PyTorch device
        dataset (dataset): dataset (used by index-based encoders)
    '''
    model_cfg = cfg['model']
    dim = cfg['data']['dim']
    input_type = cfg['data']['input_type']
    z_dim = model_cfg['z_dim']
    c_dim = model_cfg['c_dim']
    # Build the individual components; each helper may return None
    # depending on the configuration. Construction order is kept stable.
    decoder = get_decoder(cfg, device, dim, c_dim, z_dim)
    velocity_field = get_velocity_field(cfg, device, dim, c_dim, z_dim)
    encoder = get_encoder(cfg, device, dataset, c_dim)
    encoder_latent = get_encoder_latent(cfg, device, c_dim, z_dim)
    encoder_latent_temporal = get_encoder_latent_temporal(
        cfg, device, c_dim, z_dim)
    encoder_temporal = get_encoder_temporal(cfg, device, dataset, c_dim, z_dim)
    p0_z = get_prior_z(cfg, device)
    # Assemble the full Occupancy Flow model.
    return models.OccupancyFlow(
        decoder=decoder, encoder=encoder, encoder_latent=encoder_latent,
        encoder_latent_temporal=encoder_latent_temporal,
        encoder_temporal=encoder_temporal, vector_field=velocity_field,
        ode_step_size=model_cfg['ode_step_size'],
        use_adjoint=model_cfg['use_adjoint'],
        rtol=model_cfg['rtol'], atol=model_cfg['atol'],
        ode_solver=model_cfg['ode_solver'],
        p0_z=p0_z, device=device, input_type=input_type)
def get_trainer(model, optimizer, cfg, device, **kwargs):
    ''' Build and return an OFlow trainer instance.

    Args:
        model (nn.Module): OFlow model
        optimizer (optimizer): PyTorch optimizer
        cfg (yaml config): yaml config object
        device (device): PyTorch device
    '''
    # Collect all trainer settings from the config in one place.
    trainer_kwargs = {
        'device': device,
        'input_type': cfg['data']['input_type'],
        # Visualizations are written below the training output directory.
        'vis_dir': os.path.join(cfg['training']['out_dir'], 'vis'),
        'threshold': cfg['test']['threshold'],
        'eval_sample': cfg['training']['eval_sample'],
        'loss_corr': cfg['model']['loss_corr'],
        'loss_recon': cfg['model']['loss_recon'],
        'loss_corr_bw': cfg['model']['loss_corr_bw'],
        'vae_beta': cfg['model']['vae_beta'],
    }
    return training.Trainer(model, optimizer, **trainer_kwargs)
def get_generator(model, cfg, device, **kwargs):
    ''' Build and return an OFlow generator instance.

    The generator provides methods to extract the final meshes from the
    OFlow representation.

    Args:
        model (nn.Module): OFlow model
        cfg (yaml config): yaml config object
        device (device): PyTorch device
    '''
    # All mesh-extraction settings live in the 'generation' section;
    # only the decision threshold comes from 'test'.
    gen_cfg = cfg['generation']
    return generation.Generator3D(
        model,
        device=device,
        threshold=cfg['test']['threshold'],
        resolution0=gen_cfg['resolution_0'],
        upsampling_steps=gen_cfg['upsampling_steps'],
        padding=gen_cfg['padding'],
        sample=gen_cfg['use_sampling'],
        refinement_step=gen_cfg['refinement_step'],
        simplify_nfaces=gen_cfg['simplify_nfaces'],
        n_time_steps=gen_cfg['n_time_steps'],
        mesh_color=gen_cfg['mesh_color'],
        only_end_time_points=gen_cfg['only_end_time_points'],
        interpolate=gen_cfg['interpolate'],
        fix_z=gen_cfg['fix_z'],
        fix_zt=gen_cfg['fix_zt'],
    )
def get_prior_z(cfg, device, **kwargs):
    ''' Return the prior distribution of latent code z.

    The prior is a standard normal N(0, I) of dimension `model.z_dim`.

    Args:
        cfg (yaml config): yaml config object
        device (device): PyTorch device
    '''
    z_dim = cfg['model']['z_dim']
    loc = torch.zeros(z_dim, device=device)
    scale = torch.ones(z_dim, device=device)
    return dist.Normal(loc, scale)
def get_transforms(cfg):
    ''' Return the point/pointcloud subsampling transform objects.

    Args:
        cfg (yaml config): yaml config object

    Returns:
        tuple: (transf_pt, transf_pt_val, transf_pcl, transf_pcl_val)
    '''
    data_cfg = cfg['data']
    n_pt_eval = cfg['training']['n_eval_points']
    # Training transforms subsample randomly; evaluation transforms are
    # deterministic (random=False) so metrics are reproducible.
    transf_pt = data.SubsamplePoints(data_cfg['n_training_points'])
    transf_pt_val = data.SubsamplePointsSeq(n_pt_eval, random=False)
    transf_pcl_val = data.SubsamplePointcloudSeq(n_pt_eval, random=False)
    transf_pcl = data.SubsamplePointcloudSeq(
        data_cfg['n_training_pcl_points'], connected_samples=True)
    return transf_pt, transf_pt_val, transf_pcl, transf_pcl_val
def get_data_fields(mode, cfg):
    ''' Return the data fields for the given mode.

    Args:
        mode (str): mode (train|val|test)
        cfg (yaml config): yaml config object

    Returns:
        dict: mapping of field name to field instance; may be empty for
        modes/configs that need no fields.
    '''
    fields = {}
    seq_len = cfg['data']['length_sequence']
    p_folder = cfg['data']['points_iou_seq_folder']
    pcl_folder = cfg['data']['pointcloud_seq_folder']
    mesh_folder = cfg['data']['mesh_seq_folder']
    generate_interpolate = cfg['generation']['interpolate']
    correspondence = cfg['generation']['correspondence']
    unpackbits = cfg['data']['points_unpackbits']
    # Transformation
    transf_pt, transf_pt_val, transf_pcl, transf_pcl_val = get_transforms(cfg)
    # Fields
    pts_iou_field = data.PointsSubseqField
    pts_corr_field = data.PointCloudSubseqField
    if mode == 'train':
        if cfg['model']['loss_recon']:
            # 'points' is pinned to the first time step; 'points_t' samples
            # over the whole sequence.
            fields['points'] = pts_iou_field(p_folder, transform=transf_pt,
                                             seq_len=seq_len,
                                             fixed_time_step=0,
                                             unpackbits=unpackbits)
            fields['points_t'] = pts_iou_field(p_folder,
                                               transform=transf_pt,
                                               seq_len=seq_len,
                                               unpackbits=unpackbits)
        # Connectivity Loss:
        if cfg['model']['loss_corr']:
            fields['pointcloud'] = pts_corr_field(pcl_folder,
                                                  transform=transf_pcl,
                                                  seq_len=seq_len)
    elif mode == 'val':
        fields['points'] = pts_iou_field(p_folder, transform=transf_pt_val,
                                         all_steps=True, seq_len=seq_len,
                                         unpackbits=unpackbits)
        fields['points_mesh'] = pts_corr_field(pcl_folder,
                                               transform=transf_pcl_val,
                                               seq_len=seq_len)
    elif mode == 'test' and (generate_interpolate or correspondence):
        # Meshes are only needed for interpolation / correspondence output.
        fields['mesh'] = data.MeshSubseqField(mesh_folder, seq_len=seq_len,
                                              only_end_points=True)
    # Removed extraction residue ("| 0.957147 | 0.452536") that was fused
    # onto this return statement.
    return fields
import os
import argparse
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import datasets
from torchvision import transforms as pth_transforms
from torchvision.transforms import InterpolationMode
import utils
import vision_transformer as vits
def eval_or_predict_knn(args, k_list, temperature=0.07, eval=True, use_cuda=True):
    """Run weighted k-NN evaluation/prediction on top of frozen ViT features.

    Features are extracted for the labelled train split and for the "val"
    split (``eval=True``) or the "test" split (``eval=False``); a weighted
    k-NN classifier is then run for every k in ``k_list``.

    Args:
        args: namespace providing data_path, batch_size_per_gpu, num_workers,
            arch, patch_size, output_dir, pretrained_weights, checkpoint_key.
        k_list (iterable of int): neighbour counts to evaluate.
        temperature (float): softmax temperature used to weight neighbours.
        eval (bool): use the "val" split when True, "test" otherwise.
            NOTE(review): shadows the ``eval`` builtin; kept so existing
            keyword callers do not break.
        use_cuda (bool): move extracted features/labels to the GPU before
            classification.

    Returns:
        (top1_list, top5_list, preds_list), one entry per k, or
        (None, None, None) when no pretrained weights can be found.
    """
    cudnn.benchmark = True
    # ============ preparing data ... ============
    transform = pth_transforms.Compose([
        pth_transforms.Resize(256, interpolation=InterpolationMode.BICUBIC),
        pth_transforms.CenterCrop(224),
        pth_transforms.ToTensor(),
        pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    dataset_train = ReturnIndexDataset(os.path.join(args.data_path, "train/labelled"), transform=transform)
    dataset_val = ReturnIndexDataset(os.path.join(args.data_path, "val" if eval else "test"), transform=transform)
    data_loader_train = torch.utils.data.DataLoader(
        dataset_train,
        batch_size=args.batch_size_per_gpu * 16,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=False,
    )
    data_loader_val = torch.utils.data.DataLoader(
        dataset_val,
        batch_size=args.batch_size_per_gpu * 16,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=False,
    )
    print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val/test imgs.")
    # ============ preparing model ... ============
    model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0)
    print(f"Model {args.arch} {args.patch_size}x{args.patch_size} built.")
    model.cuda()
    # Prefer a checkpoint from this run's output dir over user-supplied weights.
    if os.path.exists(os.path.join(args.output_dir, 'checkpoint.pth')):
        pretrained_weights = os.path.join(args.output_dir, 'checkpoint.pth')
    elif os.path.exists(args.pretrained_weights):
        pretrained_weights = args.pretrained_weights
    else:
        # Fixed typo in the user-facing message ("an" -> "and").
        print('No pretrained weights found, cancelling KNN and returning')
        return None, None, None
    utils.load_pretrained_weights(model, pretrained_weights, args.checkpoint_key, args.arch, args.patch_size)
    model.eval()
    # ============ extract features ... ============
    print("Extracting features for train set...")
    train_features = extract_features(model, data_loader_train)
    print("Extracting features for val set...")
    test_features = extract_features(model, data_loader_val)
    # L2-normalize so the dot products in knn_classifier are cosine similarities.
    train_features = nn.functional.normalize(train_features, dim=1, p=2)
    test_features = nn.functional.normalize(test_features, dim=1, p=2)
    # ImageFolder stores (path, class_index) tuples in .samples.
    train_labels = torch.tensor([s[-1] for s in dataset_train.samples]).long()
    test_labels = torch.tensor([s[-1] for s in dataset_val.samples]).long()
    if use_cuda:
        train_features = train_features.cuda()
        test_features = test_features.cuda()
        train_labels = train_labels.cuda()
        test_labels = test_labels.cuda()
    print("Features are ready!\nStart the k-NN classification.")
    num_classes = len(dataset_train.classes)
    top1_list, top5_list, preds_list = [], [], []
    for k in k_list:
        top1, top5, preds = knn_classifier(train_features, train_labels,
            test_features, test_labels, k, temperature, num_classes)
        print(f"{k}-NN classifier result: Top1: {top1}, Top5: {top5}")
        top1_list.append(top1)
        top5_list.append(top5)
        preds_list.append(preds)
    return top1_list, top5_list, preds_list
@torch.no_grad()
def extract_features(model, data_loader, use_cuda=False):
    """Run *model* over *data_loader* and collect one feature row per sample.

    The storage matrix is allocated lazily once the feature dimension is
    known from the first batch; each batch's rows are scattered into place
    by dataset index, so the result order is independent of loader order.

    Args:
        model: feature extractor; called as model(samples).
        data_loader: yields (samples, index) pairs (see ReturnIndexDataset).
        use_cuda (bool): keep the feature matrix on the GPU.

    Returns:
        Tensor of shape (len(dataset), feat_dim), or None for an empty loader.
    """
    metric_logger = utils.MetricLogger(delimiter=" ")
    features = None
    for samples, index in metric_logger.log_every(data_loader, 20):
        samples = samples.cuda(non_blocking=True)
        index = index.cuda(non_blocking=True)
        feats = model(samples).clone()
        # init storage feature matrix (first batch only).  The original
        # flattened indentation re-ran the .cuda() transfer and the print on
        # every batch; both belong inside the one-time allocation branch.
        if features is None:
            features = torch.zeros(len(data_loader.dataset), feats.shape[-1])
            if use_cuda:
                features = features.cuda(non_blocking=True)
            print(f"Storing features into tensor of shape {features.shape}")
        if use_cuda:
            features.index_copy_(0, index, feats)
        else:
            features.index_copy_(0, index.cpu(), feats.cpu())
    return features
@torch.no_grad()
def knn_classifier(train_features, train_labels, test_features, test_labels, k, T, num_classes):
    """Weighted k-NN classification on pre-extracted (normalized) features.

    Test images are processed in chunks; for each test feature the k most
    similar training features vote for their label with weight
    exp(similarity / T).

    Args:
        train_features / test_features: (n, d) feature matrices.
        train_labels / test_labels: (n,) integer class labels.
        k (int): number of neighbours.
        T (float): softmax temperature for the similarity weighting.
        num_classes (int): number of classes.

    Returns:
        (top1, top5, predictions): accuracies in percent and the sorted
        class ranking for every test image.
    """
    top1, top5, total = 0.0, 0.0, 0
    train_features = train_features.t()
    num_test_images, num_chunks = test_labels.shape[0], 100
    # Guard: with fewer than num_chunks test images the integer division
    # yields a chunk size of 0 and range() raises ValueError.
    imgs_per_chunk = max(1, num_test_images // num_chunks)
    retrieval_one_hot = torch.zeros(k, num_classes).cuda()
    all_predictions = []
    for idx in range(0, num_test_images, imgs_per_chunk):
        # get the features for test images
        features = test_features[
            idx : min((idx + imgs_per_chunk), num_test_images), :
        ]
        targets = test_labels[idx : min((idx + imgs_per_chunk), num_test_images)]
        batch_size = targets.shape[0]
        # calculate the dot product and compute top-k neighbors
        similarity = torch.mm(features, train_features)
        distances, indices = similarity.topk(k, largest=True, sorted=True)
        candidates = train_labels.view(1, -1).expand(batch_size, -1)
        retrieved_neighbors = torch.gather(candidates, 1, indices)
        # One-hot encode the retrieved labels, then weight each neighbour's
        # vote by exp(similarity / T).
        retrieval_one_hot.resize_(batch_size * k, num_classes).zero_()
        retrieval_one_hot.scatter_(1, retrieved_neighbors.view(-1, 1), 1)
        distances_transform = distances.clone().div_(T).exp_()
        probs = torch.sum(
            torch.mul(
                retrieval_one_hot.view(batch_size, -1, num_classes),
                distances_transform.view(batch_size, -1, 1),
            ),
            1,
        )
        _, predictions = probs.sort(1, True)
        all_predictions.append(predictions)
        # find the predictions that match the target
        correct = predictions.eq(targets.data.view(-1, 1))
        top1 = top1 + correct.narrow(1, 0, 1).sum().item()
        if num_classes >= 5:
            # min(5, k): `correct` has only k columns, so narrowing to a
            # fixed width of 5 would fail for k < 5.
            top5 = top5 + correct.narrow(1, 0, min(5, k)).sum().item()
        else:
            top5 = 0
        total += targets.size(0)
    top1 = top1 * 100.0 / total
    top5 = top5 * 100.0 / total
    # Previously only the LAST chunk's predictions were returned; return the
    # ranking for every test image instead.
    return top1, top5, torch.cat(all_predictions, dim=0)
class ReturnIndexDataset(datasets.ImageFolder):
    """ImageFolder variant that returns (image, dataset_index) instead of
    (image, label).

    The index is used by extract_features to scatter each sample's feature
    row into a pre-allocated matrix; labels are read separately from
    ``.samples`` by the caller.  Removed extraction residue that was fused
    onto the return statement.
    """

    def __getitem__(self, idx):
        # The class label is intentionally discarded.
        img, _lab = super(ReturnIndexDataset, self).__getitem__(idx)
        return img, idx
import argparse
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import datasets
from torchvision import transforms as pth_transforms
from torchvision.transforms import InterpolationMode
import utils
import vision_transformer as vits
def eval_or_predict_knn(args, k_list, temperature=0.07, eval=True, use_cuda=True):
    """Run weighted k-NN evaluation/prediction on top of frozen ViT features.

    Features are extracted for the labelled train split and for the "val"
    split (``eval=True``) or the "test" split (``eval=False``); a weighted
    k-NN classifier is then run for every k in ``k_list``.

    Args:
        args: namespace providing data_path, batch_size_per_gpu, num_workers,
            arch, patch_size, output_dir, pretrained_weights, checkpoint_key.
        k_list (iterable of int): neighbour counts to evaluate.
        temperature (float): softmax temperature used to weight neighbours.
        eval (bool): use the "val" split when True, "test" otherwise.
            NOTE(review): shadows the ``eval`` builtin; kept so existing
            keyword callers do not break.
        use_cuda (bool): move extracted features/labels to the GPU before
            classification.

    Returns:
        (top1_list, top5_list, preds_list), one entry per k, or
        (None, None, None) when no pretrained weights can be found.
    """
    cudnn.benchmark = True
    # ============ preparing data ... ============
    transform = pth_transforms.Compose([
        pth_transforms.Resize(256, interpolation=InterpolationMode.BICUBIC),
        pth_transforms.CenterCrop(224),
        pth_transforms.ToTensor(),
        pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    dataset_train = ReturnIndexDataset(os.path.join(args.data_path, "train/labelled"), transform=transform)
    dataset_val = ReturnIndexDataset(os.path.join(args.data_path, "val" if eval else "test"), transform=transform)
    data_loader_train = torch.utils.data.DataLoader(
        dataset_train,
        batch_size=args.batch_size_per_gpu * 16,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=False,
    )
    data_loader_val = torch.utils.data.DataLoader(
        dataset_val,
        batch_size=args.batch_size_per_gpu * 16,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=False,
    )
    print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val/test imgs.")
    # ============ preparing model ... ============
    model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0)
    print(f"Model {args.arch} {args.patch_size}x{args.patch_size} built.")
    model.cuda()
    # Prefer a checkpoint from this run's output dir over user-supplied weights.
    if os.path.exists(os.path.join(args.output_dir, 'checkpoint.pth')):
        pretrained_weights = os.path.join(args.output_dir, 'checkpoint.pth')
    elif os.path.exists(args.pretrained_weights):
        pretrained_weights = args.pretrained_weights
    else:
        # Fixed typo in the user-facing message ("an" -> "and").
        print('No pretrained weights found, cancelling KNN and returning')
        return None, None, None
    utils.load_pretrained_weights(model, pretrained_weights, args.checkpoint_key, args.arch, args.patch_size)
    model.eval()
    # ============ extract features ... ============
    print("Extracting features for train set...")
    train_features = extract_features(model, data_loader_train)
    print("Extracting features for val set...")
    test_features = extract_features(model, data_loader_val)
    # L2-normalize so the dot products in knn_classifier are cosine similarities.
    train_features = nn.functional.normalize(train_features, dim=1, p=2)
    test_features = nn.functional.normalize(test_features, dim=1, p=2)
    # ImageFolder stores (path, class_index) tuples in .samples.
    train_labels = torch.tensor([s[-1] for s in dataset_train.samples]).long()
    test_labels = torch.tensor([s[-1] for s in dataset_val.samples]).long()
    if use_cuda:
        train_features = train_features.cuda()
        test_features = test_features.cuda()
        train_labels = train_labels.cuda()
        test_labels = test_labels.cuda()
    print("Features are ready!\nStart the k-NN classification.")
    num_classes = len(dataset_train.classes)
    top1_list, top5_list, preds_list = [], [], []
    for k in k_list:
        top1, top5, preds = knn_classifier(train_features, train_labels,
            test_features, test_labels, k, temperature, num_classes)
        print(f"{k}-NN classifier result: Top1: {top1}, Top5: {top5}")
        top1_list.append(top1)
        top5_list.append(top5)
        preds_list.append(preds)
    return top1_list, top5_list, preds_list
@torch.no_grad()
def extract_features(model, data_loader, use_cuda=False):
    """Run *model* over *data_loader* and collect one feature row per sample.

    The storage matrix is allocated lazily once the feature dimension is
    known from the first batch; each batch's rows are scattered into place
    by dataset index, so the result order is independent of loader order.

    Args:
        model: feature extractor; called as model(samples).
        data_loader: yields (samples, index) pairs (see ReturnIndexDataset).
        use_cuda (bool): keep the feature matrix on the GPU.

    Returns:
        Tensor of shape (len(dataset), feat_dim), or None for an empty loader.
    """
    metric_logger = utils.MetricLogger(delimiter=" ")
    features = None
    for samples, index in metric_logger.log_every(data_loader, 20):
        samples = samples.cuda(non_blocking=True)
        index = index.cuda(non_blocking=True)
        feats = model(samples).clone()
        # init storage feature matrix (first batch only).  The original
        # flattened indentation re-ran the .cuda() transfer and the print on
        # every batch; both belong inside the one-time allocation branch.
        if features is None:
            features = torch.zeros(len(data_loader.dataset), feats.shape[-1])
            if use_cuda:
                features = features.cuda(non_blocking=True)
            print(f"Storing features into tensor of shape {features.shape}")
        if use_cuda:
            features.index_copy_(0, index, feats)
        else:
            features.index_copy_(0, index.cpu(), feats.cpu())
    return features
@torch.no_grad()
def knn_classifier(train_features, train_labels, test_features, test_labels, k, T, num_classes):
    """Weighted k-NN classification on pre-extracted (normalized) features.

    Test images are processed in chunks; for each test feature the k most
    similar training features vote for their label with weight
    exp(similarity / T).

    Args:
        train_features / test_features: (n, d) feature matrices.
        train_labels / test_labels: (n,) integer class labels.
        k (int): number of neighbours.
        T (float): softmax temperature for the similarity weighting.
        num_classes (int): number of classes.

    Returns:
        (top1, top5, predictions): accuracies in percent and the sorted
        class ranking for every test image.
    """
    top1, top5, total = 0.0, 0.0, 0
    train_features = train_features.t()
    num_test_images, num_chunks = test_labels.shape[0], 100
    # Guard: with fewer than num_chunks test images the integer division
    # yields a chunk size of 0 and range() raises ValueError.
    imgs_per_chunk = max(1, num_test_images // num_chunks)
    retrieval_one_hot = torch.zeros(k, num_classes).cuda()
    all_predictions = []
    for idx in range(0, num_test_images, imgs_per_chunk):
        # get the features for test images
        features = test_features[
            idx : min((idx + imgs_per_chunk), num_test_images), :
        ]
        targets = test_labels[idx : min((idx + imgs_per_chunk), num_test_images)]
        batch_size = targets.shape[0]
        # calculate the dot product and compute top-k neighbors
        similarity = torch.mm(features, train_features)
        distances, indices = similarity.topk(k, largest=True, sorted=True)
        candidates = train_labels.view(1, -1).expand(batch_size, -1)
        retrieved_neighbors = torch.gather(candidates, 1, indices)
        # One-hot encode the retrieved labels, then weight each neighbour's
        # vote by exp(similarity / T).
        retrieval_one_hot.resize_(batch_size * k, num_classes).zero_()
        retrieval_one_hot.scatter_(1, retrieved_neighbors.view(-1, 1), 1)
        distances_transform = distances.clone().div_(T).exp_()
        probs = torch.sum(
            torch.mul(
                retrieval_one_hot.view(batch_size, -1, num_classes),
                distances_transform.view(batch_size, -1, 1),
            ),
            1,
        )
        _, predictions = probs.sort(1, True)
        all_predictions.append(predictions)
        # find the predictions that match the target
        correct = predictions.eq(targets.data.view(-1, 1))
        top1 = top1 + correct.narrow(1, 0, 1).sum().item()
        if num_classes >= 5:
            # min(5, k): `correct` has only k columns, so narrowing to a
            # fixed width of 5 would fail for k < 5.
            top5 = top5 + correct.narrow(1, 0, min(5, k)).sum().item()
        else:
            top5 = 0
        total += targets.size(0)
    top1 = top1 * 100.0 / total
    top5 = top5 * 100.0 / total
    # Previously only the LAST chunk's predictions were returned; return the
    # ranking for every test image instead.
    return top1, top5, torch.cat(all_predictions, dim=0)
class ReturnIndexDataset(datasets.ImageFolder):
    """ImageFolder variant that returns (image, dataset_index) instead of
    (image, label).

    The index is used by extract_features to scatter each sample's feature
    row into a pre-allocated matrix; labels are read separately from
    ``.samples`` by the caller.  Removed extraction residue that was fused
    onto the return statement.
    """

    def __getitem__(self, idx):
        # The class label is intentionally discarded.
        img, _lab = super(ReturnIndexDataset, self).__getitem__(idx)
        return img, idx
import numpy as np
from ..base import TransformerMixin
from ..utils.validation import check_is_fitted
from scipy.sparse import issparse
###############################################################################
# Mixin class for feature agglomeration.
class AgglomerationTransform(TransformerMixin):
    """
    A class for feature agglomeration via the transform interface.

    Mixin: expects the inheriting estimator to provide ``self.labels_``
    (cluster label per feature, set by fit) and ``self.pooling_func``.
    """

    def transform(self, X):
        """
        Transform a new matrix using the built clustering.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or \
                (n_samples, n_samples)
            A M by N array of M observations in N dimensions or a length
            M array of M one-dimensional observations.

        Returns
        -------
        Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,)
            The pooled values for each feature cluster.
        """
        check_is_fitted(self)
        X = self._validate_data(X, reset=False)
        if self.pooling_func == np.mean and not issparse(X):
            size = np.bincount(self.labels_)
            n_samples = X.shape[0]
            # a fast way to compute the mean of grouped features
            nX = np.array(
                [np.bincount(self.labels_, X[i, :]) / size for i in range(n_samples)]
            )
        else:
            # Generic path: apply pooling_func to each cluster's columns.
            nX = [
                self.pooling_func(X[:, self.labels_ == label], axis=1)
                for label in np.unique(self.labels_)
            ]
            nX = np.array(nX).T
        return nX

    def inverse_transform(self, Xred):
        """
        Inverse the transformation and return a vector of size `n_features`.

        Parameters
        ----------
        Xred : array-like of shape (n_samples, n_clusters) or (n_clusters,)
            The values to be assigned to each cluster of samples.

        Returns
        -------
        X : ndarray of shape (n_samples, n_features) or (n_features,)
            A vector of size `n_samples` with the values of `Xred` assigned to
            each of the cluster of samples.
        """
        check_is_fitted(self)
        # `inverse` maps each original feature to its cluster's column.
        # Removed extraction residue that was fused onto the return line.
        unil, inverse = np.unique(self.labels_, return_inverse=True)
        return Xred[..., inverse]
import numpy as np
from ..base import TransformerMixin
from ..utils.validation import check_is_fitted
from scipy.sparse import issparse
###############################################################################
# Mixin class for feature agglomeration.
class AgglomerationTransform(TransformerMixin):
    """
    A class for feature agglomeration via the transform interface.

    Mixin: expects the inheriting estimator to provide ``self.labels_``
    (cluster label per feature, set by fit) and ``self.pooling_func``.
    """

    def transform(self, X):
        """
        Transform a new matrix using the built clustering.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or \
                (n_samples, n_samples)
            A M by N array of M observations in N dimensions or a length
            M array of M one-dimensional observations.

        Returns
        -------
        Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,)
            The pooled values for each feature cluster.
        """
        check_is_fitted(self)
        X = self._validate_data(X, reset=False)
        if self.pooling_func == np.mean and not issparse(X):
            size = np.bincount(self.labels_)
            n_samples = X.shape[0]
            # a fast way to compute the mean of grouped features
            nX = np.array(
                [np.bincount(self.labels_, X[i, :]) / size for i in range(n_samples)]
            )
        else:
            # Generic path: apply pooling_func to each cluster's columns.
            nX = [
                self.pooling_func(X[:, self.labels_ == label], axis=1)
                for label in np.unique(self.labels_)
            ]
            nX = np.array(nX).T
        return nX

    def inverse_transform(self, Xred):
        """
        Inverse the transformation and return a vector of size `n_features`.

        Parameters
        ----------
        Xred : array-like of shape (n_samples, n_clusters) or (n_clusters,)
            The values to be assigned to each cluster of samples.

        Returns
        -------
        X : ndarray of shape (n_samples, n_features) or (n_features,)
            A vector of size `n_samples` with the values of `Xred` assigned to
            each of the cluster of samples.
        """
        check_is_fitted(self)
        # `inverse` maps each original feature to its cluster's column.
        # Removed extraction residue that was fused onto the return line.
        unil, inverse = np.unique(self.labels_, return_inverse=True)
        return Xred[..., inverse]