code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import json
import numpy
from RecomandEngine.restrictions.RestrictionConflicts import RestrictionConflict, RestrictionAlphaOrBeta
from RecomandEngine.restrictions.RestrictionDependences import RestrictionOneToOneDependency, \
RestrictionOneToManyDependency, RestrictionManyToManyDependency, RestrictionManyToManyDependencyNew
from RecomandEngine.restrictions.RestrictionNumberOfInstances import RestrictionUpperLowerEqualBound, \
RestrictionRangeBound, RestrictionFullDeployment, RestrictionRequireProvideDependency
from RecomandEngine.restrictions.RestrictionHardware import RestrictionHardware
from RecomandEngine.problem.Component import Component
import logging.config
import sys
class ManeuverProblem:
    """Deployment optimization problem: components, restrictions between them and VM data."""
    def __init__(self):
        self.VM_MaxLoad = 20  # max load per VM -- presumably max components hosted on one VM; TODO confirm
        self.componentsList = {}  # list of components (component id -> Component instance)
        self.restrictionsList = []  # list of restrictions
        self.IpInfo = {}  # list with other information (filled from the json "IP" field)
        self.applicationName = None  # set by readConfiguration() from the json "application" field
        # NOTE(review): requires loggerConfig.conf in the current working directory,
        # otherwise fileConfig raises at construction time.
        logging.config.fileConfig('loggerConfig.conf')
        self.logger = logging.getLogger("maneuverApp")
        self.priceOffersFile = None
        self.nrComp = 0  # number of components (set by init()/readConfiguration())
        self.nrVM = 0  # number of virtual machines (set by init()/readConfiguration())
def init(self, nr_vm, nr_comp):# used at initilization for test instances
self.nrVM = nr_vm
self.nrComp = nr_comp
self.R = numpy.zeros((self.nrComp, self.nrComp), dtype=numpy.int) #conflicts graph
self.D = numpy.zeros((self.nrComp, self.nrComp), dtype=numpy.int) #corelation hraph
    def solveSMT(self, availableConfigs, smt2lib, smt2libsol, solver_type, solver, option):
        """
        Solves the optimization problem using the imported SMT solver and available VMs configurations
        :param self: the optimization problem
        :param availableConfigs: available VMs configurations
        :param smt2lib: destination for the SMT2-LIB encoding of the problem
        :param smt2libsol: destination for the SMT2-LIB solution
        :param solver_type: the Z3 solver type (optimize/debug)
        :param solver: name of the solver implementation to instantiate
        :param option: encoding variant, "linear" or "nonlinear"
        :return: result of SMTSolver.run(smt2lib, smt2libsol)
        """
        # Dispatch on (solver, option): each branch lazily imports the matching
        # solver module and builds the SMTSolver instance.
        # NOTE(review): if no branch matches, SMTSolver stays unbound and the
        # code after the dispatch raises NameError -- confirm callers only pass
        # supported combinations.
        if solver == "SMT_Solver_Z3_RealSymBreak" and option == "linear":
            from RecomandEngine.exactsolvers.linear import SMT_Solver_Z3_RealSymBreak
            SMTSolver = SMT_Solver_Z3_RealSymBreak.Z3_Solver(self.nrVM, self.nrComp, availableConfigs, self, solver_type)
        if solver == "SMT_Solver_Z3_IntBool" and option == "linear":
            from RecomandEngine.exactsolvers.linear import SMT_Solver_Z3_IntBool
            SMTSolver = SMT_Solver_Z3_IntBool.Z3_Solver(self.nrVM, self.nrComp, availableConfigs, self, solver_type)
        if solver == "SMT_Solver_Z3_FD" and option == "linear":
            from RecomandEngine.exactsolvers.linear import SMT_Solver_Z3_FD
            SMTSolver = SMT_Solver_Z3_FD.Z3_Solver(self.nrVM, self.nrComp, availableConfigs, self, solver_type)
        # NOTE(review): this branch instantiates IntIntOrSymBreaking although the
        # requested name is "SMT_Solver_Z3_RealRealME" -- looks intentional
        # (aliasing) but worth confirming.
        if solver == "SMT_Solver_Z3_RealRealME" and option == "linear":
            from RecomandEngine.exactsolvers.linear import IntIntOrSymBreaking
            SMTSolver = IntIntOrSymBreaking.Z3_Solver(self.nrVM, self.nrComp, availableConfigs, self, solver_type)
        if solver == "SMT_Solver_Z3_RealBool" and option == "linear":
            from RecomandEngine.exactsolvers.linear import SMT_Solver_Z3_RealBool
            SMTSolver = SMT_Solver_Z3_RealBool.Z3_Solver(self.nrVM, self.nrComp, availableConfigs, self, solver_type)
        if solver == "SMT_Solver_Z3_RealBool" and option == "nonlinear":
            from RecomandEngine.exactsolvers.nonlinear import SMT_Solver_Z3_RealBool
            SMTSolver = SMT_Solver_Z3_RealBool.Z3_Solver(self.nrVM, self.nrComp, availableConfigs, self, solver_type)
        if solver == "SMT_Solver_Z3_RealReal" and option == "linear":
            from RecomandEngine.exactsolvers.linear import SMT_Solver_Z3_RealReal
            SMTSolver = SMT_Solver_Z3_RealReal.Z3_Solver(self.nrVM, self.nrComp, availableConfigs, self, solver_type)
        if solver == "SMT_Solver_Z3_RealReal" and option == "nonlinear":
            from RecomandEngine.exactsolvers.nonlinear import SMT_Solver_Z3_RealReal
            SMTSolver = SMT_Solver_Z3_RealReal.Z3_Solver(self.nrVM, self.nrComp, availableConfigs, self, solver_type)
        if solver == "SMT_Solver_Z3_IntIntLessThan" and option == "linear":
            from RecomandEngine.exactsolvers.linear import SMT_Solver_Z3_IntIntLessThan
            SMTSolver = SMT_Solver_Z3_IntIntLessThan.Z3_Solver(self.nrVM, self.nrComp, availableConfigs, self, solver_type)
        if solver == "SMT_Solver_Z3_IntIntLessThan" and option == "nonlinear":
            from RecomandEngine.exactsolvers.nonlinear import SMT_Solver_Z3_IntIntLessThan
            SMTSolver = SMT_Solver_Z3_IntIntLessThan.Z3_Solver(self.nrVM, self.nrComp, availableConfigs, self, solver_type)
        if solver == "SMT_Solver_Z3_IntIntOr" and option == "linear":
            from RecomandEngine.exactsolvers.linear import SMT_Solver_Z3_IntIntOr
            SMTSolver = SMT_Solver_Z3_IntIntOr.Z3_Solver(self.nrVM, self.nrComp, availableConfigs, self, solver_type)
        if solver == "SMT_Solver_Z3_IntIntOr" and option == "nonlinear":
            from RecomandEngine.exactsolvers.nonlinear import SMT_Solver_Z3_IntIntOr
            SMTSolver = SMT_Solver_Z3_IntIntOr.Z3_Solver(self.nrVM, self.nrComp, availableConfigs, self, solver_type)
        if solver == "SMT_Solver_Z3_BV" and option == "linear":
            from RecomandEngine.exactsolvers.linear import SMT_Solver_Z3_BV
            SMTSolver = SMT_Solver_Z3_BV.Z3_Solver(self.nrVM, self.nrComp, availableConfigs, self, solver_type)
        if solver == "SMT_Solver_Z3_BV" and option == "nonlinear":
            from RecomandEngine.exactsolvers.nonlinear import SMT_Solver_Z3_BV
            SMTSolver = SMT_Solver_Z3_BV.Z3_Solver(self.nrVM, self.nrComp, availableConfigs, self, solver_type)
        if solver == "SMT_Solver_Z3_RealPBC" and option == "linear":
            from RecomandEngine.exactsolvers.linear import SMT_Solver_Z3_RealPBC
            SMTSolver = SMT_Solver_Z3_RealPBC.Z3_Solver(self.nrVM, self.nrComp, availableConfigs, self, solver_type)
        if solver == "SMT_Solver_Z3_RealPBC" and option == "nonlinear":
            # PBC encodings only exist for the linear variant.
            sys.exit("No support for nonlinear PBC")
        if solver == "SMT_Solver_Z3_IntIntOrSymBreaking" and option == "linear":
            from RecomandEngine.exactsolvers.linear import SMT_Solver_Z3_IntIntOrSymBreaking
            SMTSolver = SMT_Solver_Z3_IntIntOrSymBreaking.Z3_Solver(self.nrVM, self.nrComp, availableConfigs, self,
                                                                    solver_type)
        if solver == "SMT_Solver_Z3_RealPBCSymBreaking" and option == "linear":
            from RecomandEngine.exactsolvers.linear import SMT_Solver_Z3_RealPBCSymBreaking
            SMTSolver = SMT_Solver_Z3_RealPBCSymBreaking.Z3_Solver(self.nrVM, self.nrComp, availableConfigs, self,
                                                                   solver_type)
        if solver == "SMT_Solver_Z3_RealPBCMultiObjectives" and option == "linear":
            from RecomandEngine.exactsolvers.linear import SMT_Solver_Z3_RealPBCMultiObjectives
            SMTSolver = SMT_Solver_Z3_RealPBCMultiObjectives.Z3_Solver(self.nrVM, self.nrComp, availableConfigs, self, solver_type)
        if solver == "SMT_Solver_Z3_RealPBCMultiObjectives" and option == "nonlinear":
            sys.exit("No support for nonlinear PBC")
        if SMTSolver.availableConfigurations is not None:
            # Constrain components against the (possibly filtered) VM configurations.
            self.restrictionsList.append(
                RestrictionHardware(self._getComponentsHardwareRestrictions(), SMTSolver.availableConfigurations, self))
        for restriction in self.restrictionsList:
            restriction.generateRestrictions(SMTSolver)
        return SMTSolver.run(smt2lib, smt2libsol)
def findPartitionsBasedOnConflictsMatrix(self):# inspired from tarjan algorithm
visitedComponents = {}
for i in range(self.nrComp):
visitedComponents[i] = False
#print("visitedComponents", visitedComponents)
#print(self.R)
partitions = [[]]
for i in range(self.nrComp):
if visitedComponents[i]:
continue
visitedComponents[i] = True
print(i, "visitedComponents", visitedComponents)
for partition in partitions:
inConflic = False # in conflict cu cele din partitia curenta
for j in partition:
if self.R[i][j] == 1:
inConflic = True
#print("conflict direct", i, j, partition)
else: #uitatate sa nu fie in conflict cu cele colocate
for compId in self.componentsList[j].dependenceComponentsList:
if self.R[i][compId] == 1:
inConflic = True
#print("conflict dependences", i, j, partition)
break
if not inConflic:
partition.append(i)
break
else:
partitions.append([i])
#print(i, partitions)
#print("!!!!!!!!!!!!!!!", partitions)
return partitions
def solveCPNrOfInstances(self, choosing_stategy, solutions_limit):
"""
Start solving components number problem using the chosen solver and available configurations for VM
:param cpSolver: Solver choosed to solve the problem
:return:
"""
self.logger.info("Find number of needed virtual machines based on components number restrictions")
from RecomandEngine.exactsolvers.CP_Solver_Number_of_Instances import CP_Solver_Got_Nr_Instances
cpSolver = CP_Solver_Got_Nr_Instances(self, choosing_stategy, solutions_limit)
for restriction in self.restrictionsList:
restriction.generateRestrictions(cpSolver)
return cpSolver.run()
    def readConfiguration(self, jsonFilePath):
        """
        Open json file that contains problem configurations and fills problem data
        :param jsonFilePath: path to the json problem description
        :return:
        """
        with open(jsonFilePath) as json_data:
            dictionary = json.load(json_data)
        self.logger.info(dictionary)
        self.applicationName = dictionary["application"]
        for component in dictionary["components"]:
            self._addComponent(component)
        # size the conflict/correlation matrices by the number of components
        self.init(len(self.componentsList), len(self.componentsList))
        orComponents = set()  # components involved in an 'OR' (alternative) relation
        for restriction in dictionary["restrictions"]:
            for comp in self._addRestriction(restriction):
                orComponents.add(comp)
        self.nrComp = len(self.componentsList)
        self.IpInfo = dictionary["IP"]
        # add restriction OS
        self.__addOperationSystemRestriction()
        # add information about minimum number of instances of each component
        self.__addRestrictionsComponentsNumber(orComponents)
        # find minimum components number based on problem description regarding components number
        self.nrVM = self.__findMinimumNumberOfInstancesForEachComponent(orComponents)
        print("..........self.nrVM", self.nrVM)
        # add other useful information for EA alg
        # -- like number of conflicts that a component is in
        # self.__addInformationForEA()
    # def __addInformationForEA(self):
    #     for i in range(self.nrComp):
    #         for j in range(self.nrComp):
    #             if self.R[i][j] == 1:
    #                 self.componentsList[i].conflictComponentsList.add(j)
    #         self.componentsList[i].numberOfConflictComponents = len(self.componentsList[i].conflictComponentsList)
    #     for i in range(self.nrComp):
    #         for j in range(self.nrComp):
    #             if self.D[i][j] == 1:
    #                 self.componentsList[i].dependenceComponentsList.add(j)
def __findMinimumNumberOfInstancesForEachComponent(self, orComponents):
"""
Resolve CP problem regarding minimum number of instances needed for each component and add this information to
each component
:param orComponents:
:return: number of VM, if each component in in conflict with the others components
"""
runningTime, components = self.solveCPNrOfInstances("LOWEST_MIN_MIN", 10000)
print("components", components)
for (compId, comp) in self.componentsList.items():
comp.minimumNumberOfInstances = components[compId]
#minimumBeanNr = self.findPartitionsBasedOnConflictsMatrix()
return numpy.sum(components) #+ len(minimumBeanNr)
def __addRestrictionsComponentsNumber(self, orComponents):
"""
Adds restriction regarding the fact that at least one component should be deployed.
The components that are not in 'OR' relation should be deployed alt least one time.
:param orComponents: list of components in 'OR' relation
:return:
"""
all_comps = set()
for component in self.componentsList:
all_comps.add(component+1)
all_comps = all_comps.difference(orComponents)
print ("all_comps", all_comps, "orcomps", orComponents, "elf.componentsList", self.componentsList)
for compId in all_comps:
self.restrictionsList.append(RestrictionUpperLowerEqualBound([compId], ">=", 1, self))
def __addOperationSystemRestriction(self):
"""
Add conflict restriction induced by the fact that different components need different operating system.
The restriction is induced by the fact that the user is not adding explicit restrictions
:return:
"""
dict = {}
for comp in self.componentsList:
if not ((self.componentsList[comp].operatingSystem is None) or (len(self.componentsList[comp].operatingSystem) == 0)):
if self.componentsList[comp].operatingSystem in dict:
dict[self.componentsList[comp].operatingSystem].append(comp)
else:
dict[self.componentsList[comp].operatingSystem] = [comp]
self.logger.debug("Operating system of the components: {}".format(dict))
dict2 = dict.copy()
if len(dict) > 1:
for i in dict:
dict2.pop(i, None)
for j in dict2:
for k in dict[i]:
self.restrictionsList.append(RestrictionConflict(k + 1, [u + 1 for u in dict2[j]], self))
def _addComponent(self, comp_dictionary):
"""
From json description of the component extract the properties and stores them into a instance of Component class
:param comp_dictionary: a dictionary loaded from json component description
:return:
"""
id = comp_dictionary["id"] - 1
c = Component(id, comp_dictionary["name"] if "name" in comp_dictionary else None,
comp_dictionary["Compute"]["CPU"] if "CPU" in comp_dictionary["Compute"] else None,
comp_dictionary["Compute"]["GPU"] if "GPU" in comp_dictionary["Compute"] else "false",
comp_dictionary["Compute"]["Memory"] if "Memory" in comp_dictionary["Compute"] else None,
comp_dictionary["Storage"]["StorageSize"] if "StorageSize" in comp_dictionary["Storage"] else 50,
comp_dictionary["Storage"]["StorageType"] if "StorageType" in comp_dictionary["Storage"] else None,
comp_dictionary["Storage"]["StorageValue"] if "StorageValue" in comp_dictionary["Storage"] else None,
comp_dictionary["Network"]["dataIn"] if "dataIn" in comp_dictionary["Network"] else None,
comp_dictionary["Network"]["dataOut"] if "dataOut" in comp_dictionary["Network"] else None,
comp_dictionary["Network"]["networkConnections"] if "networkConnections" in comp_dictionary[
"Network"] else None,
comp_dictionary["keywords"] if "keywords" in comp_dictionary else None,
comp_dictionary["operatingSystem"] if "operatingSystem" in comp_dictionary else None)
self.componentsList[id] = c
    def _addRestriction(self, dictionary):
        """
        From json description extracts the restrictions (conflict, multiplicity and dependency) between components
        :param dictionary: json description of one restriction; its "type" field selects the restriction class
        :return: set of 'OR'-related component ids (non-empty only for "AlternativeComponents")
        """
        dictionaryOrRelation = set()
        restrictionType = dictionary["type"]
        # Each branch appends the matching restriction object; argument order
        # mirrors the corresponding constructor signatures.
        if restrictionType == "Conflicts":
            self.restrictionsList.append(RestrictionConflict(dictionary["alphaCompId"], dictionary["compsIdList"], self))
        elif restrictionType == "OneToOneDependency":
            self.restrictionsList.append(RestrictionOneToOneDependency(dictionary["alphaCompId"],
                                                                        dictionary["betaCompId"], self))
        elif restrictionType == "ManyToManyDependency":
            self.restrictionsList.append(RestrictionManyToManyDependency(dictionary["alphaCompId"],
                                                                          dictionary["betaCompId"],
                                                                          dictionary["sign"], self))
        elif restrictionType == "ManyToManyDependencyNew":
            self.restrictionsList.append(RestrictionManyToManyDependencyNew(dictionary["alphaCompId"],
                                                                             dictionary["betaCompId"],
                                                                             dictionary["n"],
                                                                             dictionary["m"], self))
        elif restrictionType == "OneToManyDependency":
            self.restrictionsList.append(
                RestrictionOneToManyDependency(dictionary["alphaCompId"], dictionary["betaCompId"],
                                               dictionary["number"], self))
        elif restrictionType == "RangeBound":
            self.restrictionsList.append(RestrictionRangeBound(dictionary["components"],
                                                                dictionary["lowerBound"],
                                                                dictionary["upperBound"], self))
        elif restrictionType == "UpperBound":
            self.restrictionsList.append(RestrictionUpperLowerEqualBound(dictionary["compsIdList"], "<=",
                                                                          dictionary["bound"], self))
        elif restrictionType == "LowerBound":
            self.restrictionsList.append(RestrictionUpperLowerEqualBound(dictionary["compsIdList"], ">=",
                                                                          dictionary["bound"], self))
        elif restrictionType == "EqualBound":
            self.restrictionsList.append(RestrictionUpperLowerEqualBound(dictionary["compsIdList"], "=",
                                                                          dictionary["bound"], self))
        elif restrictionType == "FullDeployment":
            self.restrictionsList.append(RestrictionFullDeployment(dictionary["alphaCompId"],
                                                                    dictionary["compsIdList"], self))
        elif restrictionType == "RequireProvideDependency":
            self.restrictionsList.append(RestrictionRequireProvideDependency(dictionary["alphaCompId"],
                                                                              dictionary["betaCompId"],
                                                                              dictionary["alphaCompIdInstances"],
                                                                              dictionary["betaCompIdInstances"], self))
        elif restrictionType == "AlternativeComponents":
            self.restrictionsList.append(RestrictionAlphaOrBeta(dictionary["alphaCompId"], dictionary["betaCompId"],
                                                                 self))
            # Record both alternatives so the caller can exempt them from the
            # "deploy at least once" restriction.
            dictionaryOrRelation.add(dictionary["alphaCompId"])
            dictionaryOrRelation.add(dictionary["betaCompId"])
        return dictionaryOrRelation
def __repr__(self):
for i in self.componentsList:
print(i)
return str(self.componentsList)
def _getComponentsHardwareRestrictions(self):
"""
Resturns a list with hardware restriction for each component
:return:
"""
print("len(self.componentsList)", len(self.componentsList))
return [self.componentsList[compId].getComponentHardWareResources() for compId in range(0, len(self.componentsList))]
def compareComponentsRegardingHardwareRestrictions(self, compAlphaId, compBetaId):
"""
Finds which component alpha or beta needs more hardware resources
:param compAlphaId: alpha component id
:param compBetaId: beta component id
:return: sumAlpha - in how many cases component alpha needs less resources that component beta
sumBeta - in how many cases component beta needs less resources that component alpha
"""
compAlpha = self.componentsList[compAlphaId]
compBeta = self.componentsList[compBetaId]
sumAlpha = 0 # add 1 if alpha component need less resources then beta components
sumBeta = 0 # add 1 if beta component need less resources then alpha components
retAlpha, retBeta = self.__compareResource(compAlpha.HM, compBeta.HM)
sumAlpha += retAlpha
sumBeta += retBeta
retAlpha, retBeta = self.__compareResource(compAlpha.HC, compBeta.HC)
sumAlpha += retAlpha
sumBeta += retBeta
retAlpha, retBeta = self.__compareResource(compAlpha.HS, compBeta.HS)
sumAlpha += retAlpha
sumBeta += retBeta
retAlpha, retBeta = self.__compareResource(compAlpha.NIn, compBeta.NIn)
sumAlpha += retAlpha
sumBeta += sumBeta
retAlpha, retBeta = self.__compareResource(compAlpha.NOut, compBeta.NOut)
sumAlpha += retAlpha
sumBeta += retBeta
retAlpha, retBeta = self.__compareResource(compAlpha.NConnections, compBeta.NConnections)
sumAlpha += retAlpha
sumBeta += retBeta
return sumAlpha, sumBeta
def __compareResource(self,alphaValue, betaValue):
"""
Compare 2 hardware resources to which component alpha or beta needs more resources
:param alphaValue: alpha component resource value
:param betaValue: beta component resource value
:return: retAlpha - 1 if component alpha needs more resources that component beta, 0 otherwais
retBeta - 1 if component beta needs more resources that component alpha, 0 otherwais
"""
retAlpha = 0
retBeta = 0
if (alphaValue is None) and (betaValue is None):
return retAlpha, retBeta
if (alphaValue is None) and (betaValue is not None):
if betaValue.HM > 0:
retAlpha = 1
elif (betaValue is None) and (alphaValue is not None):
if alphaValue > 0:
retBeta = 1
return retAlpha, retBeta | [
"RecomandEngine.exactsolvers.nonlinear.SMT_Solver_Z3_RealBool.Z3_Solver",
"RecomandEngine.exactsolvers.nonlinear.SMT_Solver_Z3_RealReal.Z3_Solver",
"RecomandEngine.exactsolvers.linear.SMT_Solver_Z3_RealPBC.Z3_Solver",
"RecomandEngine.problem.Component.Component",
"RecomandEngine.exactsolvers.linear.IntIntOr... | [((1320, 1376), 'numpy.zeros', 'numpy.zeros', (['(self.nrComp, self.nrComp)'], {'dtype': 'numpy.int'}), '((self.nrComp, self.nrComp), dtype=numpy.int)\n', (1331, 1376), False, 'import numpy\n'), ((1412, 1468), 'numpy.zeros', 'numpy.zeros', (['(self.nrComp, self.nrComp)'], {'dtype': 'numpy.int'}), '((self.nrComp, self.nrComp), dtype=numpy.int)\n', (1423, 1468), False, 'import numpy\n'), ((9656, 9723), 'RecomandEngine.exactsolvers.CP_Solver_Number_of_Instances.CP_Solver_Got_Nr_Instances', 'CP_Solver_Got_Nr_Instances', (['self', 'choosing_stategy', 'solutions_limit'], {}), '(self, choosing_stategy, solutions_limit)\n', (9682, 9723), False, 'from RecomandEngine.exactsolvers.CP_Solver_Number_of_Instances import CP_Solver_Got_Nr_Instances\n'), ((12537, 12558), 'numpy.sum', 'numpy.sum', (['components'], {}), '(components)\n', (12546, 12558), False, 'import numpy\n'), ((14805, 15969), 'RecomandEngine.problem.Component.Component', 'Component', (['id', "(comp_dictionary['name'] if 'name' in comp_dictionary else None)", "(comp_dictionary['Compute']['CPU'] if 'CPU' in comp_dictionary['Compute'] else\n None)", "(comp_dictionary['Compute']['GPU'] if 'GPU' in comp_dictionary['Compute'] else\n 'false')", "(comp_dictionary['Compute']['Memory'] if 'Memory' in comp_dictionary[\n 'Compute'] else None)", "(comp_dictionary['Storage']['StorageSize'] if 'StorageSize' in\n comp_dictionary['Storage'] else 50)", "(comp_dictionary['Storage']['StorageType'] if 'StorageType' in\n comp_dictionary['Storage'] else None)", "(comp_dictionary['Storage']['StorageValue'] if 'StorageValue' in\n comp_dictionary['Storage'] else None)", "(comp_dictionary['Network']['dataIn'] if 'dataIn' in comp_dictionary[\n 'Network'] else None)", "(comp_dictionary['Network']['dataOut'] if 'dataOut' in comp_dictionary[\n 'Network'] else None)", "(comp_dictionary['Network']['networkConnections'] if 'networkConnections' in\n comp_dictionary['Network'] else None)", 
"(comp_dictionary['keywords'] if 'keywords' in comp_dictionary else None)", "(comp_dictionary['operatingSystem'] if 'operatingSystem' in comp_dictionary\n else None)"], {}), "(id, comp_dictionary['name'] if 'name' in comp_dictionary else\n None, comp_dictionary['Compute']['CPU'] if 'CPU' in comp_dictionary[\n 'Compute'] else None, comp_dictionary['Compute']['GPU'] if 'GPU' in\n comp_dictionary['Compute'] else 'false', comp_dictionary['Compute'][\n 'Memory'] if 'Memory' in comp_dictionary['Compute'] else None, \n comp_dictionary['Storage']['StorageSize'] if 'StorageSize' in\n comp_dictionary['Storage'] else 50, comp_dictionary['Storage'][\n 'StorageType'] if 'StorageType' in comp_dictionary['Storage'] else None,\n comp_dictionary['Storage']['StorageValue'] if 'StorageValue' in\n comp_dictionary['Storage'] else None, comp_dictionary['Network'][\n 'dataIn'] if 'dataIn' in comp_dictionary['Network'] else None, \n comp_dictionary['Network']['dataOut'] if 'dataOut' in comp_dictionary[\n 'Network'] else None, comp_dictionary['Network']['networkConnections'] if\n 'networkConnections' in comp_dictionary['Network'] else None, \n comp_dictionary['keywords'] if 'keywords' in comp_dictionary else None,\n comp_dictionary['operatingSystem'] if 'operatingSystem' in\n comp_dictionary else None)\n", (14814, 15969), False, 'from RecomandEngine.problem.Component import Component\n'), ((2090, 2191), 'RecomandEngine.exactsolvers.linear.SMT_Solver_Z3_RealSymBreak.Z3_Solver', 'SMT_Solver_Z3_RealSymBreak.Z3_Solver', (['self.nrVM', 'self.nrComp', 'availableConfigs', 'self', 'solver_type'], {}), '(self.nrVM, self.nrComp,\n availableConfigs, self, solver_type)\n', (2126, 2191), False, 'from RecomandEngine.exactsolvers.linear import SMT_Solver_Z3_RealSymBreak\n'), ((2363, 2459), 'RecomandEngine.exactsolvers.linear.SMT_Solver_Z3_IntBool.Z3_Solver', 'SMT_Solver_Z3_IntBool.Z3_Solver', (['self.nrVM', 'self.nrComp', 'availableConfigs', 'self', 'solver_type'], {}), '(self.nrVM, self.nrComp, 
availableConfigs,\n self, solver_type)\n', (2394, 2459), False, 'from RecomandEngine.exactsolvers.linear import SMT_Solver_Z3_IntBool\n'), ((2621, 2712), 'RecomandEngine.exactsolvers.linear.SMT_Solver_Z3_FD.Z3_Solver', 'SMT_Solver_Z3_FD.Z3_Solver', (['self.nrVM', 'self.nrComp', 'availableConfigs', 'self', 'solver_type'], {}), '(self.nrVM, self.nrComp, availableConfigs, self,\n solver_type)\n', (2647, 2712), False, 'from RecomandEngine.exactsolvers.linear import SMT_Solver_Z3_FD\n'), ((2886, 2980), 'RecomandEngine.exactsolvers.linear.IntIntOrSymBreaking.Z3_Solver', 'IntIntOrSymBreaking.Z3_Solver', (['self.nrVM', 'self.nrComp', 'availableConfigs', 'self', 'solver_type'], {}), '(self.nrVM, self.nrComp, availableConfigs,\n self, solver_type)\n', (2915, 2980), False, 'from RecomandEngine.exactsolvers.linear import IntIntOrSymBreaking\n'), ((3154, 3251), 'RecomandEngine.exactsolvers.nonlinear.SMT_Solver_Z3_RealBool.Z3_Solver', 'SMT_Solver_Z3_RealBool.Z3_Solver', (['self.nrVM', 'self.nrComp', 'availableConfigs', 'self', 'solver_type'], {}), '(self.nrVM, self.nrComp, availableConfigs,\n self, solver_type)\n', (3186, 3251), False, 'from RecomandEngine.exactsolvers.nonlinear import SMT_Solver_Z3_RealBool\n'), ((3430, 3527), 'RecomandEngine.exactsolvers.nonlinear.SMT_Solver_Z3_RealBool.Z3_Solver', 'SMT_Solver_Z3_RealBool.Z3_Solver', (['self.nrVM', 'self.nrComp', 'availableConfigs', 'self', 'solver_type'], {}), '(self.nrVM, self.nrComp, availableConfigs,\n self, solver_type)\n', (3462, 3527), False, 'from RecomandEngine.exactsolvers.nonlinear import SMT_Solver_Z3_RealBool\n'), ((3701, 3798), 'RecomandEngine.exactsolvers.nonlinear.SMT_Solver_Z3_RealReal.Z3_Solver', 'SMT_Solver_Z3_RealReal.Z3_Solver', (['self.nrVM', 'self.nrComp', 'availableConfigs', 'self', 'solver_type'], {}), '(self.nrVM, self.nrComp, availableConfigs,\n self, solver_type)\n', (3733, 3798), False, 'from RecomandEngine.exactsolvers.nonlinear import SMT_Solver_Z3_RealReal\n'), ((3977, 4074), 
'RecomandEngine.exactsolvers.nonlinear.SMT_Solver_Z3_RealReal.Z3_Solver', 'SMT_Solver_Z3_RealReal.Z3_Solver', (['self.nrVM', 'self.nrComp', 'availableConfigs', 'self', 'solver_type'], {}), '(self.nrVM, self.nrComp, availableConfigs,\n self, solver_type)\n', (4009, 4074), False, 'from RecomandEngine.exactsolvers.nonlinear import SMT_Solver_Z3_RealReal\n'), ((4260, 4363), 'RecomandEngine.exactsolvers.nonlinear.SMT_Solver_Z3_IntIntLessThan.Z3_Solver', 'SMT_Solver_Z3_IntIntLessThan.Z3_Solver', (['self.nrVM', 'self.nrComp', 'availableConfigs', 'self', 'solver_type'], {}), '(self.nrVM, self.nrComp,\n availableConfigs, self, solver_type)\n', (4298, 4363), False, 'from RecomandEngine.exactsolvers.nonlinear import SMT_Solver_Z3_IntIntLessThan\n'), ((4554, 4657), 'RecomandEngine.exactsolvers.nonlinear.SMT_Solver_Z3_IntIntLessThan.Z3_Solver', 'SMT_Solver_Z3_IntIntLessThan.Z3_Solver', (['self.nrVM', 'self.nrComp', 'availableConfigs', 'self', 'solver_type'], {}), '(self.nrVM, self.nrComp,\n availableConfigs, self, solver_type)\n', (4592, 4657), False, 'from RecomandEngine.exactsolvers.nonlinear import SMT_Solver_Z3_IntIntLessThan\n'), ((4831, 4928), 'RecomandEngine.exactsolvers.nonlinear.SMT_Solver_Z3_IntIntOr.Z3_Solver', 'SMT_Solver_Z3_IntIntOr.Z3_Solver', (['self.nrVM', 'self.nrComp', 'availableConfigs', 'self', 'solver_type'], {}), '(self.nrVM, self.nrComp, availableConfigs,\n self, solver_type)\n', (4863, 4928), False, 'from RecomandEngine.exactsolvers.nonlinear import SMT_Solver_Z3_IntIntOr\n'), ((5107, 5204), 'RecomandEngine.exactsolvers.nonlinear.SMT_Solver_Z3_IntIntOr.Z3_Solver', 'SMT_Solver_Z3_IntIntOr.Z3_Solver', (['self.nrVM', 'self.nrComp', 'availableConfigs', 'self', 'solver_type'], {}), '(self.nrVM, self.nrComp, availableConfigs,\n self, solver_type)\n', (5139, 5204), False, 'from RecomandEngine.exactsolvers.nonlinear import SMT_Solver_Z3_IntIntOr\n'), ((5366, 5457), 'RecomandEngine.exactsolvers.nonlinear.SMT_Solver_Z3_BV.Z3_Solver', 'SMT_Solver_Z3_BV.Z3_Solver', 
(['self.nrVM', 'self.nrComp', 'availableConfigs', 'self', 'solver_type'], {}), '(self.nrVM, self.nrComp, availableConfigs, self,\n solver_type)\n', (5392, 5457), False, 'from RecomandEngine.exactsolvers.nonlinear import SMT_Solver_Z3_BV\n'), ((5624, 5715), 'RecomandEngine.exactsolvers.nonlinear.SMT_Solver_Z3_BV.Z3_Solver', 'SMT_Solver_Z3_BV.Z3_Solver', (['self.nrVM', 'self.nrComp', 'availableConfigs', 'self', 'solver_type'], {}), '(self.nrVM, self.nrComp, availableConfigs, self,\n solver_type)\n', (5650, 5715), False, 'from RecomandEngine.exactsolvers.nonlinear import SMT_Solver_Z3_BV\n'), ((5887, 5983), 'RecomandEngine.exactsolvers.linear.SMT_Solver_Z3_RealPBC.Z3_Solver', 'SMT_Solver_Z3_RealPBC.Z3_Solver', (['self.nrVM', 'self.nrComp', 'availableConfigs', 'self', 'solver_type'], {}), '(self.nrVM, self.nrComp, availableConfigs,\n self, solver_type)\n', (5918, 5983), False, 'from RecomandEngine.exactsolvers.linear import SMT_Solver_Z3_RealPBC\n'), ((6064, 6104), 'sys.exit', 'sys.exit', (['"""No support for nonlinear PBC"""'], {}), "('No support for nonlinear PBC')\n", (6072, 6104), False, 'import sys\n'), ((6304, 6412), 'RecomandEngine.exactsolvers.linear.SMT_Solver_Z3_IntIntOrSymBreaking.Z3_Solver', 'SMT_Solver_Z3_IntIntOrSymBreaking.Z3_Solver', (['self.nrVM', 'self.nrComp', 'availableConfigs', 'self', 'solver_type'], {}), '(self.nrVM, self.nrComp,\n availableConfigs, self, solver_type)\n', (6347, 6412), False, 'from RecomandEngine.exactsolvers.linear import SMT_Solver_Z3_IntIntOrSymBreaking\n'), ((6677, 6784), 'RecomandEngine.exactsolvers.linear.SMT_Solver_Z3_RealPBCSymBreaking.Z3_Solver', 'SMT_Solver_Z3_RealPBCSymBreaking.Z3_Solver', (['self.nrVM', 'self.nrComp', 'availableConfigs', 'self', 'solver_type'], {}), '(self.nrVM, self.nrComp,\n availableConfigs, self, solver_type)\n', (6719, 6784), False, 'from RecomandEngine.exactsolvers.linear import SMT_Solver_Z3_RealPBCSymBreaking\n'), ((7057, 7168), 
'RecomandEngine.exactsolvers.linear.SMT_Solver_Z3_RealPBCMultiObjectives.Z3_Solver', 'SMT_Solver_Z3_RealPBCMultiObjectives.Z3_Solver', (['self.nrVM', 'self.nrComp', 'availableConfigs', 'self', 'solver_type'], {}), '(self.nrVM, self.nrComp,\n availableConfigs, self, solver_type)\n', (7103, 7168), False, 'from RecomandEngine.exactsolvers.linear import SMT_Solver_Z3_RealPBCMultiObjectives\n'), ((7264, 7304), 'sys.exit', 'sys.exit', (['"""No support for nonlinear PBC"""'], {}), "('No support for nonlinear PBC')\n", (7272, 7304), False, 'import sys\n'), ((10133, 10153), 'json.load', 'json.load', (['json_data'], {}), '(json_data)\n', (10142, 10153), False, 'import json\n'), ((13283, 13339), 'RecomandEngine.restrictions.RestrictionNumberOfInstances.RestrictionUpperLowerEqualBound', 'RestrictionUpperLowerEqualBound', (['[compId]', '""">="""', '(1)', 'self'], {}), "([compId], '>=', 1, self)\n", (13314, 13339), False, 'from RecomandEngine.restrictions.RestrictionNumberOfInstances import RestrictionUpperLowerEqualBound, RestrictionRangeBound, RestrictionFullDeployment, RestrictionRequireProvideDependency\n'), ((16663, 16742), 'RecomandEngine.restrictions.RestrictionConflicts.RestrictionConflict', 'RestrictionConflict', (["dictionary['alphaCompId']", "dictionary['compsIdList']", 'self'], {}), "(dictionary['alphaCompId'], dictionary['compsIdList'], self)\n", (16682, 16742), False, 'from RecomandEngine.restrictions.RestrictionConflicts import RestrictionConflict, RestrictionAlphaOrBeta\n'), ((16840, 16933), 'RecomandEngine.restrictions.RestrictionDependences.RestrictionOneToOneDependency', 'RestrictionOneToOneDependency', (["dictionary['alphaCompId']", "dictionary['betaCompId']", 'self'], {}), "(dictionary['alphaCompId'], dictionary[\n 'betaCompId'], self)\n", (16869, 16933), False, 'from RecomandEngine.restrictions.RestrictionDependences import RestrictionOneToOneDependency, RestrictionOneToManyDependency, RestrictionManyToManyDependency, RestrictionManyToManyDependencyNew\n'), 
((17098, 17213), 'RecomandEngine.restrictions.RestrictionDependences.RestrictionManyToManyDependency', 'RestrictionManyToManyDependency', (["dictionary['alphaCompId']", "dictionary['betaCompId']", "dictionary['sign']", 'self'], {}), "(dictionary['alphaCompId'], dictionary[\n 'betaCompId'], dictionary['sign'], self)\n", (17129, 17213), False, 'from RecomandEngine.restrictions.RestrictionDependences import RestrictionOneToOneDependency, RestrictionOneToManyDependency, RestrictionManyToManyDependency, RestrictionManyToManyDependencyNew\n'), ((14399, 14460), 'RecomandEngine.restrictions.RestrictionConflicts.RestrictionConflict', 'RestrictionConflict', (['(k + 1)', '[(u + 1) for u in dict2[j]]', 'self'], {}), '(k + 1, [(u + 1) for u in dict2[j]], self)\n', (14418, 14460), False, 'from RecomandEngine.restrictions.RestrictionConflicts import RestrictionConflict, RestrictionAlphaOrBeta\n'), ((17456, 17588), 'RecomandEngine.restrictions.RestrictionDependences.RestrictionManyToManyDependencyNew', 'RestrictionManyToManyDependencyNew', (["dictionary['alphaCompId']", "dictionary['betaCompId']", "dictionary['n']", "dictionary['m']", 'self'], {}), "(dictionary['alphaCompId'], dictionary[\n 'betaCompId'], dictionary['n'], dictionary['m'], self)\n", (17490, 17588), False, 'from RecomandEngine.restrictions.RestrictionDependences import RestrictionOneToOneDependency, RestrictionOneToManyDependency, RestrictionManyToManyDependency, RestrictionManyToManyDependencyNew\n'), ((17917, 18033), 'RecomandEngine.restrictions.RestrictionDependences.RestrictionOneToManyDependency', 'RestrictionOneToManyDependency', (["dictionary['alphaCompId']", "dictionary['betaCompId']", "dictionary['number']", 'self'], {}), "(dictionary['alphaCompId'], dictionary[\n 'betaCompId'], dictionary['number'], self)\n", (17947, 18033), False, 'from RecomandEngine.restrictions.RestrictionDependences import RestrictionOneToOneDependency, RestrictionOneToManyDependency, RestrictionManyToManyDependency, 
RestrictionManyToManyDependencyNew\n'), ((18164, 18273), 'RecomandEngine.restrictions.RestrictionNumberOfInstances.RestrictionRangeBound', 'RestrictionRangeBound', (["dictionary['components']", "dictionary['lowerBound']", "dictionary['upperBound']", 'self'], {}), "(dictionary['components'], dictionary['lowerBound'],\n dictionary['upperBound'], self)\n", (18185, 18273), False, 'from RecomandEngine.restrictions.RestrictionNumberOfInstances import RestrictionUpperLowerEqualBound, RestrictionRangeBound, RestrictionFullDeployment, RestrictionRequireProvideDependency\n'), ((18484, 18580), 'RecomandEngine.restrictions.RestrictionNumberOfInstances.RestrictionUpperLowerEqualBound', 'RestrictionUpperLowerEqualBound', (["dictionary['compsIdList']", '"""<="""', "dictionary['bound']", 'self'], {}), "(dictionary['compsIdList'], '<=', dictionary\n ['bound'], self)\n", (18515, 18580), False, 'from RecomandEngine.restrictions.RestrictionNumberOfInstances import RestrictionUpperLowerEqualBound, RestrictionRangeBound, RestrictionFullDeployment, RestrictionRequireProvideDependency\n'), ((18737, 18833), 'RecomandEngine.restrictions.RestrictionNumberOfInstances.RestrictionUpperLowerEqualBound', 'RestrictionUpperLowerEqualBound', (["dictionary['compsIdList']", '""">="""', "dictionary['bound']", 'self'], {}), "(dictionary['compsIdList'], '>=', dictionary\n ['bound'], self)\n", (18768, 18833), False, 'from RecomandEngine.restrictions.RestrictionNumberOfInstances import RestrictionUpperLowerEqualBound, RestrictionRangeBound, RestrictionFullDeployment, RestrictionRequireProvideDependency\n'), ((18990, 19085), 'RecomandEngine.restrictions.RestrictionNumberOfInstances.RestrictionUpperLowerEqualBound', 'RestrictionUpperLowerEqualBound', (["dictionary['compsIdList']", '"""="""', "dictionary['bound']", 'self'], {}), "(dictionary['compsIdList'], '=', dictionary[\n 'bound'], self)\n", (19021, 19085), False, 'from RecomandEngine.restrictions.RestrictionNumberOfInstances import 
RestrictionUpperLowerEqualBound, RestrictionRangeBound, RestrictionFullDeployment, RestrictionRequireProvideDependency\n'), ((19246, 19336), 'RecomandEngine.restrictions.RestrictionNumberOfInstances.RestrictionFullDeployment', 'RestrictionFullDeployment', (["dictionary['alphaCompId']", "dictionary['compsIdList']", 'self'], {}), "(dictionary['alphaCompId'], dictionary[\n 'compsIdList'], self)\n", (19271, 19336), False, 'from RecomandEngine.restrictions.RestrictionNumberOfInstances import RestrictionUpperLowerEqualBound, RestrictionRangeBound, RestrictionFullDeployment, RestrictionRequireProvideDependency\n'), ((19501, 19676), 'RecomandEngine.restrictions.RestrictionNumberOfInstances.RestrictionRequireProvideDependency', 'RestrictionRequireProvideDependency', (["dictionary['alphaCompId']", "dictionary['betaCompId']", "dictionary['alphaCompIdInstances']", "dictionary['betaCompIdInstances']", 'self'], {}), "(dictionary['alphaCompId'], dictionary[\n 'betaCompId'], dictionary['alphaCompIdInstances'], dictionary[\n 'betaCompIdInstances'], self)\n", (19536, 19676), False, 'from RecomandEngine.restrictions.RestrictionNumberOfInstances import RestrictionUpperLowerEqualBound, RestrictionRangeBound, RestrictionFullDeployment, RestrictionRequireProvideDependency\n'), ((19997, 20082), 'RecomandEngine.restrictions.RestrictionConflicts.RestrictionAlphaOrBeta', 'RestrictionAlphaOrBeta', (["dictionary['alphaCompId']", "dictionary['betaCompId']", 'self'], {}), "(dictionary['alphaCompId'], dictionary['betaCompId'],\n self)\n", (20019, 20082), False, 'from RecomandEngine.restrictions.RestrictionConflicts import RestrictionConflict, RestrictionAlphaOrBeta\n')] |
import os
import numpy as np
from ..model.model_zoo import *
from ..model.ssd import SSDLearner
from ..dataset.pascal_voc import PascalVOCObjectDataset
from ..dataset.coco import COCOObjectDataset
from ..model.configs import cfg
def set_up_pascalvoc_detection(config, output_dir, logger, device=0, queries_name='queries.txt'):
    """Build the Pascal VOC train/test datasets and an SSD learner from *config*.

    Args:
        config: nested dict with 'model', 'active_learning', 'dataset' and
            'experiment' sections (read, never written).
        output_dir: directory handed to the dataset wrappers.
        logger: logger used for progress messages.
        device: CUDA device index forwarded to the learner.
        queries_name: filename used by the datasets to record queries.

    Returns:
        (dataset, learner) tuple.
    """
    logger.info('Setting up datasets...')
    backbone = config['model']['backbone']
    model, cfg = get_model_config(backbone, 'voc')
    n_init = config['active_learning']['init_size']
    train_indices = np.arange(config['dataset']['train_size'])
    test_indices = np.arange(config['dataset']['test_size'])
    log_name = config['experiment']['logger_name']
    dataset = PascalVOCObjectDataset(
        train_indices, n_init=n_init, output_dir=output_dir, cfg=cfg,
        queries_name=queries_name)
    test_dataset = PascalVOCObjectDataset(
        test_indices, n_init=n_init, output_dir=output_dir, cfg=cfg,
        train=False, queries_name=queries_name)
    # Link the evaluation split so the train dataset can validate against it.
    dataset.set_validation_dataset(test_dataset.dataset)
    logger.info(f'Dataset initial train size : {len(dataset.init_dataset)}')
    logger.info(f'Dataset used train size : {len(dataset.dataset)}')
    logger.info(f'Dataset initial test size : {len(test_dataset.init_dataset)}')
    logger.info(f'Dataset test size : {len(test_dataset.dataset)}')
    logger.info('Setting up models...')
    learner = SSDLearner(model=model, cfg=cfg, logger_name=log_name,
                         device=device, dataset='voc')
    return dataset, learner
def set_up_coco_object_detection(config, output_dir, logger, device=0, queries_name='queries.txt'):
    """Build the COCO train/test datasets and an SSD learner from *config*.

    Args:
        config: nested dict with 'model', 'active_learning', 'dataset' and
            'experiment' sections (read, never written).
        output_dir: directory handed to the dataset wrappers.
        logger: logger used for progress messages.
        device: CUDA device index forwarded to the learner.
        queries_name: filename used by the datasets to record queries.

    Returns:
        (dataset, learner) tuple.
    """
    logger.info('Setting up datasets...')
    backbone = config['model']['backbone']
    model, cfg = get_model_config(backbone, 'coco')
    n_init = config['active_learning']['init_size']
    train_indices = np.arange(config['dataset']['train_size'])
    test_indices = np.arange(config['dataset']['test_size'])
    log_name = config['experiment']['logger_name']
    dataset = COCOObjectDataset(
        train_indices, n_init=n_init, output_dir=output_dir, cfg=cfg,
        queries_name=queries_name)
    test_dataset = COCOObjectDataset(
        test_indices, n_init=n_init, output_dir=output_dir, cfg=cfg,
        train=False, queries_name=queries_name)
    logger.info(f'Dataset initial train size : {len(dataset.init_dataset)}')
    logger.info(f'Dataset used train size : {len(dataset.dataset)}')
    logger.info(f'Dataset initial test size : {len(test_dataset.init_dataset)}')
    logger.info(f'Dataset test size : {len(test_dataset.dataset)}')
    # NOTE(review): the VOC variant links the validation set *before* logging;
    # here it happens after. Behaviour preserved as-is — confirm the ordering
    # difference is intentional.
    dataset.set_validation_dataset(test_dataset.dataset)
    logger.info('Setting up models...')
    learner = SSDLearner(model=model, cfg=cfg, logger_name=log_name,
                         device=device, dataset='coco')
    return dataset, learner
def get_model_config(backbone, dataset):
    """Load the SSD configuration for a (backbone, dataset) pair and build the model.

    Reads the YAML config file from the directory given by the MODULE_PATH
    environment variable, merges it into the shared ``cfg`` object, builds the
    detector and freezes ``cfg``.

    Args:
        backbone: one of 'mobilenet_v2' or 'vgg'.
        dataset: one of 'voc' or 'coco'.

    Returns:
        (model, cfg) tuple.

    Raises:
        ValueError: if the (dataset, backbone) pair is not supported.
            (The original if/elif chain left ``config_file`` unbound and
            crashed with a NameError for unsupported pairs.)
        RuntimeError: if MODULE_PATH is not set (instead of an opaque
            TypeError from ``os.path.join(None, ...)``).
    """
    # Explicit lookup table instead of a nested if/elif chain, so unsupported
    # combinations fail fast with a clear message.
    config_files = {
        ('voc', 'mobilenet_v2'): 'mobilenet_v2_ssd320_voc0712.yaml',
        ('voc', 'vgg'): 'vgg_ssd300_voc0712.yaml',
        ('coco', 'vgg'): 'vgg_ssd300_coco_trainval35k.yaml',
        ('coco', 'mobilenet_v2'): 'mobilenet_v2_ssd320_coco.yaml',
    }
    try:
        config_file = config_files[(dataset, backbone)]
    except KeyError:
        raise ValueError(
            f'Unsupported dataset/backbone combination: {dataset!r}/{backbone!r}')
    config_path = os.getenv('MODULE_PATH')
    if config_path is None:
        raise RuntimeError('MODULE_PATH environment variable is not set')
    path = os.path.expanduser(os.path.join(config_path, config_file))
    cfg.merge_from_file(path)
    model = SSDDetector(cfg, backbone)
    cfg.freeze()  # lock the config after the model has been built
    return model, cfg
"os.path.join",
"numpy.arange",
"os.getenv"
] | [((541, 583), 'numpy.arange', 'np.arange', (["config['dataset']['train_size']"], {}), "(config['dataset']['train_size'])\n", (550, 583), True, 'import numpy as np\n'), ((601, 642), 'numpy.arange', 'np.arange', (["config['dataset']['test_size']"], {}), "(config['dataset']['test_size'])\n", (610, 642), True, 'import numpy as np\n'), ((1825, 1867), 'numpy.arange', 'np.arange', (["config['dataset']['train_size']"], {}), "(config['dataset']['train_size'])\n", (1834, 1867), True, 'import numpy as np\n'), ((1885, 1926), 'numpy.arange', 'np.arange', (["config['dataset']['test_size']"], {}), "(config['dataset']['test_size'])\n", (1894, 1926), True, 'import numpy as np\n'), ((2853, 2877), 'os.getenv', 'os.getenv', (['"""MODULE_PATH"""'], {}), "('MODULE_PATH')\n", (2862, 2877), False, 'import os\n'), ((3335, 3373), 'os.path.join', 'os.path.join', (['config_path', 'config_file'], {}), '(config_path, config_file)\n', (3347, 3373), False, 'import os\n')] |
import numpy as np
import unittest
import os
import shutil
# Data from Geradin
# time[s] theta[rad]
# Digitised reference solution: rotation angle of the first body (FoR 0)
# over time. NOTE(review): neither array is referenced by the assertions in
# TestDoublePendulum below — presumably kept for manual comparison/plotting;
# confirm before removing.
geradin_FoR0 = np.array([[-0.0117973, 1.56808],
                         [0.0816564, 1.5394],
                         [0.171988, 1.41698],
                         [0.235203, 1.31521],
                         [0.307327, 1.09265],
                         [0.427399, 0.601124],
                         [0.526338, 0.0899229],
                         [0.646417, -0.394903],
                         [0.748531, -0.765465],
                         [0.868959, -0.94209],
                         [0.905131, -0.956217],
                         [0.965504, -0.903829],
                         [1.06828, -0.69149],
                         [1.16508, -0.425429],
                         [1.29798, -0.240495],
                         [1.42483, -0.0688408],
                         [1.56382, 0.169571],
                         [1.78751, 0.634087],
                         [1.89627, 0.806105],
                         [1.98075, 0.844608],
                         [2.10125, 0.734983],
                         [2.19143, 0.478564],
                         [2.26934, 0.0347871],
                         [2.37751, -0.315796],
                         [2.52803, -0.546627],
                         [2.60034, -0.601682],
                         [2.76314, -0.645155],
                         [2.88987, -0.580701],
                         [3.05893, -0.423295],
                         [3.24611, -0.239453],
                         [3.43335, -0.00201041],
                         [3.51194, 0.157214],
                         [3.59668, 0.423517],
                         [3.6815, 0.756821],
                         [3.73591, 0.87633],
                         [3.81435, 0.901554],
                         [3.93481, 0.751729],
                         [4.04305, 0.468146],
                         [4.17525, 0.0366783],
                         [4.34949, -0.556439],
                         [4.44551, -0.987179],
                         [4.57784, -1.29805],
                         [4.65016, -1.3531],
                         [4.70444, -1.35419],
                         [4.78294, -1.27537],
                         [4.86762, -1.06267],
                         [4.99464, -0.743611]])
# Digitised reference solution: rotation angle of the second body (FoR 1).
geradin_FoR1 = np.array([[0.00756934, 0.0266485],
                         [0.134225, 0.0241027],
                         [0.309222, 0.100987],
                         [0.418117, 0.393606],
                         [0.490855, 0.713752],
                         [0.533195, 0.820103],
                         [0.635787, 0.871642],
                         [0.762124, 0.587696],
                         [0.85826, 0.264156],
                         [0.918194, -0.0720575],
                         [0.996205, -0.422034],
                         [1.05008, -0.784926],
                         [1.09792, -1.1477],
                         [1.1639, -1.47063],
                         [1.27207, -1.82121],
                         [1.38636, -2.09152],
                         [1.47067, -2.20042],
                         [1.53694, -2.26875],
                         [1.67582, -2.12414],
                         [1.8028, -1.84528],
                         [1.89365, -1.5121],
                         [1.97843, -1.2056],
                         [2.00271, -1.07208],
                         [2.08146, -0.765457],
                         [2.14818, -0.431789],
                         [2.19686, -0.0575583],
                         [2.24552, 0.303273],
                         [2.29421, 0.690904],
                         [2.37299, 1.02433],
                         [2.44573, 1.34447],
                         [2.55464, 1.65049],
                         [2.65749, 1.92983],
                         [2.7904, 2.12817],
                         [2.94135, 2.27254],
                         [3.03182, 2.27072],
                         [3.18853, 2.17376],
                         [3.30891, 1.95694],
                         [3.42312, 1.61964],
                         [3.50121, 1.33666],
                         [3.56714, 0.973525],
                         [3.61495, 0.583954],
                         [3.66883, 0.221062],
                         [3.71673, -0.0881084],
                         [3.80076, -0.451606],
                         [3.87271, -0.828262],
                         [3.95678, -1.15156],
                         [3.98681, -1.25937],
                         [4.08307, -1.47571],
                         [4.13729, -1.5304],
                         [4.27618, -1.38579],
                         [4.36701, -1.066],
                         [4.4217, -0.705294],
                         [4.50652, -0.37199],
                         [4.59132, -0.0520868],
                         [4.68815, 0.240774],
                         [4.79703, 0.519993],
                         [4.91188, 0.74549],
                         [4.98432, 0.797635]])
class TestDoublePendulum(unittest.TestCase):
    """
    Validation of a double pendulum with a mass at each tip position
    Reference case: Geradin and Cardona, "Flexible multibody dynamics : a finite element approach"
    """
    def setUp(self):
        """Build the two-body double-pendulum SHARPy case and write its input files.

        Creates two identical beams joined by hinge Lagrange constraints and
        writes the solver, FEM, aero, dynamic and multibody HDF5 files next to
        this test file; test_doublependulum then runs the case from them.
        """
        import sharpy.utils.generate_cases as gc
        from sharpy.utils.constants import deg2rad
        # Structural properties (sectional stiffnesses set to 1e9 so the
        # flexible beams behave as near-rigid links)
        mass_per_unit_length = 1.
        mass_iner = 1e-4
        EA = 1e9
        GA = 1e9
        GJ = 1e9
        EI = 1e9
        # Beam1: theta is measured from the downward vertical (see the
        # sin/cos placement below), so 90 deg lays beam1 along +x.
        global nnodes1  # shared with test_doublependulum, which reads the output node index
        nnodes1 = 11
        l1 = 1.0
        m1 = 1.0
        theta_ini1 = 90.*deg2rad
        # Beam2: same discretisation as beam1, hanging at 0 deg (along -z).
        nnodes2 = nnodes1
        l2 = l1
        m2 = m1
        theta_ini2 = 00.*deg2rad
        # airfoils: dummy flat-plate camber line; aero surfaces are required
        # by the coupled solver even though rho is set to 0.0 further down,
        # which zeroes the aerodynamic forces.
        airfoil = np.zeros((1,20,2),)
        airfoil[0,:,0] = np.linspace(0.,1.,20)
        # Simulation
        numtimesteps = 10
        dt = 0.01
        # Create the structure
        beam1 = gc.AeroelasticInformation()
        r1 = np.linspace(0.0, l1, nnodes1)
        node_pos1 = np.zeros((nnodes1,3),)
        node_pos1[:, 0] = r1*np.sin(theta_ini1)
        node_pos1[:, 2] = -r1*np.cos(theta_ini1)
        beam1.StructuralInformation.generate_uniform_sym_beam(node_pos1, mass_per_unit_length, mass_iner, EA, GA, GJ, EI, num_node_elem = 3, y_BFoR = 'y_AFoR', num_lumped_mass=1)
        beam1.StructuralInformation.body_number = np.zeros((beam1.StructuralInformation.num_elem,), dtype = int)
        # Boundary conditions: 1 = clamped root node, -1 = free tip node.
        beam1.StructuralInformation.boundary_conditions[0] = 1
        beam1.StructuralInformation.boundary_conditions[-1] = -1
        # Point mass m1 at the tip of beam1.
        beam1.StructuralInformation.lumped_mass_nodes = np.array([nnodes1-1], dtype = int)
        beam1.StructuralInformation.lumped_mass = np.ones((1,))*m1
        beam1.StructuralInformation.lumped_mass_inertia = np.zeros((1,3,3))
        beam1.StructuralInformation.lumped_mass_position = np.zeros((1,3))
        beam1.AerodynamicInformation.create_one_uniform_aerodynamics(
                                    beam1.StructuralInformation,
                                    chord = 1.,
                                    twist = 0.,
                                    sweep = 0.,
                                    num_chord_panels = 4,
                                    m_distribution = 'uniform',
                                    elastic_axis = 0.25,
                                    num_points_camber = 20,
                                    airfoil = airfoil)
        beam2 = gc.AeroelasticInformation()
        r2 = np.linspace(0.0, l2, nnodes2)
        node_pos2 = np.zeros((nnodes2,3),)
        # Beam2 starts at beam1's tip.
        node_pos2[:, 0] = r2*np.sin(theta_ini2) + node_pos1[-1, 0]
        node_pos2[:, 2] = -r2*np.cos(theta_ini2) + node_pos1[-1, 2]
        beam2.StructuralInformation.generate_uniform_sym_beam(node_pos2, mass_per_unit_length, mass_iner, EA, GA, GJ, EI, num_node_elem = 3, y_BFoR = 'y_AFoR', num_lumped_mass=1)
        # NOTE(review): reads beam1's num_elem (identical value here since both
        # beams share the discretisation) — presumably beam2 was intended;
        # confirm before changing, as assembly() may renumber bodies anyway.
        beam2.StructuralInformation.body_number = np.zeros((beam1.StructuralInformation.num_elem,), dtype = int)
        beam2.StructuralInformation.boundary_conditions[0] = 1
        beam2.StructuralInformation.boundary_conditions[-1] = -1
        beam2.StructuralInformation.lumped_mass_nodes = np.array([nnodes2-1], dtype = int)
        beam2.StructuralInformation.lumped_mass = np.ones((1,))*m2
        beam2.StructuralInformation.lumped_mass_inertia = np.zeros((1,3,3))
        beam2.StructuralInformation.lumped_mass_position = np.zeros((1,3))
        beam2.AerodynamicInformation.create_one_uniform_aerodynamics(
                                    beam2.StructuralInformation,
                                    chord = 1.,
                                    twist = 0.,
                                    sweep = 0.,
                                    num_chord_panels = 4,
                                    m_distribution = 'uniform',
                                    elastic_axis = 0.25,
                                    num_points_camber = 20,
                                    airfoil = airfoil)
        # Merge beam2 into beam1; beam1 then holds the full two-body model.
        beam1.assembly(beam2)
        # Simulation details
        SimInfo = gc.SimulationInformation()
        SimInfo.set_default_values()
        SimInfo.define_uinf(np.array([0.0,1.0,0.0]), 1.)
        SimInfo.solvers['SHARPy']['flow'] = ['BeamLoader',
                                'AerogridLoader',
                                # 'InitializeMultibody',
                                'DynamicCoupled']
        global name  # read by tearDown to remove the generated input files
        name = 'double_pendulum_geradin'
        SimInfo.solvers['SHARPy']['case'] = 'double_pendulum_geradin'
        SimInfo.solvers['SHARPy']['write_screen'] = 'off'
        SimInfo.solvers['SHARPy']['route'] = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + '/'
        SimInfo.solvers['SHARPy']['log_folder'] = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + '/'
        SimInfo.set_variable_all_dicts('dt', dt)
        SimInfo.define_num_steps(numtimesteps)
        # rho = 0.0 switches the aerodynamic forces off: this is a purely
        # structural (multibody) validation case.
        SimInfo.set_variable_all_dicts('rho', 0.0)
        SimInfo.set_variable_all_dicts('velocity_field_input', SimInfo.solvers['SteadyVelocityField'])
        SimInfo.set_variable_all_dicts('output', os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + '/output/')
        SimInfo.solvers['BeamLoader']['unsteady'] = 'on'
        SimInfo.solvers['AerogridLoader']['unsteady'] = 'on'
        SimInfo.solvers['AerogridLoader']['mstar'] = 2
        SimInfo.solvers['AerogridLoader']['wake_shape_generator'] = 'StraightWake'
        SimInfo.solvers['AerogridLoader']['wake_shape_generator_input'] = {'u_inf':1.,
                                                           'u_inf_direction': np.array([0., 1., 0.]),
                                                           'dt': dt}
        # Record the quaternions of both frames of reference and the tip node
        # positions of both beams at every time step.
        SimInfo.solvers['WriteVariablesTime']['FoR_number'] = np.array([0, 1], dtype = int)
        SimInfo.solvers['WriteVariablesTime']['FoR_variables'] = ['mb_quat']
        SimInfo.solvers['WriteVariablesTime']['structure_nodes'] = np.array([nnodes1-1, nnodes1+nnodes2-1], dtype = int)
        SimInfo.solvers['WriteVariablesTime']['structure_variables'] = ['pos']
        SimInfo.solvers['NonLinearDynamicMultibody']['gravity_on'] = True
        SimInfo.solvers['NonLinearDynamicMultibody']['newmark_damp'] = 0.15
        SimInfo.solvers['BeamPlot']['include_FoR'] = True
        SimInfo.solvers['DynamicCoupled']['structural_solver'] = 'NonLinearDynamicMultibody'
        SimInfo.solvers['DynamicCoupled']['structural_solver_settings'] = SimInfo.solvers['NonLinearDynamicMultibody']
        SimInfo.solvers['DynamicCoupled']['aero_solver'] = 'StepUvlm'
        SimInfo.solvers['DynamicCoupled']['aero_solver_settings'] = SimInfo.solvers['StepUvlm']
        SimInfo.solvers['DynamicCoupled']['postprocessors'] = ['WriteVariablesTime', 'BeamPlot', 'AerogridPlot']
        SimInfo.solvers['DynamicCoupled']['postprocessors_settings'] = {'WriteVariablesTime': SimInfo.solvers['WriteVariablesTime'],
                                                     'BeamPlot': SimInfo.solvers['BeamPlot'],
                                                     'AerogridPlot': SimInfo.solvers['AerogridPlot']}
        SimInfo.solvers['DynamicCoupled']['postprocessors_settings']['WriteVariablesTime']['folder'] = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + '/output/'
        SimInfo.solvers['DynamicCoupled']['postprocessors_settings']['BeamPlot']['folder'] = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + '/output/'
        SimInfo.solvers['DynamicCoupled']['postprocessors_settings']['AerogridPlot']['folder'] = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + '/output/'
        SimInfo.with_forced_vel = False
        SimInfo.with_dynamic_forces = False
        # Create the MB and BC files
        # LC1: hinge between body 0 (beam1) and the ground, rotating about y.
        LC1 = gc.LagrangeConstraint()
        LC1.behaviour = 'hinge_FoR'
        LC1.body_FoR = 0
        LC1.rot_axis_AFoR = np.array([0.0,1.0,0.0])
        # LC2: hinge joining beam1's tip node to body 1 (beam2), about y.
        LC2 = gc.LagrangeConstraint()
        LC2.behaviour = 'hinge_node_FoR'
        LC2.node_in_body = nnodes1-1
        LC2.body = 0
        LC2.body_FoR = 1
        LC2.rot_axisB = np.array([0.0,1.0,0.0])
        LC = []
        LC.append(LC1)
        LC.append(LC2)
        MB1 = gc.BodyInformation()
        MB1.body_number = 0
        MB1.FoR_position = np.zeros((6,),)
        MB1.FoR_velocity = np.zeros((6,),)
        MB1.FoR_acceleration = np.zeros((6,),)
        MB1.FoR_movement = 'free'
        MB1.quat = np.array([1.0,0.0,0.0,0.0])
        MB2 = gc.BodyInformation()
        MB2.body_number = 1
        # Body 1's frame of reference starts at the root of beam2.
        MB2.FoR_position = np.array([node_pos2[0, 0], node_pos2[0, 1], node_pos2[0, 2], 0.0, 0.0, 0.0])
        MB2.FoR_velocity = np.zeros((6,),)
        MB2.FoR_acceleration = np.zeros((6,),)
        MB2.FoR_movement = 'free'
        MB2.quat = np.array([1.0,0.0,0.0,0.0])
        MB = []
        MB.append(MB1)
        MB.append(MB2)
        # Write files
        gc.clean_test_files(SimInfo.solvers['SHARPy']['route'], SimInfo.solvers['SHARPy']['case'])
        SimInfo.generate_solver_file()
        SimInfo.generate_dyn_file(numtimesteps)
        beam1.generate_h5_files(SimInfo.solvers['SHARPy']['route'], SimInfo.solvers['SHARPy']['case'])
        gc.generate_multibody_file(LC, MB,SimInfo.solvers['SHARPy']['route'], SimInfo.solvers['SHARPy']['case'])
    def test_doublependulum(self):
        """Run the generated case and check the final tip position of beam2."""
        import sharpy.sharpy_main
        solver_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + '/double_pendulum_geradin.sharpy')
        sharpy.sharpy_main.main(['', solver_path])
        # read output and compare
        output_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + '/output/double_pendulum_geradin/WriteVariablesTime/'
        # Tip node of beam2: node nnodes1*2-1 in the assembled model.
        pos_tip_data = np.loadtxt(("%sstruct_pos_node%d.dat" % (output_path, nnodes1*2-1)), )
        # Columns: 0 = time step, 1..3 = x, y, z position.
        self.assertAlmostEqual(pos_tip_data[-1, 1], 1.051004, 4)
        self.assertAlmostEqual(pos_tip_data[-1, 2], 0.000000, 4)
        self.assertAlmostEqual(pos_tip_data[-1, 3], -0.9986984, 4)
    def tearDown(self):
        """Delete the generated input files and the output directory."""
        solver_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
        solver_path += '/'
        # ``name`` is the module-level case name set by setUp.
        files_to_delete = [name + '.aero.h5',
                           name + '.dyn.h5',
                           name + '.fem.h5',
                           name + '.mb.h5',
                           name + '.sharpy']
        for f in files_to_delete:
            os.remove(solver_path + f)
        shutil.rmtree(solver_path + 'output/')
| [
"sharpy.utils.generate_cases.LagrangeConstraint",
"sharpy.utils.generate_cases.BodyInformation",
"sharpy.utils.generate_cases.clean_test_files",
"numpy.ones",
"sharpy.utils.generate_cases.SimulationInformation",
"sharpy.utils.generate_cases.generate_multibody_file",
"sharpy.utils.generate_cases.Aeroelas... | [((116, 1209), 'numpy.array', 'np.array', (['[[-0.0117973, 1.56808], [0.0816564, 1.5394], [0.171988, 1.41698], [0.235203,\n 1.31521], [0.307327, 1.09265], [0.427399, 0.601124], [0.526338, \n 0.0899229], [0.646417, -0.394903], [0.748531, -0.765465], [0.868959, -\n 0.94209], [0.905131, -0.956217], [0.965504, -0.903829], [1.06828, -\n 0.69149], [1.16508, -0.425429], [1.29798, -0.240495], [1.42483, -\n 0.0688408], [1.56382, 0.169571], [1.78751, 0.634087], [1.89627, \n 0.806105], [1.98075, 0.844608], [2.10125, 0.734983], [2.19143, 0.478564\n ], [2.26934, 0.0347871], [2.37751, -0.315796], [2.52803, -0.546627], [\n 2.60034, -0.601682], [2.76314, -0.645155], [2.88987, -0.580701], [\n 3.05893, -0.423295], [3.24611, -0.239453], [3.43335, -0.00201041], [\n 3.51194, 0.157214], [3.59668, 0.423517], [3.6815, 0.756821], [3.73591, \n 0.87633], [3.81435, 0.901554], [3.93481, 0.751729], [4.04305, 0.468146],\n [4.17525, 0.0366783], [4.34949, -0.556439], [4.44551, -0.987179], [\n 4.57784, -1.29805], [4.65016, -1.3531], [4.70444, -1.35419], [4.78294, \n -1.27537], [4.86762, -1.06267], [4.99464, -0.743611]]'], {}), '([[-0.0117973, 1.56808], [0.0816564, 1.5394], [0.171988, 1.41698],\n [0.235203, 1.31521], [0.307327, 1.09265], [0.427399, 0.601124], [\n 0.526338, 0.0899229], [0.646417, -0.394903], [0.748531, -0.765465], [\n 0.868959, -0.94209], [0.905131, -0.956217], [0.965504, -0.903829], [\n 1.06828, -0.69149], [1.16508, -0.425429], [1.29798, -0.240495], [\n 1.42483, -0.0688408], [1.56382, 0.169571], [1.78751, 0.634087], [\n 1.89627, 0.806105], [1.98075, 0.844608], [2.10125, 0.734983], [2.19143,\n 0.478564], [2.26934, 0.0347871], [2.37751, -0.315796], [2.52803, -\n 0.546627], [2.60034, -0.601682], [2.76314, -0.645155], [2.88987, -\n 0.580701], [3.05893, -0.423295], [3.24611, -0.239453], [3.43335, -\n 0.00201041], [3.51194, 0.157214], [3.59668, 0.423517], [3.6815, \n 0.756821], [3.73591, 0.87633], [3.81435, 0.901554], [3.93481, 0.751729],\n 
[4.04305, 0.468146], [4.17525, 0.0366783], [4.34949, -0.556439], [\n 4.44551, -0.987179], [4.57784, -1.29805], [4.65016, -1.3531], [4.70444,\n -1.35419], [4.78294, -1.27537], [4.86762, -1.06267], [4.99464, -0.743611]])\n', (124, 1209), True, 'import numpy as np\n'), ((2264, 3578), 'numpy.array', 'np.array', (['[[0.00756934, 0.0266485], [0.134225, 0.0241027], [0.309222, 0.100987], [\n 0.418117, 0.393606], [0.490855, 0.713752], [0.533195, 0.820103], [\n 0.635787, 0.871642], [0.762124, 0.587696], [0.85826, 0.264156], [\n 0.918194, -0.0720575], [0.996205, -0.422034], [1.05008, -0.784926], [\n 1.09792, -1.1477], [1.1639, -1.47063], [1.27207, -1.82121], [1.38636, -\n 2.09152], [1.47067, -2.20042], [1.53694, -2.26875], [1.67582, -2.12414],\n [1.8028, -1.84528], [1.89365, -1.5121], [1.97843, -1.2056], [2.00271, -\n 1.07208], [2.08146, -0.765457], [2.14818, -0.431789], [2.19686, -\n 0.0575583], [2.24552, 0.303273], [2.29421, 0.690904], [2.37299, 1.02433\n ], [2.44573, 1.34447], [2.55464, 1.65049], [2.65749, 1.92983], [2.7904,\n 2.12817], [2.94135, 2.27254], [3.03182, 2.27072], [3.18853, 2.17376], [\n 3.30891, 1.95694], [3.42312, 1.61964], [3.50121, 1.33666], [3.56714, \n 0.973525], [3.61495, 0.583954], [3.66883, 0.221062], [3.71673, -\n 0.0881084], [3.80076, -0.451606], [3.87271, -0.828262], [3.95678, -\n 1.15156], [3.98681, -1.25937], [4.08307, -1.47571], [4.13729, -1.5304],\n [4.27618, -1.38579], [4.36701, -1.066], [4.4217, -0.705294], [4.50652, \n -0.37199], [4.59132, -0.0520868], [4.68815, 0.240774], [4.79703, \n 0.519993], [4.91188, 0.74549], [4.98432, 0.797635]]'], {}), '([[0.00756934, 0.0266485], [0.134225, 0.0241027], [0.309222, \n 0.100987], [0.418117, 0.393606], [0.490855, 0.713752], [0.533195, \n 0.820103], [0.635787, 0.871642], [0.762124, 0.587696], [0.85826, \n 0.264156], [0.918194, -0.0720575], [0.996205, -0.422034], [1.05008, -\n 0.784926], [1.09792, -1.1477], [1.1639, -1.47063], [1.27207, -1.82121],\n [1.38636, -2.09152], [1.47067, -2.20042], [1.53694, 
-2.26875], [1.67582,\n -2.12414], [1.8028, -1.84528], [1.89365, -1.5121], [1.97843, -1.2056],\n [2.00271, -1.07208], [2.08146, -0.765457], [2.14818, -0.431789], [\n 2.19686, -0.0575583], [2.24552, 0.303273], [2.29421, 0.690904], [\n 2.37299, 1.02433], [2.44573, 1.34447], [2.55464, 1.65049], [2.65749, \n 1.92983], [2.7904, 2.12817], [2.94135, 2.27254], [3.03182, 2.27072], [\n 3.18853, 2.17376], [3.30891, 1.95694], [3.42312, 1.61964], [3.50121, \n 1.33666], [3.56714, 0.973525], [3.61495, 0.583954], [3.66883, 0.221062],\n [3.71673, -0.0881084], [3.80076, -0.451606], [3.87271, -0.828262], [\n 3.95678, -1.15156], [3.98681, -1.25937], [4.08307, -1.47571], [4.13729,\n -1.5304], [4.27618, -1.38579], [4.36701, -1.066], [4.4217, -0.705294],\n [4.50652, -0.37199], [4.59132, -0.0520868], [4.68815, 0.240774], [\n 4.79703, 0.519993], [4.91188, 0.74549], [4.98432, 0.797635]])\n', (2272, 3578), True, 'import numpy as np\n'), ((5654, 5674), 'numpy.zeros', 'np.zeros', (['(1, 20, 2)'], {}), '((1, 20, 2))\n', (5662, 5674), True, 'import numpy as np\n'), ((5699, 5724), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(20)'], {}), '(0.0, 1.0, 20)\n', (5710, 5724), True, 'import numpy as np\n'), ((5835, 5862), 'sharpy.utils.generate_cases.AeroelasticInformation', 'gc.AeroelasticInformation', ([], {}), '()\n', (5860, 5862), True, 'import sharpy.utils.generate_cases as gc\n'), ((5876, 5905), 'numpy.linspace', 'np.linspace', (['(0.0)', 'l1', 'nnodes1'], {}), '(0.0, l1, nnodes1)\n', (5887, 5905), True, 'import numpy as np\n'), ((5926, 5948), 'numpy.zeros', 'np.zeros', (['(nnodes1, 3)'], {}), '((nnodes1, 3))\n', (5934, 5948), True, 'import numpy as np\n'), ((6275, 6335), 'numpy.zeros', 'np.zeros', (['(beam1.StructuralInformation.num_elem,)'], {'dtype': 'int'}), '((beam1.StructuralInformation.num_elem,), dtype=int)\n', (6283, 6335), True, 'import numpy as np\n'), ((6522, 6556), 'numpy.array', 'np.array', (['[nnodes1 - 1]'], {'dtype': 'int'}), '([nnodes1 - 1], dtype=int)\n', (6530, 6556), 
True, 'import numpy as np\n'), ((6682, 6701), 'numpy.zeros', 'np.zeros', (['(1, 3, 3)'], {}), '((1, 3, 3))\n', (6690, 6701), True, 'import numpy as np\n'), ((6759, 6775), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (6767, 6775), True, 'import numpy as np\n'), ((7437, 7464), 'sharpy.utils.generate_cases.AeroelasticInformation', 'gc.AeroelasticInformation', ([], {}), '()\n', (7462, 7464), True, 'import sharpy.utils.generate_cases as gc\n'), ((7478, 7507), 'numpy.linspace', 'np.linspace', (['(0.0)', 'l2', 'nnodes2'], {}), '(0.0, l2, nnodes2)\n', (7489, 7507), True, 'import numpy as np\n'), ((7528, 7550), 'numpy.zeros', 'np.zeros', (['(nnodes2, 3)'], {}), '((nnodes2, 3))\n', (7536, 7550), True, 'import numpy as np\n'), ((7915, 7975), 'numpy.zeros', 'np.zeros', (['(beam1.StructuralInformation.num_elem,)'], {'dtype': 'int'}), '((beam1.StructuralInformation.num_elem,), dtype=int)\n', (7923, 7975), True, 'import numpy as np\n'), ((8162, 8196), 'numpy.array', 'np.array', (['[nnodes2 - 1]'], {'dtype': 'int'}), '([nnodes2 - 1], dtype=int)\n', (8170, 8196), True, 'import numpy as np\n'), ((8322, 8341), 'numpy.zeros', 'np.zeros', (['(1, 3, 3)'], {}), '((1, 3, 3))\n', (8330, 8341), True, 'import numpy as np\n'), ((8399, 8415), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (8407, 8415), True, 'import numpy as np\n'), ((9139, 9165), 'sharpy.utils.generate_cases.SimulationInformation', 'gc.SimulationInformation', ([], {}), '()\n', (9163, 9165), True, 'import sharpy.utils.generate_cases as gc\n'), ((10882, 10909), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'int'}), '([0, 1], dtype=int)\n', (10890, 10909), True, 'import numpy as np\n'), ((11056, 11113), 'numpy.array', 'np.array', (['[nnodes1 - 1, nnodes1 + nnodes2 - 1]'], {'dtype': 'int'}), '([nnodes1 - 1, nnodes1 + nnodes2 - 1], dtype=int)\n', (11064, 11113), True, 'import numpy as np\n'), ((12910, 12933), 'sharpy.utils.generate_cases.LagrangeConstraint', 'gc.LagrangeConstraint', ([], {}), '()\n', 
(12931, 12933), True, 'import sharpy.utils.generate_cases as gc\n'), ((13023, 13048), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (13031, 13048), True, 'import numpy as np\n'), ((13062, 13085), 'sharpy.utils.generate_cases.LagrangeConstraint', 'gc.LagrangeConstraint', ([], {}), '()\n', (13083, 13085), True, 'import sharpy.utils.generate_cases as gc\n'), ((13234, 13259), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (13242, 13259), True, 'import numpy as np\n'), ((13336, 13356), 'sharpy.utils.generate_cases.BodyInformation', 'gc.BodyInformation', ([], {}), '()\n', (13354, 13356), True, 'import sharpy.utils.generate_cases as gc\n'), ((13412, 13426), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (13420, 13426), True, 'import numpy as np\n'), ((13455, 13469), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (13463, 13469), True, 'import numpy as np\n'), ((13502, 13516), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (13510, 13516), True, 'import numpy as np\n'), ((13571, 13601), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0, 0.0])\n', (13579, 13601), True, 'import numpy as np\n'), ((13614, 13634), 'sharpy.utils.generate_cases.BodyInformation', 'gc.BodyInformation', ([], {}), '()\n', (13632, 13634), True, 'import sharpy.utils.generate_cases as gc\n'), ((13690, 13766), 'numpy.array', 'np.array', (['[node_pos2[0, 0], node_pos2[0, 1], node_pos2[0, 2], 0.0, 0.0, 0.0]'], {}), '([node_pos2[0, 0], node_pos2[0, 1], node_pos2[0, 2], 0.0, 0.0, 0.0])\n', (13698, 13766), True, 'import numpy as np\n'), ((13794, 13808), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (13802, 13808), True, 'import numpy as np\n'), ((13841, 13855), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (13849, 13855), True, 'import numpy as np\n'), ((13910, 13940), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0, 0.0])\n', (13918, 13940), True, 'import 
numpy as np\n'), ((14032, 14127), 'sharpy.utils.generate_cases.clean_test_files', 'gc.clean_test_files', (["SimInfo.solvers['SHARPy']['route']", "SimInfo.solvers['SHARPy']['case']"], {}), "(SimInfo.solvers['SHARPy']['route'], SimInfo.solvers[\n 'SHARPy']['case'])\n", (14051, 14127), True, 'import sharpy.utils.generate_cases as gc\n'), ((14321, 14430), 'sharpy.utils.generate_cases.generate_multibody_file', 'gc.generate_multibody_file', (['LC', 'MB', "SimInfo.solvers['SHARPy']['route']", "SimInfo.solvers['SHARPy']['case']"], {}), "(LC, MB, SimInfo.solvers['SHARPy']['route'],\n SimInfo.solvers['SHARPy']['case'])\n", (14347, 14430), True, 'import sharpy.utils.generate_cases as gc\n'), ((14864, 14934), 'numpy.loadtxt', 'np.loadtxt', (["('%sstruct_pos_node%d.dat' % (output_path, nnodes1 * 2 - 1))"], {}), "('%sstruct_pos_node%d.dat' % (output_path, nnodes1 * 2 - 1))\n", (14874, 14934), True, 'import numpy as np\n'), ((15574, 15612), 'shutil.rmtree', 'shutil.rmtree', (["(solver_path + 'output/')"], {}), "(solver_path + 'output/')\n", (15587, 15612), False, 'import shutil\n'), ((5978, 5996), 'numpy.sin', 'np.sin', (['theta_ini1'], {}), '(theta_ini1)\n', (5984, 5996), True, 'import numpy as np\n'), ((6027, 6045), 'numpy.cos', 'np.cos', (['theta_ini1'], {}), '(theta_ini1)\n', (6033, 6045), True, 'import numpy as np\n'), ((6607, 6620), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (6614, 6620), True, 'import numpy as np\n'), ((8247, 8260), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (8254, 8260), True, 'import numpy as np\n'), ((9232, 9257), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (9240, 9257), True, 'import numpy as np\n'), ((10709, 10734), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (10717, 10734), True, 'import numpy as np\n'), ((15538, 15564), 'os.remove', 'os.remove', (['(solver_path + f)'], {}), '(solver_path + f)\n', (15547, 15564), False, 'import os\n'), ((7580, 7598), 'numpy.sin', 
'np.sin', (['theta_ini2'], {}), '(theta_ini2)\n', (7586, 7598), True, 'import numpy as np\n'), ((7648, 7666), 'numpy.cos', 'np.cos', (['theta_ini2'], {}), '(theta_ini2)\n', (7654, 7666), True, 'import numpy as np\n'), ((15211, 15237), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (15227, 15237), False, 'import os\n'), ((9744, 9770), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (9760, 9770), False, 'import os\n'), ((9861, 9887), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (9877, 9887), False, 'import os\n'), ((12394, 12420), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (12410, 12420), False, 'import os\n'), ((12561, 12587), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (12577, 12587), False, 'import os\n'), ((12732, 12758), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (12748, 12758), False, 'import os\n'), ((14551, 14577), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (14567, 14577), False, 'import os\n'), ((14756, 14782), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (14772, 14782), False, 'import os\n'), ((10227, 10253), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (10243, 10253), False, 'import os\n')] |
import numpy as np
import random
import os
import datetime
def str_list_to_float(str_list):
    """Convert a sequence of numeric strings to a list of floats."""
    return list(map(float, str_list))
def str_list_to_int(str_list):
    """Convert a sequence of integer strings to a list of ints."""
    return list(map(int, str_list))
def read_embeddings(filename, n_node, n_embed):
    """Load node embeddings from a text file.

    The first line is a header and is skipped; each remaining line is
    "<node_id> <v1> <v2> ...". Rows whose id never appears in the file
    keep their uniform-random initial values.
    """
    matrix = np.random.rand(n_node, n_embed)
    with open(filename, "r") as fh:
        fh.readline()  # skip the header line
        for row in fh:
            fields = row.split()
            matrix[int(fields[0]), :] = [float(v) for v in fields[1:]]
    return matrix
def read_embeddings_with_id_convert(filename, graph, n_embed):
    """Load embeddings keyed by node *name*, mapping names to row indices.

    Uses ``graph.name2id`` to translate the first field of each line into
    a row index and ``graph.n_node`` for the matrix height; the file's
    first line is treated as a header and skipped.
    """
    matrix = np.random.rand(graph.n_node, n_embed)
    with open(filename, "r") as fh:
        fh.readline()  # skip the header line
        for row in fh:
            fields = row.split()
            matrix[graph.name2id[fields[0]], :] = [float(v) for v in fields[1:]]
    return matrix
def agm(x):
    """Map a 1-d array through ``1 - exp(-x)``.

    NaN results are replaced by 0, then the output is clipped into
    [1e-6, 1] so downstream logs/divisions stay finite.
    """
    out = 1.0 - np.exp(-x)
    nan_mask = np.isnan(out)
    out[nan_mask] = 0
    return np.clip(out, 1e-6, 1)
def agm_softmax(x):
    """Like :func:`agm` but normalized so the output sums to 1."""
    weights = 1.0 - np.exp(-x)
    weights[np.isnan(weights)] = 0
    weights = np.clip(weights, 1e-6, 1)
    return weights / weights.sum()
def read_edges_from_file(filename):
    """Read an integer edge list from a text file.

    Each non-comment line holds whitespace-separated integer fields
    (typically "src dst"); lines starting with '#' are skipped.

    :param filename: path to the edge-list file.
    :return: list of integer lists, one per edge line.
    """
    with open(filename, "r") as f:
        # The original tested line[0].startswith('#'), a redundant
        # single-character call; startswith on the whole line is the
        # idiomatic equivalent (readlines never yields empty strings).
        edges = [[int(tok) for tok in line.split()]
                 for line in f if not line.startswith('#')]
    return edges
def create_file_dir_in_config(config):
    """Ensure the parent directory exists for every filename attribute.

    Scans the public attributes of ``config`` (any object exposing
    ``__dict__``); every attribute whose name contains 'filename' is
    treated as a path or a list of paths and its directory part is created.

    :param config: configuration object with ``*filename*`` attributes.
    """
    for key, value in config.__dict__.items():
        if key.startswith('_') or 'filename' not in key:
            continue
        paths = value if isinstance(value, list) else [value]
        for path in paths:
            dirname = os.path.dirname(path)
            # Skip bare filenames (empty dirname would crash makedirs) and
            # let exist_ok=True handle the check atomically instead of the
            # racy exists()-then-makedirs() pattern of the original.
            if dirname:
                os.makedirs(dirname, exist_ok=True)
def shuffle(*args):
    """Shuffle several equal-length sequences with one shared permutation.

    Returns a tuple of new lists; element pairings across the input
    sequences are preserved.
    """
    order = list(range(len(args[0])))
    random.shuffle(order)
    return tuple([seq[i] for i in order] for seq in args)
def genearate_tmp_filename(config):
    """Build a throwaway .pkl filename from the config hash and current time.

    Spaces and colons (from the timestamp) are replaced with underscores so
    the name is filesystem-safe. Note: ``hash()`` is process-dependent.
    """
    raw = 'tmp-' + str(hash(str(config.__dict__))) + str(datetime.datetime.now()) + '.pkl'
    return raw.replace(' ', '_').replace(':', '_')
| [
"numpy.clip",
"os.path.exists",
"numpy.random.rand",
"random.shuffle",
"os.makedirs",
"numpy.exp",
"os.path.dirname",
"datetime.datetime.now",
"numpy.isnan"
] | [((1046, 1070), 'numpy.clip', 'np.clip', (['agm_x', '(1e-06)', '(1)'], {}), '(agm_x, 1e-06, 1)\n', (1053, 1070), True, 'import numpy as np\n'), ((1179, 1203), 'numpy.clip', 'np.clip', (['agm_x', '(1e-06)', '(1)'], {}), '(agm_x, 1e-06, 1)\n', (1186, 1203), True, 'import numpy as np\n'), ((1877, 1896), 'random.shuffle', 'random.shuffle', (['idx'], {}), '(idx)\n', (1891, 1896), False, 'import random\n'), ((330, 361), 'numpy.random.rand', 'np.random.rand', (['n_node', 'n_embed'], {}), '(n_node, n_embed)\n', (344, 361), True, 'import numpy as np\n'), ((694, 731), 'numpy.random.rand', 'np.random.rand', (['graph.n_node', 'n_embed'], {}), '(graph.n_node, n_embed)\n', (708, 731), True, 'import numpy as np\n'), ((993, 1003), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (999, 1003), True, 'import numpy as np\n'), ((1014, 1029), 'numpy.isnan', 'np.isnan', (['agm_x'], {}), '(agm_x)\n', (1022, 1029), True, 'import numpy as np\n'), ((1125, 1135), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1131, 1135), True, 'import numpy as np\n'), ((1146, 1161), 'numpy.isnan', 'np.isnan', (['agm_x'], {}), '(agm_x)\n', (1154, 1161), True, 'import numpy as np\n'), ((1704, 1725), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (1719, 1725), False, 'import os\n'), ((1749, 1772), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (1763, 1772), False, 'import os\n'), ((1794, 1814), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (1805, 1814), False, 'import os\n'), ((2108, 2131), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2129, 2131), False, 'import datetime\n')] |
"""
Signal processing.
"""
import collections.abc
import logging
from typing import Tuple
import numpy as np
from ridge_detection.helper import displayContours, save_to_disk
from ridge_detection.lineDetector import LineDetector
from ridge_detection.params import Params
from scipy import ndimage as ndi
from skimage.filters import gabor_kernel
import alsa.image_proc as ip
# Baseline configuration for the ridge_detection package; "path_to_file"
# is filled in per image by resolve_ridge_config().
DEFAULT_RIDGE_CONFIG = {
    # "path_to_file": img_path,
    "mandatory_parameters": {
        # Sigma and the two thresholds are intentionally left unset here
        # (commented values kept for reference) so the detector estimates them.
        # "Sigma": 3.39,
        # "Lower_Threshold": 0.34,
        # "Upper_Threshold": 1.02,
        "Maximum_Line_Length": 0,
        "Minimum_Line_Length": 0,
        "Darkline": "LIGHT",
        "Overlap_resolution": "NONE",
    },
    "optional_parameters": {
        "Line_width": 3,
        "High_contrast": 200,
        "Low_contrast": 60,
    },
    "further_options": {
        "Correct_position": True,
        "Estimate_width": True,
        "doExtendLine": True,
        "Show_junction_points": True,
        "Show_IDs": False,
        "Display_results": False,
        "Preview": False,
        "Make_Binary": True,
        "save_on_disk": True,
    },
}
def update(d: dict, u: dict):
    """Recursively merge mapping ``u`` into ``d`` in place and return ``d``.

    Nested mappings are merged key by key; any other value in ``u``
    overwrites the corresponding entry in ``d``.
    """
    for key, value in u.items():
        if isinstance(value, collections.abc.Mapping):
            nested = d.get(key, {})
            d[key] = update(nested, value)
        else:
            d[key] = value
    return d
def compute_feats(image, kernels):
    """Return per-kernel (mean, variance) features of the filtered image.

    Each kernel is convolved over ``image`` with wrap-around boundaries;
    row k of the result holds the mean and variance of that response.
    """
    feats = np.zeros((len(kernels), 2), dtype=np.double)
    for idx, kern in enumerate(kernels):
        response = ndi.convolve(image, kern, mode="wrap")
        feats[idx, 0] = response.mean()
        feats[idx, 1] = response.var()
    return feats
# Build n real-valued Gabor kernels with orientations evenly spaced over
# [theta_0, theta_n]. The default full range [0, pi) is shortened by one
# step so theta=0 and theta=pi (identical kernels) are not both sampled.
# Also returns "relic", the border width (in pixels) to trim after filtering.
def gabor_kernels(freq, n=1, theta_0=0, theta_n=np.pi, bw=1):
    if theta_0 == 0 and theta_n == np.pi:
        theta_n -= theta_n / n
    thetas = np.linspace(theta_0, theta_n, n)
    kernels = [np.real(gabor_kernel(freq, theta=t, bandwidth=bw)) for t in thetas]
    relic = int(round(1 / freq))
    return kernels, relic
def power(image, kernel, relic):
    """Magnitude response of a complex Gabor kernel, rescaled to ~[0, 255].

    :param image: 2-d array; standardized (zero mean, unit std) internally.
    :param kernel: complex-valued Gabor kernel.
    :param relic: border width (pixels) to trim, from gabor_kernels().
    :raises Exception: if the magnitude response is identically zero.
    """
    # Normalize images for better comparison.
    image = (image - image.mean()) / image.std()
    # Magnitude of the complex response: sqrt(real^2 + imag^2), wrap borders.
    mat = np.sqrt(
        ndi.convolve(image, np.real(kernel), mode="wrap") ** 2
        + ndi.convolve(image, np.imag(kernel), mode="wrap") ** 2
    )
    # Trim the wrap-contaminated border only when the image is large enough.
    if relic < min(image.shape[0] / 2, image.shape[1] / 2):
        mat = mat[relic:(-relic), relic:(-relic)]
    mat_min = np.min(mat)
    mat_max = np.max(mat)
    if mat_max == 0:
        raise Exception("Image is an array of zeros")
    else:
        # NOTE(review): dividing by mat_max (not mat_max - mat_min) means the
        # output tops out below 255 whenever mat_min > 0 — confirm intended.
        mat = (mat - mat_min) / mat_max * 255
    return mat
# Apply every Gabor kernel to the image and return the list of
# normalized power images (one per kernel), via power().
def gabor_image(image, kernels, relic):
    return [power(image, kernel, relic) for kernel in kernels]
# Mean (optionally weighted) of a list of equally shaped images.
# With weights=None every image gets weight 1; either way the sum is
# divided by the number of images.
def lin_comb(image_list, weights=None):
    total = np.zeros(image_list[0].shape)
    if weights is None:
        for img in image_list:
            total = total + img
    elif len(weights) == len(image_list):
        for img, w in zip(image_list, weights):
            total = total + w * img
    else:
        raise Exception("len of weights differs from len of image_list")
    return total / len(image_list)
# Attempts to denoise the array by taking the average of given
# square amount of pixels
def avg_denoise(array, pixels=3):
    """Denoise by replacing each ``pixels`` x ``pixels`` tile with its mean.

    Tiles start from the top-left corner; ragged edge tiles (when a shape
    is not a multiple of ``pixels``) are averaged over the remaining cells.
    The input array is not modified; the result keeps its dtype (so float
    means are truncated when ``array`` is integer, as before).

    :param array: 2-d numpy array.
    :param pixels: edge length of the square averaging tile.
    :return: new array, same shape and dtype as ``array``.
    """
    result = np.copy(array)
    # Iterate only over tiles that overlap the array. The original bound of
    # shape + pixels walked one extra tile row/column past the edge, taking
    # np.mean of empty slices (RuntimeWarning + NaN, silently discarded).
    for col_start in range(0, array.shape[1], pixels):
        for row_start in range(0, array.shape[0], pixels):
            rows = slice(row_start, row_start + pixels)
            cols = slice(col_start, col_start + pixels)
            result[rows, cols] = np.mean(array[rows, cols])
    return result
def resolve_ridge_config(img_path: str, override_ridge_config: dict) -> dict:
    """
    Resolve final ridge detection configuration.

    Starts from a deep copy of DEFAULT_RIDGE_CONFIG so repeated calls do not
    leak per-image overrides into the shared module-level default (the
    original bound the global dict directly and update() mutated it in
    place), records the image path, then applies the caller's overrides
    recursively via update().
    """
    import copy
    ridge_config = copy.deepcopy(DEFAULT_RIDGE_CONFIG)
    ridge_config["path_to_file"] = img_path
    ridge_config = update(ridge_config, override_ridge_config)
    return ridge_config
def ridge(
    img_path, saved_img_dir, override_ridge_config: dict, save_on_file: bool = False
):
    """Run ridge/line detection on one image.

    Returns ``(lines, img_only_lines)``: the detected line objects (empty
    list on detector failure) and an image containing only the detected
    lines (``None`` on failure). With ``save_on_file`` the contour images
    are also written below ``saved_img_dir``.
    """
    cfg = resolve_ridge_config(
        img_path=img_path, override_ridge_config=override_ridge_config
    )
    params = Params(cfg)
    img = ip.open_image(img_path, asarray=False)
    detect = LineDetector(params=cfg)
    try:
        result = detect.detectLines(img)
    except (SystemExit, NameError) as exc:
        # The detector is known to bail out with SystemExit/NameError on
        # degenerate inputs; degrade gracefully instead of propagating.
        logging.error(
            f"Caught {exc} SystemExit/NameError from ridge detection.a "
            f"img_path = {img_path}",
        )
        return [], None
    resultJunction = detect.junctions
    out_img, img_only_lines = displayContours(params, result, resultJunction)
    if save_on_file:
        save_to_disk(out_img, img_only_lines, str(saved_img_dir))
    # result has the coordinates, img_only_lines is an Image
    return result, img_only_lines
# attempts to form straight lines or line segments out of line
# objects calculated by ridge function
# returns a list of list of tuples of len 2 which contain the
# coordinates for the line segment points in a matrix
def calculate_ridge_points(line_o, max_length=80):
    """Approximate a detected ridge line by straight segment endpoints.

    :param line_o: ridge_detection line object; this code reads ``col`` and
        ``row`` (pixel coordinates), ``angle`` (per-pixel orientation) and
        ``num`` (number of points).
    :param max_length: maximum pixel count for which a single straight
        segment is accepted before the line is split recursively.
    :return: list of segments, each a list of two (x, y) tuples.
    """
    # Pick robust endpoints for a straight stretch (the points whose
    # orientation is closest to the median angle), then extend the segment
    # outwards while it stays within 10 px of the ridge pixels.
    # returns list of list of tuples
    def calculate_accurate_points(
        angle_list, line_o, orig_size, start_point=0, x_range=(0, 255), y_range=(0, 255)
    ):
        # Walk the segment outwards in both directions along its slope until
        # it drifts more than 10 px from every ridge pixel, then clamp the
        # endpoints into the given x/y ranges.
        def extend_line(coord_list, x_range, y_range):
            x_d = coord_list[0][1][0] - coord_list[0][0][0]
            y_d = coord_list[0][1][1] - coord_list[0][0][1]
            x_orig = coord_list[0][1][0]
            y_orig = coord_list[0][1][1]
            x_1 = x_orig
            x_2 = x_orig
            y_2 = y_orig
            y_1 = y_orig
            if x_d != 0:
                k = y_d / x_d
                while x_1 >= np.min(line_o.col) and y_2 >= np.min(line_o.row):
                    x_1 -= 1
                    y_2 -= k
                    min_d = np.inf
                    for x, y in zip(line_o.col, line_o.row):
                        current_d = np.sqrt((x - x_1) ** 2 + (y - y_2) ** 2)
                        if current_d < min_d:
                            min_d = current_d
                    if min_d > 10:
                        break
                while x_2 <= np.max(line_o.col) and y_1 <= np.max(line_o.row):
                    x_2 += 1
                    y_1 += k
                    min_d = np.inf
                    for x, y in zip(line_o.col, line_o.row):
                        current_d = np.sqrt((x - x_2) ** 2 + (y - y_1) ** 2)
                        if current_d < min_d:
                            min_d = current_d
                    if min_d > 10:
                        break
                y_1 = int(round(min(y_range[1] - 1, max(y_range[0], y_1))))
                y_2 = int(round(min(y_range[1] - 1, max(y_range[0], y_2))))
                x_1 = int(round(min(x_range[1] - 1, max(x_range[0], x_1))))
                x_2 = int(round(min(x_range[1] - 1, max(x_range[0], x_2))))
                return (x_1, y_2), (x_2, y_1)
            else:
                # Vertical segment: just extend to the y-range limits.
                while y_2 > np.min(y_range):
                    y_2 -= 1
                while y_1 < np.max(y_range):
                    y_1 += 1
                return (x_1, y_2), (x_2, y_1)
        n_points = line_o.num
        median_angle = np.median(angle_list)
        first_p = 0
        last_p = len(angle_list) - 1
        min_first = min_last = np.inf
        # Search the first and last third of the slice for the points whose
        # orientation best matches the median angle.
        for i in range(int(len(angle_list) / 3)):
            if abs(angle_list[i] - median_angle) < min_first:
                first_p = i
                min_first = abs(angle_list[i] - median_angle)
            if abs(angle_list[-i - 1] - median_angle) < min_last:
                last_p = len(angle_list) - i - 1
                min_last = abs(angle_list[-i - 1] - median_angle)
        # Map slice-local indices back into the full line's point indices.
        first_p = int((first_p + start_point) / orig_size * (n_points - 1))
        last_p = int((last_p + start_point) / orig_size * (n_points - 1))
        to_return = [
            [
                (line_o.col[first_p], line_o.row[first_p]),
                (line_o.col[last_p], line_o.row[last_p]),
            ]
        ]
        a, b = extend_line(to_return, x_range, y_range)
        to_return = [a, b]
        return [to_return]
    # attempts to form a line segment and return the index of the
    # coordinate in the line which should be chosen
    # Recursive bisection: halves with consistent orientation (std <= 0.2)
    # or too few points (< 21) become segments; the rest are split again.
    def ridge_segmentation(angle_list, line_o, n_points, orig_size, start=0):
        mid_p = int((len(angle_list)) / 2)
        angle1 = angle_list[:mid_p]
        angle2 = angle_list[mid_p:]
        to_return = list()
        angle1_std = np.std(angle1)
        angle2_std = np.std(angle2)
        line1_list = list()
        line2_list = list()
        if angle1_std <= 0.2 or mid_p < 21:
            x_min = int(start / orig_size * n_points)
            x_max = int((start + mid_p) / orig_size * n_points)
            x_range_0 = np.min(line_o.col[x_min:x_max])
            x_range_1 = np.max(line_o.col[x_min:x_max])
            y_range_0 = np.min(line_o.row[x_min:x_max])
            y_range_1 = np.max(line_o.row[x_min:x_max])
            line1_list = calculate_accurate_points(
                angle1,
                line_o,
                orig_size,
                start_point=start,
                x_range=(x_range_0, x_range_1),
                y_range=(y_range_0, y_range_1),
            )
        else:
            to_return.extend(
                ridge_segmentation(angle1, line_o, n_points, orig_size, start=start)
            )
        if len(line1_list) > 0:
            to_return.extend(line1_list)
        if angle2_std <= 0.2 or mid_p < 21:
            x_max = int((start + len(angle_list)) / orig_size * n_points)
            x_min = int((start + mid_p) / orig_size * n_points)
            x_range_0 = np.min(line_o.col[x_min:x_max])
            x_range_1 = np.max(line_o.col[x_min:x_max])
            y_range_0 = np.min(line_o.row[x_min:x_max])
            y_range_1 = np.max(line_o.row[x_min:x_max])
            line2_list = calculate_accurate_points(
                angle2,
                line_o,
                orig_size,
                start_point=int((start + mid_p) / orig_size * n_points),
                x_range=(x_range_0, x_range_1),
                y_range=(y_range_0, y_range_1),
            )
        else:
            to_return.extend(
                ridge_segmentation(
                    angle2, line_o, n_points, orig_size, start=mid_p + start
                )
            )
        if len(line2_list) > 0:
            to_return.extend(line2_list)
        return to_return
    n_points = line_o.num
    orig_size = len(line_o.angle)
    # if line is small enough, the end points will suffice
    if n_points >= 20:
        angles = line_o.angle
        angles_std = np.std(angles)
        # check if the angle of the pixels are consistent
        if angles_std < 0.2 and len(line_o.col) <= max_length:
            return calculate_accurate_points(
                angles,
                line_o,
                orig_size,
                x_range=(np.min(line_o.col), np.max(line_o.col)),
                y_range=(np.min(line_o.row), np.max(line_o.row)),
            )
        else:
            # check if the angle in the middle is consistent
            quartile = int(len(angles) / 4)
            quartile_angles = angles[quartile : (len(angles) - quartile)]
            angles_std = np.std(quartile_angles)
            if angles_std < 0.2 and len(line_o.col) <= max_length:
                cut_point = int(n_points / 4)
                return calculate_accurate_points(
                    quartile_angles,
                    line_o,
                    orig_size,
                    cut_point,
                    x_range=(np.min(line_o.col), np.max(line_o.col)),
                    y_range=(np.min(line_o.row), np.max(line_o.row)),
                )
            else:
                # Inconsistent orientation: split recursively and flatten
                # each resulting segment into a list of coordinate tuples.
                to_return = list()
                for line in ridge_segmentation(angles, line_o, n_points, orig_size):
                    line_list = list()
                    for i in line:
                        line_list.append(i)
                    to_return.append(line_list)
                return to_return
    else:
        return [[(line_o.col[0], line_o.row[0]), (line_o.col[-1], line_o.row[-1])]]
def ridge_fit(
    img_path,
    saved_img_dir,
    override_ridge_config: dict,
    img_shape: Tuple[int, int] = (256, 256),
    slack: int = 1,
    save_on_file: bool = False,
):
    """Detect ridges and rasterize each detected line.

    Returns ``(line_coords_list, img_list)``: per detected line, its
    straight-segment coordinates (from calculate_ridge_points) and the
    corresponding binary matrix of shape ``img_shape``.
    """
    detected, _ = ridge(
        img_path=img_path,
        saved_img_dir=saved_img_dir,
        override_ridge_config=override_ridge_config,
        save_on_file=save_on_file,
    )
    coords_per_line = []
    binmats = []
    for line in detected:
        segments = calculate_ridge_points(line)
        binmat = ip.img_binmat_line_segment(segments, img_shape, slack=slack)
        coords_per_line.append(segments)
        binmats.append(binmat)
    return coords_per_line, binmats
# attempts to extrapolate lines in ridge_fit_coords by checking for
# lines with same position and angle
# DEPRECATED!! DO NOT USE
def connect_lines(ridge_fit_coords, eps=3, shape=(256, 256)):
    """Merge detected segments that lie on approximately the same line.

    Each segment is extrapolated to the image edges; segments whose edge
    intersections agree within ``eps`` pixels and whose mutual distance is
    below ``max(shape)/8`` are grouped and replaced by one spanning segment.

    NOTE(review): marked deprecated by the author ("DO NOT USE"). The
    grouping pass appends every index to its own group unconditionally, so
    already-grouped segments reappear as singleton groups, and
    extrapolate() clamps to a hard-coded 255 instead of ``shape``.
    """
    line_list = list()
    # extrapolate line until meets the edge of the image
    def extrapolate(coord, k, shape):
        x_orig = coord[0]
        y_orig = coord[1]
        x_1 = x_orig
        x_2 = x_orig
        y_2 = y_orig
        y_1 = y_orig
        while x_1 > 0 and y_2 > 0:
            x_1 -= 1
            y_2 -= k
        while x_2 < shape[1] - 1 and y_1 < shape[0] - 1:
            x_2 += 1
            y_1 += k
        y_1 = int(round(min(255, max(0, y_1))))
        y_2 = int(round(min(255, max(0, y_2))))
        return (x_1, y_2), (x_2, y_1)
    # return the coordinates of a line when extrapolated to the
    # edge of the image
    def edge_coords(line_coords, shape):
        x_d = line_coords[1][0] - line_coords[0][0]
        y_d = line_coords[1][1] - line_coords[0][1]
        if x_d > 0:
            k = y_d / x_d
            a, b = extrapolate(line_coords[0], k, shape)
            return [a, b]
        else:
            # Vertical line: spans the full image height at fixed x.
            return [(line_coords[0][0], 0), (line_coords[0][0], shape[0] - 1)]
    # Group segment indices whose edge points agree within eps and whose
    # segments are close to each other.
    def combine_lines(edge_points, line_list, eps):
        def compare_coords(crds1, crds2, eps):
            satisfied = True
            for c1, c2 in zip(crds1, crds2):
                if abs(c1[0] - c2[0]) > eps or abs(c1[1] - c2[1]) > eps:
                    satisfied = False
            return satisfied
        # Minimum endpoint-to-endpoint Euclidean distance of two segments.
        def distance(crds1, crds2):
            def eukl(x1, x2, y1, y2):
                return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
            distances = list()
            for crd1 in crds1:
                for crd2 in crds2:
                    distances.append(eukl(crd1[0], crd2[0], crd1[1], crd2[1]))
            return np.min(distances)
        to_return = list()
        sorted_list = list()
        for i, points in enumerate(edge_points):
            id_list = list()
            id_list.append(i)
            sorted_list.append(i)
            for j, points2 in enumerate(edge_points):
                if j not in sorted_list:
                    if (
                        compare_coords(points, points2, eps)
                        and distance(line_list[i], line_list[j]) < np.max(shape) / 8
                    ):
                        id_list.append(j)
                        sorted_list.append(j)
            to_return.append(id_list)
        return to_return
    # Span a group of segments with one segment from the leftmost to the
    # rightmost endpoint.
    def fill_line(coord_list):
        x_min = np.inf
        x_max = 0
        y_min = y_max = 0
        for crds in coord_list:
            for crd in crds:
                if crd[0] < x_min:
                    x_min = crd[0]
                    y_min = crd[1]
                if crd[0] > x_max:
                    x_max = crd[0]
                    y_max = crd[1]
        return [(x_min, y_min), (x_max, y_max)]
    for line_segment in ridge_fit_coords:
        for line in line_segment:
            line_list.append(line)
    extrapol_list = list()
    for j, line in enumerate(line_list):
        extrapol_list.append(edge_coords(line, shape))
    grouped_ids = combine_lines(extrapol_list, line_list, eps)
    grouped_line_list = list()
    for grp in grouped_ids:
        group = list()
        for id in grp:
            group.append(line_list[id])
        grouped_line_list.append(group)
    for i, grp in enumerate(grouped_line_list):
        new_coords = fill_line(grp)
        grouped_line_list[i] = new_coords
    return grouped_line_list
| [
"numpy.sqrt",
"ridge_detection.params.Params",
"logging.error",
"numpy.imag",
"numpy.mean",
"numpy.max",
"alsa.image_proc.open_image",
"ridge_detection.helper.displayContours",
"numpy.linspace",
"numpy.real",
"numpy.min",
"scipy.ndimage.convolve",
"alsa.image_proc.img_binmat_line_segment",
... | [((1977, 2009), 'numpy.linspace', 'np.linspace', (['theta_0', 'theta_n', 'n'], {}), '(theta_0, theta_n, n)\n', (1988, 2009), True, 'import numpy as np\n'), ((2576, 2587), 'numpy.min', 'np.min', (['mat'], {}), '(mat)\n', (2582, 2587), True, 'import numpy as np\n'), ((2602, 2613), 'numpy.max', 'np.max', (['mat'], {}), '(mat)\n', (2608, 2613), True, 'import numpy as np\n'), ((3147, 3176), 'numpy.zeros', 'np.zeros', (['image_list[0].shape'], {}), '(image_list[0].shape)\n', (3155, 3176), True, 'import numpy as np\n'), ((3745, 3759), 'numpy.copy', 'np.copy', (['array'], {}), '(array)\n', (3752, 3759), True, 'import numpy as np\n'), ((4789, 4809), 'ridge_detection.params.Params', 'Params', (['ridge_config'], {}), '(ridge_config)\n', (4795, 4809), False, 'from ridge_detection.params import Params\n'), ((4821, 4859), 'alsa.image_proc.open_image', 'ip.open_image', (['img_path'], {'asarray': '(False)'}), '(img_path, asarray=False)\n', (4834, 4859), True, 'import alsa.image_proc as ip\n'), ((4874, 4907), 'ridge_detection.lineDetector.LineDetector', 'LineDetector', ([], {'params': 'ridge_config'}), '(params=ridge_config)\n', (4886, 4907), False, 'from ridge_detection.lineDetector import LineDetector\n'), ((5238, 5285), 'ridge_detection.helper.displayContours', 'displayContours', (['params', 'result', 'resultJunction'], {}), '(params, result, resultJunction)\n', (5253, 5285), False, 'from ridge_detection.helper import displayContours, save_to_disk\n'), ((1559, 1599), 'scipy.ndimage.convolve', 'ndi.convolve', (['image', 'kernel'], {'mode': '"""wrap"""'}), "(image, kernel, mode='wrap')\n", (1571, 1599), True, 'from scipy import ndimage as ndi\n'), ((7879, 7900), 'numpy.median', 'np.median', (['angle_list'], {}), '(angle_list)\n', (7888, 7900), True, 'import numpy as np\n'), ((9180, 9194), 'numpy.std', 'np.std', (['angle1'], {}), '(angle1)\n', (9186, 9194), True, 'import numpy as np\n'), ((9216, 9230), 'numpy.std', 'np.std', (['angle2'], {}), '(angle2)\n', (9222, 9230), True, 
'import numpy as np\n'), ((11361, 11375), 'numpy.std', 'np.std', (['angles'], {}), '(angles)\n', (11367, 11375), True, 'import numpy as np\n'), ((13399, 13462), 'alsa.image_proc.img_binmat_line_segment', 'ip.img_binmat_line_segment', (['line_coords', 'img_shape'], {'slack': 'slack'}), '(line_coords, img_shape, slack=slack)\n', (13425, 13462), True, 'import alsa.image_proc as ip\n'), ((2036, 2077), 'skimage.filters.gabor_kernel', 'gabor_kernel', (['freq'], {'theta': 'i', 'bandwidth': 'bw'}), '(freq, theta=i, bandwidth=bw)\n', (2048, 2077), False, 'from skimage.filters import gabor_kernel\n'), ((4006, 4080), 'numpy.mean', 'np.mean', (['array[row_start:row_start + pixels, col_start:col_start + pixels]'], {}), '(array[row_start:row_start + pixels, col_start:col_start + pixels])\n', (4013, 4080), True, 'import numpy as np\n'), ((5009, 5115), 'logging.error', 'logging.error', (['f"""Caught {exc} SystemExit/NameError from ridge detection.a img_path = {img_path}"""'], {}), "(\n f'Caught {exc} SystemExit/NameError from ridge detection.a img_path = {img_path}'\n )\n", (5022, 5115), False, 'import logging\n'), ((9473, 9504), 'numpy.min', 'np.min', (['line_o.col[x_min:x_max]'], {}), '(line_o.col[x_min:x_max])\n', (9479, 9504), True, 'import numpy as np\n'), ((9529, 9560), 'numpy.max', 'np.max', (['line_o.col[x_min:x_max]'], {}), '(line_o.col[x_min:x_max])\n', (9535, 9560), True, 'import numpy as np\n'), ((9585, 9616), 'numpy.min', 'np.min', (['line_o.row[x_min:x_max]'], {}), '(line_o.row[x_min:x_max])\n', (9591, 9616), True, 'import numpy as np\n'), ((9641, 9672), 'numpy.max', 'np.max', (['line_o.row[x_min:x_max]'], {}), '(line_o.row[x_min:x_max])\n', (9647, 9672), True, 'import numpy as np\n'), ((10369, 10400), 'numpy.min', 'np.min', (['line_o.col[x_min:x_max]'], {}), '(line_o.col[x_min:x_max])\n', (10375, 10400), True, 'import numpy as np\n'), ((10425, 10456), 'numpy.max', 'np.max', (['line_o.col[x_min:x_max]'], {}), '(line_o.col[x_min:x_max])\n', (10431, 10456), True, 
'import numpy as np\n'), ((10481, 10512), 'numpy.min', 'np.min', (['line_o.row[x_min:x_max]'], {}), '(line_o.row[x_min:x_max])\n', (10487, 10512), True, 'import numpy as np\n'), ((10537, 10568), 'numpy.max', 'np.max', (['line_o.row[x_min:x_max]'], {}), '(line_o.row[x_min:x_max])\n', (10543, 10568), True, 'import numpy as np\n'), ((11982, 12005), 'numpy.std', 'np.std', (['quartile_angles'], {}), '(quartile_angles)\n', (11988, 12005), True, 'import numpy as np\n'), ((15448, 15465), 'numpy.min', 'np.min', (['distances'], {}), '(distances)\n', (15454, 15465), True, 'import numpy as np\n'), ((15210, 15250), 'numpy.sqrt', 'np.sqrt', (['((x2 - x1) ** 2 + (y2 - y1) ** 2)'], {}), '((x2 - x1) ** 2 + (y2 - y1) ** 2)\n', (15217, 15250), True, 'import numpy as np\n'), ((2346, 2361), 'numpy.real', 'np.real', (['kernel'], {}), '(kernel)\n', (2353, 2361), True, 'import numpy as np\n'), ((2411, 2426), 'numpy.imag', 'np.imag', (['kernel'], {}), '(kernel)\n', (2418, 2426), True, 'import numpy as np\n'), ((7658, 7673), 'numpy.min', 'np.min', (['y_range'], {}), '(y_range)\n', (7664, 7673), True, 'import numpy as np\n'), ((7732, 7747), 'numpy.max', 'np.max', (['y_range'], {}), '(y_range)\n', (7738, 7747), True, 'import numpy as np\n'), ((6352, 6370), 'numpy.min', 'np.min', (['line_o.col'], {}), '(line_o.col)\n', (6358, 6370), True, 'import numpy as np\n'), ((6382, 6400), 'numpy.min', 'np.min', (['line_o.row'], {}), '(line_o.row)\n', (6388, 6400), True, 'import numpy as np\n'), ((6593, 6633), 'numpy.sqrt', 'np.sqrt', (['((x - x_1) ** 2 + (y - y_2) ** 2)'], {}), '((x - x_1) ** 2 + (y - y_2) ** 2)\n', (6600, 6633), True, 'import numpy as np\n'), ((6821, 6839), 'numpy.max', 'np.max', (['line_o.col'], {}), '(line_o.col)\n', (6827, 6839), True, 'import numpy as np\n'), ((6851, 6869), 'numpy.max', 'np.max', (['line_o.row'], {}), '(line_o.row)\n', (6857, 6869), True, 'import numpy as np\n'), ((7062, 7102), 'numpy.sqrt', 'np.sqrt', (['((x - x_2) ** 2 + (y - y_1) ** 2)'], {}), '((x - x_2) ** 2 + 
(y - y_1) ** 2)\n', (7069, 7102), True, 'import numpy as np\n'), ((11643, 11661), 'numpy.min', 'np.min', (['line_o.col'], {}), '(line_o.col)\n', (11649, 11661), True, 'import numpy as np\n'), ((11663, 11681), 'numpy.max', 'np.max', (['line_o.col'], {}), '(line_o.col)\n', (11669, 11681), True, 'import numpy as np\n'), ((11709, 11727), 'numpy.min', 'np.min', (['line_o.row'], {}), '(line_o.row)\n', (11715, 11727), True, 'import numpy as np\n'), ((11729, 11747), 'numpy.max', 'np.max', (['line_o.row'], {}), '(line_o.row)\n', (11735, 11747), True, 'import numpy as np\n'), ((12325, 12343), 'numpy.min', 'np.min', (['line_o.col'], {}), '(line_o.col)\n', (12331, 12343), True, 'import numpy as np\n'), ((12345, 12363), 'numpy.max', 'np.max', (['line_o.col'], {}), '(line_o.col)\n', (12351, 12363), True, 'import numpy as np\n'), ((12395, 12413), 'numpy.min', 'np.min', (['line_o.row'], {}), '(line_o.row)\n', (12401, 12413), True, 'import numpy as np\n'), ((12415, 12433), 'numpy.max', 'np.max', (['line_o.row'], {}), '(line_o.row)\n', (12421, 12433), True, 'import numpy as np\n'), ((15913, 15926), 'numpy.max', 'np.max', (['shape'], {}), '(shape)\n', (15919, 15926), True, 'import numpy as np\n')] |
from numpy import genfromtxt
from matplotlib.pyplot import figure, legend, loglog, savefig, xlabel, ylabel,show
from sys import argv
# Convergence-plot script: reads a CSV (header + "dof,error" rows) given as
# argv[1] and writes a log-log error plot with a reference slope to argv[2].
input_file = str(argv[1])
output_file = str(argv[2])
table = genfromtxt(input_file, skip_header=1, delimiter=',')
dofs = table[:, 0]
errors = table[:, 1]
fig = figure()
loglog(dofs, 1 / dofs, '--', label='slope -1')
loglog(dofs, errors, 'o-', label='L2 error')
xlabel('degrees of freedom')
ylabel('error')
legend()
savefig(output_file)
print('Generated ' + output_file)
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure",
"numpy.genfromtxt",
"matplotlib.pyplot.legend"
] | [((195, 247), 'numpy.genfromtxt', 'genfromtxt', (['input_file'], {'skip_header': '(1)', 'delimiter': '""","""'}), "(input_file, skip_header=1, delimiter=',')\n", (205, 247), False, 'from numpy import genfromtxt\n'), ((283, 291), 'matplotlib.pyplot.figure', 'figure', ([], {}), '()\n', (289, 291), False, 'from matplotlib.pyplot import figure, legend, loglog, savefig, xlabel, ylabel, show\n'), ((292, 332), 'matplotlib.pyplot.loglog', 'loglog', (['N', '(1 / N)', '"""--"""'], {'label': '"""slope -1"""'}), "(N, 1 / N, '--', label='slope -1')\n", (298, 332), False, 'from matplotlib.pyplot import figure, legend, loglog, savefig, xlabel, ylabel, show\n'), ((330, 368), 'matplotlib.pyplot.loglog', 'loglog', (['N', 'err', '"""o-"""'], {'label': '"""L2 error"""'}), "(N, err, 'o-', label='L2 error')\n", (336, 368), False, 'from matplotlib.pyplot import figure, legend, loglog, savefig, xlabel, ylabel, show\n'), ((368, 396), 'matplotlib.pyplot.xlabel', 'xlabel', (['"""degrees of freedom"""'], {}), "('degrees of freedom')\n", (374, 396), False, 'from matplotlib.pyplot import figure, legend, loglog, savefig, xlabel, ylabel, show\n'), ((397, 412), 'matplotlib.pyplot.ylabel', 'ylabel', (['"""error"""'], {}), "('error')\n", (403, 412), False, 'from matplotlib.pyplot import figure, legend, loglog, savefig, xlabel, ylabel, show\n'), ((413, 421), 'matplotlib.pyplot.legend', 'legend', ([], {}), '()\n', (419, 421), False, 'from matplotlib.pyplot import figure, legend, loglog, savefig, xlabel, ylabel, show\n'), ((422, 442), 'matplotlib.pyplot.savefig', 'savefig', (['output_file'], {}), '(output_file)\n', (429, 442), False, 'from matplotlib.pyplot import figure, legend, loglog, savefig, xlabel, ylabel, show\n')] |
import numpy as np
from scipy.interpolate import interp2d,interp1d
from .params import default_params
import camb
from camb import model
import scipy.interpolate as si
import scipy.constants as constants
"""
This module will (eventually) abstract away the choice of boltzmann codes.
However, it does it stupidly by simply providing a common
stunted interface. It makes no guarantee that the same set
of parameters passed to the two engines will produce the same
results. It could be a test-bed for converging towards that.
"""
class Cosmology(object):
def __init__(self,params=None,halofit=None,engine='camb'):
assert engine in ['camb','class']
if engine=='class': raise NotImplementedError
self.p = dict(params) if params is not None else {}
for param in default_params.keys():
if param not in self.p.keys(): self.p[param] = default_params[param]
# Cosmology
self._init_cosmology(self.p,halofit)
    def sigma_crit(self,zlens,zsource):
        """Critical surface mass density for lensing, in Msun/Mpc^2.

        ``zlens`` is iterated over (may be an array); ``zsource`` is a scalar.
        """
        Gval = 4.517e-48 # Newton G in Mpc,seconds,Msun units
        cval = 9.716e-15 # speed of light in Mpc,second units
        Dd = self.angular_diameter_distance(zlens)
        Ds = self.angular_diameter_distance(zsource)
        # Lens-to-source angular diameter distance for each lens redshift.
        Dds = np.asarray([self.results.angular_diameter_distance2(zl,zsource) for zl in zlens])
        return cval**2 * Ds / 4 / np.pi / Gval / Dd / Dds
    def P_mm_linear(self,zs,ks):
        # Interface stub for the linear matter power spectrum (not implemented).
        pass
    def P_mm_nonlinear(self,ks,zs,halofit_version='mead'):
        # Interface stub for the nonlinear (halofit) power spectrum (not implemented).
        pass
    def comoving_radial_distance(self,z):
        """Comoving radial distance to redshift z, in Mpc (from CAMB results)."""
        return self.results.comoving_radial_distance(z)
    def angular_diameter_distance(self,z):
        """Angular diameter distance to redshift z, in Mpc (from CAMB results)."""
        return self.results.angular_diameter_distance(z)
    def hubble_parameter(self,z):
        """Hubble parameter H(z) in km/s/Mpc."""
        # H(z) in km/s/Mpc
        return self.results.hubble_parameter(z)
    def h_of_z(self,z):
        """Hubble parameter H(z) in 1/Mpc."""
        # H(z) in 1/Mpc
        return self.results.h_of_z(z)
def _init_cosmology(self,params,halofit):
try:
theta = params['theta100']/100.
H0 = None
print("WARNING: Using theta100 parameterization. H0 ignored.")
except:
H0 = params['H0']
theta = None
try:
omm = params['omm']
h = params['H0']/100.
params['omch2'] = omm*h**2-params['ombh2']
print("WARNING: omm specified. Ignoring omch2.")
except:
pass
self.pars = camb.set_params(ns=params['ns'],As=params['As'],H0=H0,
cosmomc_theta=theta,ombh2=params['ombh2'],
omch2=params['omch2'], mnu=params['mnu'],
tau=params['tau'],nnu=params['nnu'],
num_massive_neutrinos=
params['num_massive_neutrinos'],
w=params['w0'],wa=params['wa'],
dark_energy_model='ppf',
halofit_version=self.p['default_halofit'] if halofit is None else halofit,
AccuracyBoost=2)
self.results = camb.get_background(self.pars)
self.params = params
self.h = self.params['H0']/100.
omh2 = self.params['omch2']+self.params['ombh2'] # FIXME: neutrinos
self.om0 = omh2 / (self.params['H0']/100.)**2.
try: self.as8 = self.params['as8']
except: self.as8 = 1
    def _get_matter_power(self,zs,ks,nonlinear=False):
        """Matter power spectrum P(k, z) on the given grids, times as8^2.

        Units: k in 1/Mpc (k_hunit=False), power in Mpc^3 (hubble_units=False).
        """
        PK = camb.get_matter_power_interpolator(self.pars, nonlinear=nonlinear,
                                             hubble_units=False,
                                             k_hunit=False, kmax=ks.max(),
                                             zmax=zs.max()+1.)
        return (self.as8**2.) * PK.P(zs, ks, grid=True)
    def rho_matter_z(self,z):
        """Mean matter density at redshift z, in Msun/Mpc^3."""
        return self.rho_critical_z(0.) * self.om0 \
            * (1+np.atleast_1d(z))**3. # in msolar / megaparsec3
    def omz(self,z):
        """Matter density parameter Omega_m(z)."""
        return self.rho_matter_z(z)/self.rho_critical_z(z)
    def rho_critical_z(self,z):
        """Critical density at redshift z, in Msun/Mpc^3."""
        Hz = self.hubble_parameter(z) * 3.241e-20 # SI # FIXME: constants need checking
        G = 6.67259e-11 # SI
        rho = 3.*(Hz**2.)/8./np.pi/G # SI
        return rho * 1.477543e37 # in msolar / megaparsec3
def D_growth(self, a):
# From <NAME>?
_amin = 0.001 # minimum scale factor
_amax = 1.0 # maximum scale factor
_na = 512 # number of points in interpolation arrays
atab = np.linspace(_amin,
_amax,
_na)
ks = np.logspace(np.log10(1e-5),np.log10(1.),num=100)
zs = a2z(atab)
deltakz = self.results.get_redshift_evolution(ks, zs, ['delta_cdm']) #index: k,z,0
D_camb = deltakz[0,:,0]/deltakz[0,0,0]
_da_interp = interp1d(atab, D_camb, kind='linear')
_da_interp_type = "camb"
return _da_interp(a)/_da_interp(1.0)
    def P_lin(self,ks,zs,knorm = 1e-4,kmax = 0.1):
        """
        This function will provide the linear matter power spectrum used in calculation
        of sigma2. It is written as
        P_lin(k,z) = norm(z) * T(k)**2
        where T(k) is the Eisenstein & Hu, 1998 transfer function.
        Care has to be taken about interpreting this beyond LCDM.
        For example, the transfer function can be inaccurate for nuCDM and wCDM cosmologies.
        If this function is only used to model sigma2 -> N(M,z) -> halo model power spectra at small
        scales, and cosmological dependence is obtained through an accurate CAMB based P(k),
        one should be fine.
        """
        tk = self.Tk(ks,'eisenhu_osc')
        assert knorm<kmax
        # Anchor the analytic transfer function to CAMB's P(k) at knorm.
        PK = camb.get_matter_power_interpolator(self.pars, nonlinear=False,
                                             hubble_units=False, k_hunit=False, kmax=kmax,
                                             zmax=zs.max()+1.)
        pnorm = PK.P(zs, knorm,grid=True)
        tnorm = self.Tk(knorm,'eisenhu_osc') * knorm**(self.params['ns'])
        plin = (pnorm/tnorm) * tk**2. * ks**(self.params['ns'])
        return (self.as8**2.) *plin
    def P_lin_slow(self,ks,zs,kmax = 0.1):
        """Linear P(k, z) evaluated directly from CAMB (slower, no EH98 fit)."""
        PK = camb.get_matter_power_interpolator(self.pars, nonlinear=False,
                                             hubble_units=False, k_hunit=False, kmax=kmax,
                                             zmax=zs.max()+1.)
        plin = PK.P(zs, ks,grid=True)
        return (self.as8**2.) * plin
def Tk(self,ks,type ='eisenhu_osc'):
    """Eisenstein & Hu (1998) matter transfer function T(k).

    Pulled from cosmicpy
    https://github.com/cosmicpy/cosmicpy/blob/master/LICENSE.rst

    type selects the fit: 'eisenhu' is the smooth (no-wiggle) form,
    'eisenhu_osc' includes the baryon acoustic oscillations.  Any other
    value returns zeros (res is initialized with np.zeros_like).

    NOTE(review): `type` shadows the builtin; kept for interface
    compatibility.  As a side effect this method stores several
    intermediate quantities on self (tcmb, _k_eq, _z_eq, _z_d, _R_d,
    _R_eq, sh_d, _k_silk).
    """
    # ks is divided by h, so it is presumably in 1/Mpc and k below is
    # in h/Mpc -- TODO confirm against callers.
    k = ks/self.h
    self.tcmb = 2.726
    T_2_7_sqr = (self.tcmb/2.7)**2
    h2 = self.h**2  # NOTE(review): currently unused
    w_m = self.params['omch2'] + self.params['ombh2']
    w_b = self.params['ombh2']
    self._k_eq = 7.46e-2*w_m/T_2_7_sqr / self.h  # Eq. (3) [h/Mpc]
    self._z_eq = 2.50e4*w_m/(T_2_7_sqr)**2  # Eq. (2)
    # z drag from Eq. (4)
    b1 = 0.313*pow(w_m, -0.419)*(1.0+0.607*pow(w_m, 0.674))
    b2 = 0.238*pow(w_m, 0.223)
    self._z_d = 1291.0*pow(w_m, 0.251)/(1.0+0.659*pow(w_m, 0.828)) * \
        (1.0 + b1*pow(w_b, b2))
    # Ratio of the baryon to photon momentum density at z_d Eq. (5)
    self._R_d = 31.5 * w_b / (T_2_7_sqr)**2 * (1.e3/self._z_d)
    # Ratio of the baryon to photon momentum density at z_eq Eq. (5)
    self._R_eq = 31.5 * w_b / (T_2_7_sqr)**2 * (1.e3/self._z_eq)
    # Sound horizon at drag epoch in h^-1 Mpc Eq. (6)
    self.sh_d = 2.0/(3.0*self._k_eq) * np.sqrt(6.0/self._R_eq) * \
        np.log((np.sqrt(1.0 + self._R_d) + np.sqrt(self._R_eq + self._R_d)) /
               (1.0 + np.sqrt(self._R_eq)))
    # Silk damping scale, Eq. (7) but in [hMpc^{-1}]
    self._k_silk = 1.6 * pow(w_b, 0.52) * pow(w_m, 0.73) * \
        (1.0 + pow(10.4*w_m, -0.95)) / self.h
    Omega_m = self.om0
    # Baryon and CDM fractions of the total matter density.
    fb = self.params['ombh2'] / (self.params['omch2']+self.params['ombh2'])  # Omega_b / Omega_m
    fc = self.params['omch2'] / (self.params['omch2']+self.params['ombh2'])  # Omega_c / Omega_m
    alpha_gamma = 1.-0.328*np.log(431.*w_m)*w_b/w_m + \
        0.38*np.log(22.3*w_m)*(fb)**2
    gamma_eff = Omega_m*self.h * \
        (alpha_gamma + (1.-alpha_gamma)/(1.+(0.43*k*self.sh_d)**4))
    res = np.zeros_like(k)
    if(type == 'eisenhu'):
        # Smooth (no-wiggle) fit.
        q = k * pow(self.tcmb/2.7, 2)/gamma_eff
        # EH98 (29) #
        L = np.log(2.*np.exp(1.0) + 1.8*q)
        C = 14.2 + 731.0/(1.0 + 62.5*q)
        res = L/(L + C*q*q)
    elif(type == 'eisenhu_osc'):
        # Cold dark matter transfer function
        # EH98 (11, 12)
        a1 = pow(46.9*w_m, 0.670) * (1.0 + pow(32.1*w_m, -0.532))
        a2 = pow(12.0*w_m, 0.424) * (1.0 + pow(45.0*w_m, -0.582))
        alpha_c = pow(a1, -fb) * pow(a2, -fb**3)
        b1 = 0.944 / (1.0 + pow(458.0*w_m, -0.708))
        b2 = pow(0.395*w_m, -0.0266)
        beta_c = 1.0 + b1*(pow(fc, b2) - 1.0)
        beta_c = 1.0 / beta_c
        # EH98 (19). [k] = h/Mpc
        def T_tilde(k1, alpha, beta):
            # EH98 (10); [q] = 1 BUT [k] = h/Mpc
            q = k1 / (13.41 * self._k_eq)
            L = np.log(np.exp(1.0) + 1.8 * beta * q)
            C = 14.2 / alpha + 386.0 / (1.0 + 69.9 * pow(q, 1.08))
            T0 = L/(L + C*q*q)
            return T0
        # EH98 (17, 18)
        f = 1.0 / (1.0 + (k * self.sh_d / 5.4)**4)
        Tc = f * T_tilde(k, 1.0, beta_c) + \
            (1.0 - f) * T_tilde(k, alpha_c, beta_c)
        # Baryon transfer function
        # EH98 (19, 14, 21)
        y = (1.0 + self._z_eq) / (1.0 + self._z_d)
        x = np.sqrt(1.0 + y)
        G_EH98 = y * (-6.0 * x +
                      (2.0 + 3.0*y) * np.log((x + 1.0) / (x - 1.0)))
        alpha_b = 2.07 * self._k_eq * self.sh_d * \
            pow(1.0 + self._R_d, -0.75) * G_EH98
        beta_node = 8.41 * pow(w_m, 0.435)
        tilde_s = self.sh_d / pow(1.0 + (beta_node /
                                         (k * self.sh_d))**3, 1.0/3.0)
        beta_b = 0.5 + fb + (3.0 - 2.0 * fb) * np.sqrt((17.2 * w_m)**2 + 1.0)
        # [tilde_s] = Mpc/h
        Tb = (T_tilde(k, 1.0, 1.0) / (1.0 + (k * self.sh_d / 5.2)**2) +
              alpha_b / (1.0 + (beta_b/(k * self.sh_d))**3) *
              np.exp(-pow(k / self._k_silk, 1.4))) * np.sinc(k*tilde_s/np.pi)
        # Total transfer function: density-weighted sum of CDM + baryons.
        res = fb * Tb + fc * Tc
    return res
def lensing_window(self,ezs,zs,dndz=None):
    """Lensing convergence window W(z) evaluated at redshifts ezs.

    zs: if it is a single number and dndz is None, the sources sit on a
    delta function at zs; otherwise zs are the points at which the
    source distribution dndz is defined (and dndz must be supplied).
    """
    source_zs = np.array(zs).reshape(-1)
    hubble0 = self.h_of_z(0.)
    hubble = self.h_of_z(ezs)
    chi = self.comoving_radial_distance(ezs)
    chi_source = self.comoving_radial_distance(source_zs)
    if source_zs.size == 1:
        # Single source plane: W ~ (chi_s - chi)/chi_s, zero behind it.
        assert dndz is None
        integral = (chi_source - chi) / chi_source
        integral[ezs > source_zs] = 0
    else:
        # Weight the kernel by the normalized source distribution, then
        # integrate over the source redshifts.
        weights = dndz / np.trapz(dndz, source_zs)
        kernel = (chi_source[None, :] - chi[:, None]) / chi_source[None, :] \
            * weights[None, :]
        for row in range(kernel.shape[0]):  # FIXME: vectorize this
            kernel[row][source_zs < ezs[row]] = 0
        integral = np.trapz(kernel, source_zs, axis=-1)
    return 1.5 * self.om0 * hubble0 ** 2. * (1. + ezs) * chi / hubble * integral
def C_kg(self,ells,zs,ks,Pgm,gzs,gdndz=None,lzs=None,ldndz=None,lwindow=None):
    """Lensing x galaxy cross spectrum C_l^{kg} via the Limber integral."""
    gzs = np.array(gzs).reshape(-1)
    # Lensing kernel: either supplied directly or built from (lzs, ldndz).
    if lwindow is None:
        lensing_kernel = self.lensing_window(gzs, lzs, ldndz)
    else:
        lensing_kernel = lwindow
    chis = self.comoving_radial_distance(gzs)
    hzs = self.h_of_z(gzs)  # 1/Mpc
    if gzs.size > 1:
        galaxy_window = gdndz / np.trapz(gdndz, gzs)
    else:
        galaxy_window = 1.
    return limber_integral(ells, zs, ks, Pgm, gzs, lensing_kernel,
                           galaxy_window, hzs, chis)
def C_gg(self,ells,zs,ks,Pgg,gzs,gdndz=None,zmin=None,zmax=None):
    """Galaxy auto-spectrum C_l^{gg} via the Limber integral."""
    gzs = np.asarray(gzs)
    chis = self.comoving_radial_distance(gzs)
    hzs = self.h_of_z(gzs)  # 1/Mpc
    if gzs.size > 1:
        # Normalized dn/dz serves as both weight functions.
        window = gdndz / np.trapz(gdndz, gzs)
        first, second = window, window
    else:
        # Single redshift: top-hat shell of comoving width dchi.
        dchi = self.comoving_radial_distance(zmax) - self.comoving_radial_distance(zmin)
        first = 1.
        second = 1. / dchi / hzs
    return limber_integral(ells, zs, ks, Pgg, gzs, first, second, hzs, chis)
def C_kk(self,ells,zs,ks,Pmm,lzs1=None,ldndz1=None,lzs2=None,ldndz2=None,lwindow1=None,lwindow2=None):
    """Lensing convergence auto-spectrum C_l^{kk} via the Limber integral."""
    # Build each lensing kernel unless a precomputed window was passed in.
    if lwindow1 is None:
        lwindow1 = self.lensing_window(zs, lzs1, ldndz1)
    if lwindow2 is None:
        lwindow2 = self.lensing_window(zs, lzs2, ldndz2)
    chis = self.comoving_radial_distance(zs)
    hzs = self.h_of_z(zs)  # 1/Mpc
    return limber_integral(ells, zs, ks, Pmm, zs, lwindow1, lwindow2, hzs, chis)
def C_gy(self,ells,zs,ks,Pgp,gzs,gdndz=None,zmin=None,zmax=None):
    """Galaxy x Compton-y cross spectrum via the Limber integral.

    The y-channel weight is 1 (matching C_ky and C_yy); the galaxy
    weight is the normalized dn/dz when several redshifts are given, or
    a top-hat shell of comoving width dchi for a single redshift.

    Fixes two NameErrors in the original: `dndz` (undefined; the dead
    `Wz1s` assignment is removed) and `Ppy` (undefined; the `Pgp`
    argument is what should be integrated).
    """
    gzs = np.asarray(gzs)
    chis = self.comoving_radial_distance(gzs)
    hzs = self.h_of_z(gzs)  # 1/Mpc
    if gzs.size > 1:
        nznorm = np.trapz(gdndz, gzs)
        Wz2s = gdndz / nznorm
    else:
        dchi = self.comoving_radial_distance(zmax) - self.comoving_radial_distance(zmin)
        Wz2s = 1. / dchi / hzs
    # First weight is 1 for the y channel, as in C_ky / C_yy.
    return limber_integral(ells, zs, ks, Pgp, gzs, 1, Wz2s, hzs, chis)
def C_ky(self,ells,zs,ks,Pym,lzs1=None,ldndz1=None,lzs2=None,ldndz2=None,lwindow1=None):
    """Lensing x Compton-y cross spectrum via the Limber integral.

    The y-channel weight is 1; the lensing kernel is built from
    (lzs1, ldndz1) unless lwindow1 is supplied.
    """
    kernel = self.lensing_window(zs, lzs1, ldndz1) if lwindow1 is None else lwindow1
    chis = self.comoving_radial_distance(zs)
    hzs = self.h_of_z(zs)  # 1/Mpc
    return limber_integral(ells, zs, ks, Pym, zs, kernel, 1, hzs, chis)
def C_yy(self,ells,zs,ks,Ppp,dndz=None,zmin=None,zmax=None):
    """Compton-y auto-spectrum via the Limber integral.

    Both weight functions are unity; the conversion to y units is still
    a TODO in the original code.
    """
    chis = self.comoving_radial_distance(zs)
    hzs = self.h_of_z(zs)  # 1/Mpc
    # Convert to y units
    #
    return limber_integral(ells, zs, ks, Ppp, zs, 1, 1, hzs, chis)
def total_matter_power_spectrum(self,Pnn,Pne,Pee):
    """Combine CDM (n) and baryon/electron (e) spectra into the total
    matter power spectrum with density-weighted fractions."""
    omega_total = self.p['omch2'] + self.p['ombh2']
    f_cdm = self.p['omch2'] / omega_total
    f_bar = self.p['ombh2'] / omega_total
    # fc^2 Pnn + 2 fc fb Pne + fb^2 Pee
    return f_cdm**2.*Pnn + 2.*f_cdm*f_bar*Pne + f_bar*f_bar*Pee
def total_matter_galaxy_power_spectrum(self,Pgn,Pge):
    """Combine galaxy-CDM and galaxy-electron cross spectra into the
    galaxy x total-matter power spectrum with density-weighted fractions."""
    omega_total = self.p['omch2'] + self.p['ombh2']
    f_cdm = self.p['omch2'] / omega_total
    f_bar = self.p['ombh2'] / omega_total
    return f_cdm*Pgn + f_bar*Pge
def a2z(a): return (1.0/a)-1.0
def limber_integral(ells,zs,ks,Pzks,gzs,Wz1s,Wz2s,hzs,chis):
    """
    Get C(ell) = integral dz (H(z)/c) W1(z) W2(z) Pzks(z,k=(ell+0.5)/chi) / chis**2.
    ells: (nells,) multipoles looped over
    zs: redshifts (npzs,) corresponding to Pzks
    ks: comoving wavenumbers (nks,) corresponding to Pzks
    Pzks: (npzs,nks) power spectrum
    gzs: (nzs,) corresponding to Wz1s, Wz2s, hzs and chis
    Wz1s: weight function (nzs,)
    Wz2s: weight function (nzs,)
    hzs: Hubble parameter (nzs,) in *1/Mpc* (e.g. camb.results.h_of_z(z))
    chis: comoving distances (nzs,)
    We interpolate P(z,k)
    """
    # Coerce the per-redshift inputs to flat arrays so scalar weights
    # (e.g. Wz1s=1) broadcast over the gzs grid.
    hzs = np.array(hzs).reshape(-1)
    Wz1s = np.array(Wz1s).reshape(-1)
    Wz2s = np.array(Wz2s).reshape(-1)
    chis = np.array(chis).reshape(-1)
    # Everything in the integrand except P(z, k), evaluated per redshift.
    prefactor = hzs * Wz1s * Wz2s / chis**2.
    # Redshifts at which the interpolated P is evaluated; assumes gzs is
    # array-like with a .size attribute -- TODO confirm scalar handling.
    zevals = gzs
    # NOTE(review): scipy.interpolate.interp2d and the dfitpack hack
    # below are deprecated/removed in recent SciPy -- confirm the pinned
    # SciPy version before upgrading.
    if zs.size>1:
        f = interp2d(ks,zs,Pzks,bounds_error=True)
    else:
        f = interp1d(ks,Pzks[0],bounds_error=True)
    Cells = np.zeros(ells.shape)
    for i,ell in enumerate(ells):
        # Limber wavenumbers: k = (ell + 1/2) / chi(z)
        kevals = (ell+0.5)/chis
        if zs.size>1:
            # hack suggested in https://stackoverflow.com/questions/47087109/evaluate-the-output-from-scipy-2d-interpolation-along-a-curve
            # to get around scipy.interpolate limitations
            interpolated = si.dfitpack.bispeu(f.tck[0], f.tck[1], f.tck[2], f.tck[3], f.tck[4], kevals, zevals)[0]
        else:
            interpolated = f(kevals)
        if zevals.size==1: Cells[i] = interpolated * prefactor
        else: Cells[i] = np.trapz(interpolated*prefactor,zevals)
    return Cells
| [
"numpy.log10",
"camb.set_params",
"numpy.trapz",
"numpy.sqrt",
"scipy.interpolate.dfitpack.bispeu",
"numpy.log",
"numpy.asarray",
"numpy.sinc",
"scipy.interpolate.interp1d",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"camb.get_background",
"numpy.zeros_like",
"scipy.i... | [((16320, 16340), 'numpy.zeros', 'np.zeros', (['ells.shape'], {}), '(ells.shape)\n', (16328, 16340), True, 'import numpy as np\n'), ((2508, 2916), 'camb.set_params', 'camb.set_params', ([], {'ns': "params['ns']", 'As': "params['As']", 'H0': 'H0', 'cosmomc_theta': 'theta', 'ombh2': "params['ombh2']", 'omch2': "params['omch2']", 'mnu': "params['mnu']", 'tau': "params['tau']", 'nnu': "params['nnu']", 'num_massive_neutrinos': "params['num_massive_neutrinos']", 'w': "params['w0']", 'wa': "params['wa']", 'dark_energy_model': '"""ppf"""', 'halofit_version': "(self.p['default_halofit'] if halofit is None else halofit)", 'AccuracyBoost': '(2)'}), "(ns=params['ns'], As=params['As'], H0=H0, cosmomc_theta=\n theta, ombh2=params['ombh2'], omch2=params['omch2'], mnu=params['mnu'],\n tau=params['tau'], nnu=params['nnu'], num_massive_neutrinos=params[\n 'num_massive_neutrinos'], w=params['w0'], wa=params['wa'],\n dark_energy_model='ppf', halofit_version=self.p['default_halofit'] if \n halofit is None else halofit, AccuracyBoost=2)\n", (2523, 2916), False, 'import camb\n'), ((3237, 3267), 'camb.get_background', 'camb.get_background', (['self.pars'], {}), '(self.pars)\n', (3256, 3267), False, 'import camb\n'), ((4706, 4736), 'numpy.linspace', 'np.linspace', (['_amin', '_amax', '_na'], {}), '(_amin, _amax, _na)\n', (4717, 4736), True, 'import numpy as np\n'), ((5036, 5073), 'scipy.interpolate.interp1d', 'interp1d', (['atab', 'D_camb'], {'kind': '"""linear"""'}), "(atab, D_camb, kind='linear')\n", (5044, 5073), False, 'from scipy.interpolate import interp2d, interp1d\n'), ((8682, 8698), 'numpy.zeros_like', 'np.zeros_like', (['k'], {}), '(k)\n', (8695, 8698), True, 'import numpy as np\n'), ((12835, 12850), 'numpy.asarray', 'np.asarray', (['gzs'], {}), '(gzs)\n', (12845, 12850), True, 'import numpy as np\n'), ((13807, 13822), 'numpy.asarray', 'np.asarray', (['gzs'], {}), '(gzs)\n', (13817, 13822), True, 'import numpy as np\n'), ((16196, 16237), 
'scipy.interpolate.interp2d', 'interp2d', (['ks', 'zs', 'Pzks'], {'bounds_error': '(True)'}), '(ks, zs, Pzks, bounds_error=True)\n', (16204, 16237), False, 'from scipy.interpolate import interp2d, interp1d\n'), ((16269, 16309), 'scipy.interpolate.interp1d', 'interp1d', (['ks', 'Pzks[0]'], {'bounds_error': '(True)'}), '(ks, Pzks[0], bounds_error=True)\n', (16277, 16309), False, 'from scipy.interpolate import interp2d, interp1d\n'), ((4816, 4831), 'numpy.log10', 'np.log10', (['(1e-05)'], {}), '(1e-05)\n', (4824, 4831), True, 'import numpy as np\n'), ((4831, 4844), 'numpy.log10', 'np.log10', (['(1.0)'], {}), '(1.0)\n', (4839, 4844), True, 'import numpy as np\n'), ((11796, 11814), 'numpy.trapz', 'np.trapz', (['dndz', 'zs'], {}), '(dndz, zs)\n', (11804, 11814), True, 'import numpy as np\n'), ((12130, 12162), 'numpy.trapz', 'np.trapz', (['integrand', 'zs'], {'axis': '(-1)'}), '(integrand, zs, axis=-1)\n', (12138, 12162), True, 'import numpy as np\n'), ((12592, 12612), 'numpy.trapz', 'np.trapz', (['gdndz', 'gzs'], {}), '(gdndz, gzs)\n', (12600, 12612), True, 'import numpy as np\n'), ((12984, 13004), 'numpy.trapz', 'np.trapz', (['gdndz', 'gzs'], {}), '(gdndz, gzs)\n', (12992, 13004), True, 'import numpy as np\n'), ((13956, 13976), 'numpy.trapz', 'np.trapz', (['gdndz', 'gzs'], {}), '(gdndz, gzs)\n', (13964, 13976), True, 'import numpy as np\n'), ((15944, 15957), 'numpy.array', 'np.array', (['hzs'], {}), '(hzs)\n', (15952, 15957), True, 'import numpy as np\n'), ((15981, 15995), 'numpy.array', 'np.array', (['Wz1s'], {}), '(Wz1s)\n', (15989, 15995), True, 'import numpy as np\n'), ((16019, 16033), 'numpy.array', 'np.array', (['Wz2s'], {}), '(Wz2s)\n', (16027, 16033), True, 'import numpy as np\n'), ((16057, 16071), 'numpy.array', 'np.array', (['chis'], {}), '(chis)\n', (16065, 16071), True, 'import numpy as np\n'), ((16880, 16922), 'numpy.trapz', 'np.trapz', (['(interpolated * prefactor)', 'zevals'], {}), '(interpolated * prefactor, zevals)\n', (16888, 16922), True, 'import 
numpy as np\n'), ((7860, 7885), 'numpy.sqrt', 'np.sqrt', (['(6.0 / self._R_eq)'], {}), '(6.0 / self._R_eq)\n', (7867, 7885), True, 'import numpy as np\n'), ((10124, 10140), 'numpy.sqrt', 'np.sqrt', (['(1.0 + y)'], {}), '(1.0 + y)\n', (10131, 10140), True, 'import numpy as np\n'), ((11406, 11418), 'numpy.array', 'np.array', (['zs'], {}), '(zs)\n', (11414, 11418), True, 'import numpy as np\n'), ((12334, 12347), 'numpy.array', 'np.array', (['gzs'], {}), '(gzs)\n', (12342, 12347), True, 'import numpy as np\n'), ((16653, 16741), 'scipy.interpolate.dfitpack.bispeu', 'si.dfitpack.bispeu', (['f.tck[0]', 'f.tck[1]', 'f.tck[2]', 'f.tck[3]', 'f.tck[4]', 'kevals', 'zevals'], {}), '(f.tck[0], f.tck[1], f.tck[2], f.tck[3], f.tck[4], kevals,\n zevals)\n', (16671, 16741), True, 'import scipy.interpolate as si\n'), ((4078, 4094), 'numpy.atleast_1d', 'np.atleast_1d', (['z'], {}), '(z)\n', (4091, 4094), True, 'import numpy as np\n'), ((8531, 8549), 'numpy.log', 'np.log', (['(22.3 * w_m)'], {}), '(22.3 * w_m)\n', (8537, 8549), True, 'import numpy as np\n'), ((10855, 10883), 'numpy.sinc', 'np.sinc', (['(k * tilde_s / np.pi)'], {}), '(k * tilde_s / np.pi)\n', (10862, 10883), True, 'import numpy as np\n'), ((7908, 7932), 'numpy.sqrt', 'np.sqrt', (['(1.0 + self._R_d)'], {}), '(1.0 + self._R_d)\n', (7915, 7932), True, 'import numpy as np\n'), ((7935, 7966), 'numpy.sqrt', 'np.sqrt', (['(self._R_eq + self._R_d)'], {}), '(self._R_eq + self._R_d)\n', (7942, 7966), True, 'import numpy as np\n'), ((7993, 8012), 'numpy.sqrt', 'np.sqrt', (['self._R_eq'], {}), '(self._R_eq)\n', (8000, 8012), True, 'import numpy as np\n'), ((8837, 8848), 'numpy.exp', 'np.exp', (['(1.0)'], {}), '(1.0)\n', (8843, 8848), True, 'import numpy as np\n'), ((10592, 10624), 'numpy.sqrt', 'np.sqrt', (['((17.2 * w_m) ** 2 + 1.0)'], {}), '((17.2 * w_m) ** 2 + 1.0)\n', (10599, 10624), True, 'import numpy as np\n'), ((8485, 8504), 'numpy.log', 'np.log', (['(431.0 * w_m)'], {}), '(431.0 * w_m)\n', (8491, 8504), True, 'import numpy 
as np\n'), ((9630, 9641), 'numpy.exp', 'np.exp', (['(1.0)'], {}), '(1.0)\n', (9636, 9641), True, 'import numpy as np\n'), ((10220, 10249), 'numpy.log', 'np.log', (['((x + 1.0) / (x - 1.0))'], {}), '((x + 1.0) / (x - 1.0))\n', (10226, 10249), True, 'import numpy as np\n')] |
# test annotation bboxes with extracted midframes and clips
import cv2
import os
import json
import numpy as np
# Dataset layout, rooted at the AVA_DIR environment variable.
AVA_FOLDER = os.environ['AVA_DIR'] + '/AVA'
segments_folder = AVA_FOLDER + '/segments/segments/'
annotations_folder = AVA_FOLDER + '/annotations/'
data_folder = AVA_FOLDER + '/data/'
objects_folder = AVA_FOLDER + '/objects/'
# objects_folder = AVA_FOLDER + '/ava_trained_persons/'
split = 'train'  # dataset split to visualize
seg_anno_file = data_folder + 'segment_annotations_%s.json' %split
def main():
    """Visualize AVA ground-truth annotations next to object detections.

    For the first segment of each movie, loads the extracted midframe,
    draws the annotation boxes and the detector output on separate
    copies, and shows them side by side; pressing 'q' quits.
    """
    with open(seg_anno_file) as fp:
        annotations = json.load(fp)
    # Fix: dict.keys() returns a view with no .sort() in Python 3;
    # build a sorted list of keys instead.
    seg_keys = sorted(annotations)
    last_mov = ''
    for seg_key in seg_keys:
    # for seg_key in ['-<KEY>']:
        print('Working on %s' %seg_key)
        cur_annos = annotations[seg_key]
        movie_key, timestamp = seg_key.split('.')
        # Only visualize the first segment of each movie.
        if movie_key == last_mov:
            continue
        last_mov = movie_key
        midframe_file = os.path.join(segments_folder, split, 'midframes', movie_key, timestamp+'.jpg')
        object_detection_file = os.path.join(objects_folder, split, movie_key, '%s.json' %timestamp)
        with open(object_detection_file) as fp:
            object_results = json.load(fp)
        midframe = cv2.imread(midframe_file)
        anno_frame = np.copy(midframe)
        draw_anno(anno_frame, cur_annos)
        obj_frame = np.copy(midframe)
        draw_objects(obj_frame, object_results['detections'])
        img_to_show = np.concatenate([obj_frame, anno_frame], axis=1)
        cv2.imshow('result', img_to_show)
        k = cv2.waitKey(0)
        if k == ord('q'):
            # Hard exit, presumably to avoid OpenCV window teardown
            # hangs -- skips normal interpreter cleanup.
            os._exit(0)
def draw_objects(frame, detections):
    """Draw detector boxes with class/score labels onto frame (in place).

    Detection boxes are normalized (top, left, bottom, right).
    """
    height, width, _ = frame.shape
    for idx, det in enumerate(detections):
        color = colors[idx, :]
        top, left, bottom, right = det['box']
        # Scale normalized coordinates to pixel coordinates.
        left, right = int(width * left), int(width * right)
        top, bottom = int(height * top), int(height * bottom)
        message = det['class_str'] + ' %.2f' % det['score']
        cv2.rectangle(frame, (left, top), (right, bottom), color, 2)
        # Label background sized so the text roughly spans the box width.
        font_size = max(0.5, (right - left) / 50.0 / float(len(message)))
        cv2.rectangle(frame, (left, top - int(font_size * 40)), (right, top), color, -1)
        cv2.putText(frame, message, (left, top - 12), 0, font_size,
                    (255, 255, 255) - color, 1)
colors = np.random.randint(0, 255, [100, 3])
def draw_anno(frame, bboxes):
    """Draw ground-truth annotation boxes onto frame (in place).

    Annotation boxes are normalized (left, top, right, bottom) -- note
    the ordering differs from the detector boxes in draw_objects.
    """
    height, width, _ = frame.shape
    for idx, anno in enumerate(bboxes):
        color = colors[idx, :]
        left, top, right, bottom = anno['bbox']
        left, right = int(width * left), int(width * right)
        top, bottom = int(height * top), int(height * bottom)
        cv2.rectangle(frame, (left, top), (right, bottom), color, 2)
# Entry point: show annotation/detection overlays for the chosen split.
if __name__ == '__main__':
    main()
| [
"cv2.rectangle",
"numpy.copy",
"os.path.join",
"cv2.imshow",
"cv2.putText",
"numpy.random.randint",
"cv2.waitKey",
"os._exit",
"numpy.concatenate",
"json.load",
"cv2.imread"
] | [((2418, 2453), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '[100, 3]'], {}), '(0, 255, [100, 3])\n', (2435, 2453), True, 'import numpy as np\n'), ((555, 568), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (564, 568), False, 'import json\n'), ((945, 1030), 'os.path.join', 'os.path.join', (['segments_folder', 'split', '"""midframes"""', 'movie_key', "(timestamp + '.jpg')"], {}), "(segments_folder, split, 'midframes', movie_key, timestamp + '.jpg'\n )\n", (957, 1030), False, 'import os\n'), ((1057, 1126), 'os.path.join', 'os.path.join', (['objects_folder', 'split', 'movie_key', "('%s.json' % timestamp)"], {}), "(objects_folder, split, movie_key, '%s.json' % timestamp)\n", (1069, 1126), False, 'import os\n'), ((1237, 1262), 'cv2.imread', 'cv2.imread', (['midframe_file'], {}), '(midframe_file)\n', (1247, 1262), False, 'import cv2\n'), ((1285, 1302), 'numpy.copy', 'np.copy', (['midframe'], {}), '(midframe)\n', (1292, 1302), True, 'import numpy as np\n'), ((1373, 1390), 'numpy.copy', 'np.copy', (['midframe'], {}), '(midframe)\n', (1380, 1390), True, 'import numpy as np\n'), ((1476, 1523), 'numpy.concatenate', 'np.concatenate', (['[obj_frame, anno_frame]'], {'axis': '(1)'}), '([obj_frame, anno_frame], axis=1)\n', (1490, 1523), True, 'import numpy as np\n'), ((1533, 1566), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'img_to_show'], {}), "('result', img_to_show)\n", (1543, 1566), False, 'import cv2\n'), ((1579, 1593), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1590, 1593), False, 'import cv2\n'), ((2103, 2163), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(left, top)', '(right, bottom)', 'color', '(2)'], {}), '(frame, (left, top), (right, bottom), color, 2)\n', (2116, 2163), False, 'import cv2\n'), ((2325, 2416), 'cv2.putText', 'cv2.putText', (['frame', 'message', '(left, top - 12)', '(0)', 'font_size', '((255, 255, 255) - color)', '(1)'], {}), '(frame, message, (left, top - 12), 0, font_size, (255, 255, 255) -\n color, 1)\n', 
(2336, 2416), False, 'import cv2\n'), ((2795, 2855), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(left, top)', '(right, bottom)', 'color', '(2)'], {}), '(frame, (left, top), (right, bottom), color, 2)\n', (2808, 2855), False, 'import cv2\n'), ((1203, 1216), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (1212, 1216), False, 'import json\n'), ((1632, 1643), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (1640, 1643), False, 'import os\n')] |
import numpy as np
from astropy import units as u
from astropy.time import Time
from CelestialMechanics.kepler import constants
from CelestialMechanics.kepler import kepler3
from CelestialMechanics.mu import mu_sun, mu_gm1m2
from CelestialMechanics.orbital_elements.orbital_elements import solve, solve_ellipse, solve_hyperbola, solve_parable
from CelestialMechanics.orbits import ellipse, parable, hyperbola
# 1
# Exercise 1: propagate a heliocentric elliptical orbit (all angles zero)
# from the reference epoch t_r to t and report the true anomaly and radius.
print("# 1")
a = 2.6785123 * u.au
e = 0.2543422
i = 0 * u.deg
W = 0 * u.deg
w = 0 * u.deg
M_r = 0 * u.deg
T = kepler3.T_sun(a, 0)
print('T', T)
mu = mu_sun(0)
t_r = Time('2006-06-30T00:00:00Z', format='isot', scale='utc').jd * u.d
t = Time('2006-05-05T00:00:00Z', format='isot', scale='utc').jd * u.d
r, angle, r1, r_angle1 = solve_ellipse(a, e, M_r, mu, t_r, t)
print('theta', angle.to(u.deg))
print('theta', angle.to(u.deg) % (360 * u.deg))
print('r', r)
# Cross-check: the Cartesian solution must give the same radius.
(x, y, z), (x1, y1, z1) = solve(a, e, W, w, i, M_r, mu, t_r, t)
print('r', np.sqrt(x * x + y * y + z * z))
# 2
# Exercise 2: Earth orbit with perigee/apogee altitudes of 300/500 km;
# period, mean motion, and speeds at perigee.
print("# 2")
R_earth = 6378.14 * u.km
m1 = 5.97E24 * u.kg
m2 = 0 * u.kg
q = R_earth + 300 * u.km
Q = R_earth + 500 * u.km
print('q', q)
print('Q', Q)
a, e = ellipse.ae(q, Q)
print('a', a)
print('e', e)
T = kepler3.T(a, m1, m2)
print('T', T.to(u.h))
n = ellipse.n(a, mu_gm1m2(m1, m2))
print('n', n.to(u.deg / u.d))
vq = ellipse.v(q, a, m1, m2)
print('vq', vq.to(u.km / u.s))
vq, vQ = ellipse.vqQ(a, e, m1, m2)
print('vq', vq.to(u.km / u.s))
# 4
# Exercise 4: mean anomaly at the reference epoch from mean longitude
# L_r and longitude of perihelion w (M = L - w).
print("# 4")
t0 = Time('2014-05-28T00:00:00Z', format='isot', scale='utc').jd * u.d
print('t0', t0)
t1 = Time('2014-06-21T00:00:00Z', format='isot', scale='utc').jd * u.d
print('t1', t1)
L_r = 245.26107
w = 102.9777
print('M_r', L_r - w)
# 5
# Exercise 5: epochs at which the Earth-Moon barycenter reaches r = 1 au
# after perihelion passage at t0.
print("# 5")
t0 = Time('2014-01-03T00:00:00Z', format='isot', scale='utc').jd * u.d + 0.633 * u.d
a = 1 * u.au
e = 0.01669
r = 1 * u.au
mu = mu_sun(1 / constants.Earth_Moon)
angles = ellipse.angles(a, e, r)
print('theta1', angles[0])
print('theta2', angles[1])
delta_t1_t0 = ellipse.delta_t_t0_aeangle(a, e, angles[0], mu) % (1 * u.yr).to(u.d) # modulo 1 year
print('t1', Time(t0 + delta_t1_t0, format='jd', scale='utc').isot)
delta_t2_t0 = ellipse.delta_t_t0_aeangle(a, e, angles[1], mu) % (1 * u.yr).to(u.d) # modulo 1 year
print('t2', Time(t0 + delta_t2_t0, format='jd', scale='utc').isot)
| [
"CelestialMechanics.kepler.kepler3.T_sun",
"numpy.sqrt",
"CelestialMechanics.mu.mu_gm1m2",
"CelestialMechanics.orbital_elements.orbital_elements.solve",
"CelestialMechanics.orbits.ellipse.delta_t_t0_aeangle",
"CelestialMechanics.orbits.ellipse.ae",
"CelestialMechanics.orbital_elements.orbital_elements.s... | [((527, 546), 'CelestialMechanics.kepler.kepler3.T_sun', 'kepler3.T_sun', (['a', '(0)'], {}), '(a, 0)\n', (540, 546), False, 'from CelestialMechanics.kepler import kepler3\n'), ((566, 575), 'CelestialMechanics.mu.mu_sun', 'mu_sun', (['(0)'], {}), '(0)\n', (572, 575), False, 'from CelestialMechanics.mu import mu_sun, mu_gm1m2\n'), ((743, 779), 'CelestialMechanics.orbital_elements.orbital_elements.solve_ellipse', 'solve_ellipse', (['a', 'e', 'M_r', 'mu', 't_r', 't'], {}), '(a, e, M_r, mu, t_r, t)\n', (756, 779), False, 'from CelestialMechanics.orbital_elements.orbital_elements import solve, solve_ellipse, solve_hyperbola, solve_parable\n'), ((900, 937), 'CelestialMechanics.orbital_elements.orbital_elements.solve', 'solve', (['a', 'e', 'W', 'w', 'i', 'M_r', 'mu', 't_r', 't'], {}), '(a, e, W, w, i, M_r, mu, t_r, t)\n', (905, 937), False, 'from CelestialMechanics.orbital_elements.orbital_elements import solve, solve_ellipse, solve_hyperbola, solve_parable\n'), ((1146, 1162), 'CelestialMechanics.orbits.ellipse.ae', 'ellipse.ae', (['q', 'Q'], {}), '(q, Q)\n', (1156, 1162), False, 'from CelestialMechanics.orbits import ellipse, parable, hyperbola\n'), ((1195, 1215), 'CelestialMechanics.kepler.kepler3.T', 'kepler3.T', (['a', 'm1', 'm2'], {}), '(a, m1, m2)\n', (1204, 1215), False, 'from CelestialMechanics.kepler import kepler3\n'), ((1308, 1331), 'CelestialMechanics.orbits.ellipse.v', 'ellipse.v', (['q', 'a', 'm1', 'm2'], {}), '(q, a, m1, m2)\n', (1317, 1331), False, 'from CelestialMechanics.orbits import ellipse, parable, hyperbola\n'), ((1372, 1397), 'CelestialMechanics.orbits.ellipse.vqQ', 'ellipse.vqQ', (['a', 'e', 'm1', 'm2'], {}), '(a, e, m1, m2)\n', (1383, 1397), False, 'from CelestialMechanics.orbits import ellipse, parable, hyperbola\n'), ((1820, 1852), 'CelestialMechanics.mu.mu_sun', 'mu_sun', (['(1 / constants.Earth_Moon)'], {}), '(1 / constants.Earth_Moon)\n', (1826, 1852), False, 'from 
CelestialMechanics.mu import mu_sun, mu_gm1m2\n'), ((1863, 1886), 'CelestialMechanics.orbits.ellipse.angles', 'ellipse.angles', (['a', 'e', 'r'], {}), '(a, e, r)\n', (1877, 1886), False, 'from CelestialMechanics.orbits import ellipse, parable, hyperbola\n'), ((949, 979), 'numpy.sqrt', 'np.sqrt', (['(x * x + y * y + z * z)'], {}), '(x * x + y * y + z * z)\n', (956, 979), True, 'import numpy as np\n'), ((1255, 1271), 'CelestialMechanics.mu.mu_gm1m2', 'mu_gm1m2', (['m1', 'm2'], {}), '(m1, m2)\n', (1263, 1271), False, 'from CelestialMechanics.mu import mu_sun, mu_gm1m2\n'), ((1956, 2003), 'CelestialMechanics.orbits.ellipse.delta_t_t0_aeangle', 'ellipse.delta_t_t0_aeangle', (['a', 'e', 'angles[0]', 'mu'], {}), '(a, e, angles[0], mu)\n', (1982, 2003), False, 'from CelestialMechanics.orbits import ellipse, parable, hyperbola\n'), ((2123, 2170), 'CelestialMechanics.orbits.ellipse.delta_t_t0_aeangle', 'ellipse.delta_t_t0_aeangle', (['a', 'e', 'angles[1]', 'mu'], {}), '(a, e, angles[1], mu)\n', (2149, 2170), False, 'from CelestialMechanics.orbits import ellipse, parable, hyperbola\n'), ((582, 638), 'astropy.time.Time', 'Time', (['"""2006-06-30T00:00:00Z"""'], {'format': '"""isot"""', 'scale': '"""utc"""'}), "('2006-06-30T00:00:00Z', format='isot', scale='utc')\n", (586, 638), False, 'from astropy.time import Time\n'), ((652, 708), 'astropy.time.Time', 'Time', (['"""2006-05-05T00:00:00Z"""'], {'format': '"""isot"""', 'scale': '"""utc"""'}), "('2006-05-05T00:00:00Z', format='isot', scale='utc')\n", (656, 708), False, 'from astropy.time import Time\n'), ((1453, 1509), 'astropy.time.Time', 'Time', (['"""2014-05-28T00:00:00Z"""'], {'format': '"""isot"""', 'scale': '"""utc"""'}), "('2014-05-28T00:00:00Z', format='isot', scale='utc')\n", (1457, 1509), False, 'from astropy.time import Time\n'), ((1540, 1596), 'astropy.time.Time', 'Time', (['"""2014-06-21T00:00:00Z"""'], {'format': '"""isot"""', 'scale': '"""utc"""'}), "('2014-06-21T00:00:00Z', format='isot', scale='utc')\n", (1544, 
1596), False, 'from astropy.time import Time\n'), ((2054, 2102), 'astropy.time.Time', 'Time', (['(t0 + delta_t1_t0)'], {'format': '"""jd"""', 'scale': '"""utc"""'}), "(t0 + delta_t1_t0, format='jd', scale='utc')\n", (2058, 2102), False, 'from astropy.time import Time\n'), ((2221, 2269), 'astropy.time.Time', 'Time', (['(t0 + delta_t2_t0)'], {'format': '"""jd"""', 'scale': '"""utc"""'}), "(t0 + delta_t2_t0, format='jd', scale='utc')\n", (2225, 2269), False, 'from astropy.time import Time\n'), ((1696, 1752), 'astropy.time.Time', 'Time', (['"""2014-01-03T00:00:00Z"""'], {'format': '"""isot"""', 'scale': '"""utc"""'}), "('2014-01-03T00:00:00Z', format='isot', scale='utc')\n", (1700, 1752), False, 'from astropy.time import Time\n')] |
from contextlib import ExitStack
import fnmatch
import io
import os
import shutil
import urllib3
import zipfile
import click
import numpy
import rasterio
from rasterio.enums import Interleaving, Compression
# bio is 12 variables in one, the rest are monthly
VARS = {
'tmin': {
'nodata': -32768,
'dtype': rasterio.int16,
'scale': 10
},
'tmax': {
'nodata': -32768,
'dtype': rasterio.int16,
'scale': 10
},
'tavg': {
'nodata': -32768,
'dtype': rasterio.int16,
'scale': 10
},
'prec': {
'nodata': -32768,
'dtype': rasterio.int16,
'scale': 1
},
'srad': {
'nodata': 65535,
'dtype': rasterio.uint16,
'scale': 1
},
'wind': {
'nodata': 65535,
'dtype': rasterio.uint16,
'scale': 10
},
'vapr': {
'nodata': 65535,
'dtype': rasterio.uint16,
'scale': 100
}
}
BASE_URI = 'https://biogeo.ucdavis.edu/data/worldclim/v2.1/base/'
DATA_DIR = './data/'
def local_file(res: str, var: str, ext: str='zip'):
    """Local path for a (resolution, variable) archive or derived file."""
    return '{}{}_{}.{}'.format(DATA_DIR, res, var, ext)
def remote_file(res: str, var: str):
    """URL of the WorldClim 2.1 archive for a (resolution, variable) pair."""
    return '{}wc2.1_{}_{}.zip'.format(BASE_URI, res, var)
def download_archive(res: str, var: str):
    """Download the archive for (res, var) unless it already exists
    locally; the response body is streamed straight to disk."""
    target = local_file(res, var)
    if os.path.exists(target):
        return  # already downloaded
    pool = urllib3.PoolManager()
    with pool.request('GET', remote_file(res, var), preload_content=False) as resp:
        with open(target, 'wb') as archive:
            shutil.copyfileobj(resp, archive)
        resp.release_conn()
    pool.clear()
def process_band(
    band: numpy.ma.MaskedArray,
    nodata: int,
    dtype: str,
    scale: int
):
    """Scale a masked raster band, fill masked pixels with the nodata
    value, and cast the result to the target dtype (broadcast ops)."""
    scaled = band * scale  # mask is preserved through the multiplication
    filled = numpy.ma.filled(scaled, fill_value=nodata)
    return filled.astype(dtype)
def extract_normal(res: str, var: str):
    """Extract every .tif member of the downloaded archive into DATA_DIR,
    skipping files that were already extracted."""
    with zipfile.ZipFile(local_file(res, var), 'r') as archive:
        for member in archive.namelist():
            if not fnmatch.fnmatch(member, '*.tif'):
                continue
            if os.path.exists(f'{DATA_DIR}{member}'):
                continue  # already extracted
            archive.extract(member, DATA_DIR)
def extract_and_combine(res: str, var: str, vconfig):
    """Combine the 12 monthly GeoTIFFs inside a downloaded archive into
    one multi-band, tiled, pixel-interleaved GeoTIFF.

    Bands are read from the zip in sorted member order (presumably month
    order -- TODO confirm the naming scheme sorts chronologically) and
    each block is run through process_band with `vconfig` (nodata,
    dtype, scale) before being written out.

    NOTE(review): `zip` shadows the builtin; also, 'blocksize' may not
    be a recognized GTiff creation option (blockxsize/blockysize are) --
    confirm against the pinned rasterio version.
    """
    with zipfile.ZipFile(local_file(res, var), 'r') as zip, \
        ExitStack() as stack:
        # Open every .tif member in-memory; ExitStack keeps all the
        # rasterio datasets alive until the end of the with-block.
        bandfiles = [
            stack.enter_context(rasterio.open(io.BytesIO(zip.read(bandname)))) for bandname in
            sorted(bname for bname in zip.namelist() if fnmatch.fnmatch(bname, '*.tif'))
        ]
        # grab the profile from the first file and override for the outfile
        profile = bandfiles[0].profile
        profile.update({
            'driver': 'GTIFF',
            'dtype': vconfig['dtype'],
            'nodata': vconfig['nodata'],
            'count': len(bandfiles),
            'interleave': Interleaving.pixel.value,
            'compress': Compression.none.value,
            'tiled': True,
            'blocksize': 128,
            'overviews': None
        })
        with rasterio.open(local_file(res, var, 'tif'), mode='w', **profile) as outfile:
            # assume all the files have the same internal block layout
            for ij, window in bandfiles[0].block_windows(1):
                bands = [process_band(f.read(1, masked=True, window=window), **vconfig) for f in bandfiles]
                outfile.write(
                    numpy.asarray(bands),
                    window=window,
                )
@click.command()
@click.argument('res', type=click.Choice(choices=['10m', '5m', '2.5m', '10s']))
@click.option('--extract', default=False)
def download(res, extract):
    """Download, optionally extract, and combine every variable at the
    given resolution."""
    # blocks need to be in order; STREAMABLE_OUTPUT was meant to force
    # this (doesn't work)
    with rasterio.Env(STREAMABLE_OUTPUT=True):
        for var, config in VARS.items():
            print(var)
            download_archive(res, var)
            if extract:
                extract_normal(res, var)
            extract_and_combine(res, var, config)
# CLI entry point (click parses `res` and `--extract` from argv).
if __name__ == '__main__':
    download()
"os.path.exists",
"click.Choice",
"shutil.copyfileobj",
"click.option",
"rasterio.Env",
"numpy.asarray",
"numpy.ma.filled",
"urllib3.PoolManager",
"fnmatch.fnmatch",
"contextlib.ExitStack",
"click.command"
] | [((3097, 3112), 'click.command', 'click.command', ([], {}), '()\n', (3110, 3112), False, 'import click\n'), ((3194, 3234), 'click.option', 'click.option', (['"""--extract"""'], {'default': '(False)'}), "('--extract', default=False)\n", (3206, 3234), False, 'import click\n'), ((1201, 1222), 'os.path.exists', 'os.path.exists', (['lfile'], {}), '(lfile)\n', (1215, 1222), False, 'import os\n'), ((1245, 1266), 'urllib3.PoolManager', 'urllib3.PoolManager', ([], {}), '()\n', (1264, 1266), False, 'import urllib3\n'), ((1391, 1424), 'shutil.copyfileobj', 'shutil.copyfileobj', (['resp', 'archive'], {}), '(resp, archive)\n', (1409, 1424), False, 'import shutil\n'), ((2079, 2090), 'contextlib.ExitStack', 'ExitStack', ([], {}), '()\n', (2088, 2090), False, 'from contextlib import ExitStack\n'), ((3340, 3376), 'rasterio.Env', 'rasterio.Env', ([], {'STREAMABLE_OUTPUT': '(True)'}), '(STREAMABLE_OUTPUT=True)\n', (3352, 3376), False, 'import rasterio\n'), ((3141, 3191), 'click.Choice', 'click.Choice', ([], {'choices': "['10m', '5m', '2.5m', '10s']"}), "(choices=['10m', '5m', '2.5m', '10s'])\n", (3153, 3191), False, 'import click\n'), ((1637, 1685), 'numpy.ma.filled', 'numpy.ma.filled', (['(band * scale)'], {'fill_value': 'nodata'}), '(band * scale, fill_value=nodata)\n', (1652, 1685), False, 'import numpy\n'), ((1842, 1873), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['bname', '"""*.tif"""'], {}), "(bname, '*.tif')\n", (1857, 1873), False, 'import fnmatch\n'), ((1882, 1918), 'os.path.exists', 'os.path.exists', (['f"""{DATA_DIR}{bname}"""'], {}), "(f'{DATA_DIR}{bname}')\n", (1896, 1918), False, 'import os\n'), ((3038, 3058), 'numpy.asarray', 'numpy.asarray', (['bands'], {}), '(bands)\n', (3051, 3058), False, 'import numpy\n'), ((2258, 2289), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['bname', '"""*.tif"""'], {}), "(bname, '*.tif')\n", (2273, 2289), False, 'import fnmatch\n')] |
""" Utility functions for EO charge transfer inefficiency tests
"""
import numpy as np
import lsst.afw.math as afwMath
import lsst.geom as lsstGeom
__all__ = ["Estimator", "SubImage", "estimateCti"]
class Estimator:
"Abstraction for a point estimator of pixel data and its errors"
def __init__(self, *args, **kwds):
self.image = None
self.statCtrl = None
self.statistic = None
self.value = None
self.error = None
self._format_str = None
if args:
self.set_properties(*args, **kwds)
def set_properties(self, image, statCtrl, statistic=afwMath.MEAN, varWt=1):
# Make a deep copy of the input image so that we can convert to
# e- and have Poisson statistics apply.
self.image = image.clone()
self.statCtrl = statCtrl
self.statistic = statistic
self.varWt = varWt
self._compute_stats()
def _compute_stats(self):
if self.statCtrl is None:
makeStatistics = lambda *args: afwMath.makeStatistics(*args[:2])
else:
makeStatistics = lambda *args: afwMath.makeStatistics(*args[:3])
if self.statistic not in (afwMath.MEAN, afwMath.MEDIAN):
# In case other statistics are given, set error to zero for now.
self.value = makeStatistics(self.image.image, self.statistic,
self.statCtrl).getValue()
self.error = 0
return
# Compute the error assuming the statistic is afw.MEAN. For
# Gaussian stats, the error on the median is sqrt(pi/2.)
# times the error on the mean, but for Poisson stats, it is
# actually zero when the number of pixels is much larger than
# the expected count per pixel, but within factors of order
# unity to the error on the mean for numpix \la O(100)*count/pixel.
flags = self.statistic | afwMath.SUM | afwMath.MEAN # pylint: disable=no-member
stats = makeStatistics(self.image.image, flags, self.statCtrl)
pixel_sum = stats.getValue(afwMath.SUM) # pylint: disable=no-member
# Infer the number of pixels taking into account masking.
if pixel_sum == 0:
# Handle case where pixel_sum is zero (and hence the
# mean is zero).
self.value = 0
self.error = 0
return
npix = pixel_sum/stats.getValue(afwMath.MEAN) # pylint: disable=no-member
self.value = stats.getValue(self.statistic)
self.error = np.sqrt(pixel_sum/self.varWt)/npix
def __add__(self, other):
result = Estimator()
if isinstance(other, Estimator):
result.value = self.value + other.value
result.error = np.sqrt(self.error**2 + other.error**2)
else:
# Assume other is an int or float.
result.value = self.value + other
result.error = self.error
return result
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
result = Estimator()
if isinstance(other, Estimator):
result.value = self.value - other.value
result.error = np.sqrt(self.error**2 + other.error**2)
else:
# Assume other is an int or float.
result.value = self.value - other
result.error = self.error
return result
def __rsub__(self, other):
result = self.__sub__(other)
if isinstance(other, Estimator):
result.value *= -1
return result
def __mul__(self, other):
result = Estimator()
if isinstance(other, Estimator):
result.value = self.value*other.value
result.error = (np.abs(result.value)
* np.sqrt((self.error/self.value)**2
+ (other.error/other.value)**2))
else:
result.value = self.value*other
result.error = self.error*other
return result
def __rmul__(self, other):
return self.__mul__(other)
def __div__(self, other):
return self.__truediv__(other)
def __truediv__(self, other):
result = Estimator()
if isinstance(other, Estimator):
result.value = self.value/other.value
result.error = (np.abs(result.value)
* np.sqrt((self.error/self.value)**2
+ (other.error/other.value)**2))
else:
result.value = self.value/other
result.error = self.error/other
return result
def set_format_str(self, format_str):
self._format_str = format_str
def __repr__(self):
return self.__str__()
def __str__(self):
if self._format_str is None:
return "%s +/- %s" % (self.value, self.error)
return ' +/- '.join((self._format_str.format(self.value),
self._format_str.format(self.error)))
class SubImage:
"""Functor to produce sub-images depending on scan direction."""
def __init__(self, calibExp, amp, overscans, direction):
self.imaging = amp.getBBox()
self.image = calibExp
self.amp = amp
if direction == 'p':
self._bbox = self._parallelBox
llc = lsstGeom.Point2I(amp.getRawParallelOverscanBBox().getMinX(),
amp.getRawParallelOverscanBBox().getMinY() + overscans)
urc = amp.getRawParallelOverscanBBox().getCorners()[2]
self._biasReg = lsstGeom.Box2I(llc, urc)
self.lastpix = amp.getRawDataBBox().getMaxY()
return
if direction == 's':
self._bbox = self._serialBox
llc = lsstGeom.Point2I(amp.getRawSerialOverscanBBox().getMinX() + overscans,
amp.getRawSerialOverscanBBox().getMinY())
urc = amp.getRawSerialOverscanBBox().getCorners()[2]
#
# Omit the last 4 columns to avoid the bright column in the
# last overscan column in the e2v vendor data.
#
urc[0] -= 4
self._biasReg = lsstGeom.Box2I(llc, urc)
self.lastpix = amp.getRawDataBBox().getMaxX()
return
raise ValueError("Unknown scan direction: " + str(direction))
def biasEst(self, statistic=afwMath.MEAN):
subim = self.image.Factory(self.image, self._biasReg)
biasEstimate = Estimator()
biasEstimate.value = afwMath.makeStatistics(subim.image, statistic).getValue()
num_pix = len(subim.getImage().getArray().flatten())
biasEstimate.error = afwMath.makeStatistics(subim.image, afwMath.STDEV).getValue()\
/ np.sqrt(float(num_pix)) # pylint: disable=no-member
return biasEstimate
def __call__(self, start, end=None):
if end is None:
end = start
my_exp = self.image.Factory(self.image, self._bbox(start, end))
return my_exp
def _parallelBox(self, start, end):
llc = lsstGeom.PointI(self.amp.getRawDataBBox().getMinX(), start)
urc = lsstGeom.PointI(self.amp.getRawDataBBox().getMaxX(), end)
return lsstGeom.BoxI(llc, urc)
def _serialBox(self, start, end):
llc = lsstGeom.PointI(start, self.amp.getRawDataBBox().getMinY())
urc = lsstGeom.PointI(end, self.amp.getRawDataBBox().getMaxY())
return lsstGeom.BoxI(llc, urc)
def estimateCti(calibExp, amp, direction, overscans, statCtrl):
nFrames = 10 # alibExp.meta['nFrames']
subimage = SubImage(calibExp, amp, overscans, direction)
lastpix = subimage.lastpix
# find signal in last image vector (i.e., row or column)
lastIm = Estimator(subimage(lastpix), statCtrl, varWt=nFrames)
# find signal in each overscan vector
overscan_ests = []
for i in range(1, overscans+1):
overscan_ests.append(Estimator(subimage(lastpix+i), statCtrl, varWt=nFrames))
# sum medians of first n overscan rows
summed = sum(overscan_ests)
# Find bias level.
biasEst = subimage.biasEst(statistic=afwMath.MEAN)
# signal = last - bias
sig = lastIm - biasEst
# trailed = sum(last2) - bias
trailed = summed - overscans*biasEst
# charge loss per transfer = (trailed/signal)/N
chargelosspt = (trailed/sig)/(lastpix + 1.)
return chargelosspt
| [
"numpy.abs",
"numpy.sqrt",
"lsst.geom.Box2I",
"lsst.afw.math.makeStatistics",
"lsst.geom.BoxI"
] | [((7271, 7294), 'lsst.geom.BoxI', 'lsstGeom.BoxI', (['llc', 'urc'], {}), '(llc, urc)\n', (7284, 7294), True, 'import lsst.geom as lsstGeom\n'), ((7495, 7518), 'lsst.geom.BoxI', 'lsstGeom.BoxI', (['llc', 'urc'], {}), '(llc, urc)\n', (7508, 7518), True, 'import lsst.geom as lsstGeom\n'), ((2554, 2585), 'numpy.sqrt', 'np.sqrt', (['(pixel_sum / self.varWt)'], {}), '(pixel_sum / self.varWt)\n', (2561, 2585), True, 'import numpy as np\n'), ((2769, 2812), 'numpy.sqrt', 'np.sqrt', (['(self.error ** 2 + other.error ** 2)'], {}), '(self.error ** 2 + other.error ** 2)\n', (2776, 2812), True, 'import numpy as np\n'), ((3223, 3266), 'numpy.sqrt', 'np.sqrt', (['(self.error ** 2 + other.error ** 2)'], {}), '(self.error ** 2 + other.error ** 2)\n', (3230, 3266), True, 'import numpy as np\n'), ((5618, 5642), 'lsst.geom.Box2I', 'lsstGeom.Box2I', (['llc', 'urc'], {}), '(llc, urc)\n', (5632, 5642), True, 'import lsst.geom as lsstGeom\n'), ((6232, 6256), 'lsst.geom.Box2I', 'lsstGeom.Box2I', (['llc', 'urc'], {}), '(llc, urc)\n', (6246, 6256), True, 'import lsst.geom as lsstGeom\n'), ((1032, 1065), 'lsst.afw.math.makeStatistics', 'afwMath.makeStatistics', (['*args[:2]'], {}), '(*args[:2])\n', (1054, 1065), True, 'import lsst.afw.math as afwMath\n'), ((1123, 1156), 'lsst.afw.math.makeStatistics', 'afwMath.makeStatistics', (['*args[:3]'], {}), '(*args[:3])\n', (1145, 1156), True, 'import lsst.afw.math as afwMath\n'), ((3772, 3792), 'numpy.abs', 'np.abs', (['result.value'], {}), '(result.value)\n', (3778, 3792), True, 'import numpy as np\n'), ((3823, 3897), 'numpy.sqrt', 'np.sqrt', (['((self.error / self.value) ** 2 + (other.error / other.value) ** 2)'], {}), '((self.error / self.value) ** 2 + (other.error / other.value) ** 2)\n', (3830, 3897), True, 'import numpy as np\n'), ((4373, 4393), 'numpy.abs', 'np.abs', (['result.value'], {}), '(result.value)\n', (4379, 4393), True, 'import numpy as np\n'), ((4424, 4498), 'numpy.sqrt', 'np.sqrt', (['((self.error / self.value) ** 2 + 
(other.error / other.value) ** 2)'], {}), '((self.error / self.value) ** 2 + (other.error / other.value) ** 2)\n', (4431, 4498), True, 'import numpy as np\n'), ((6578, 6624), 'lsst.afw.math.makeStatistics', 'afwMath.makeStatistics', (['subim.image', 'statistic'], {}), '(subim.image, statistic)\n', (6600, 6624), True, 'import lsst.afw.math as afwMath\n'), ((6726, 6776), 'lsst.afw.math.makeStatistics', 'afwMath.makeStatistics', (['subim.image', 'afwMath.STDEV'], {}), '(subim.image, afwMath.STDEV)\n', (6748, 6776), True, 'import lsst.afw.math as afwMath\n')] |
# Authors : <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License : BSD 3-clause
import os.path as op
import warnings
from nose.tools import assert_true, assert_equal
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_allclose
from scipy import fftpack
from mne import read_events, Epochs
from mne.io import read_raw_fif
from mne.time_frequency._stockwell import (tfr_stockwell, _st,
_precompute_st_windows,
_check_input_st,
_st_power_itc)
from mne.time_frequency.tfr import AverageTFR
from mne.utils import run_tests_if_main
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
def test_stockwell_check_input():
"""Test input checker for stockwell"""
# check for data size equal and unequal to a power of 2
for last_dim in (127, 128):
data = np.zeros((2, 10, last_dim))
with warnings.catch_warnings(record=True): # 127 < n_fft
x_in, n_fft, zero_pad = _check_input_st(data, None)
assert_equal(x_in.shape, (2, 10, 128))
assert_equal(n_fft, 128)
assert_equal(zero_pad, 128 - last_dim)
def test_stockwell_st_no_zero_pad():
"""Test stockwell power itc"""
data = np.zeros((20, 128))
start_f = 1
stop_f = 10
sfreq = 30
width = 2
W = _precompute_st_windows(data.shape[-1], start_f, stop_f, sfreq, width)
_st_power_itc(data, 10, True, 0, 1, W)
def test_stockwell_core():
"""Test stockwell transform."""
# adapted from
# http://vcs.ynic.york.ac.uk/docs/naf/intro/concepts/timefreq.html
sfreq = 1000.0 # make things easy to understand
dur = 0.5
onset, offset = 0.175, 0.275
n_samp = int(sfreq * dur)
t = np.arange(n_samp) / sfreq # make an array for time
pulse_freq = 15.
pulse = np.cos(2. * np.pi * pulse_freq * t)
pulse[0:int(onset * sfreq)] = 0. # Zero before our desired pulse
pulse[int(offset * sfreq):] = 0. # and zero after our desired pulse
width = 0.5
freqs = fftpack.fftfreq(len(pulse), 1. / sfreq)
fmin, fmax = 1.0, 100.0
start_f, stop_f = [np.abs(freqs - f).argmin() for f in (fmin, fmax)]
W = _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width)
st_pulse = _st(pulse, start_f, W)
st_pulse = np.abs(st_pulse) ** 2
assert_equal(st_pulse.shape[-1], len(pulse))
st_max_freq = freqs[st_pulse.max(axis=1).argmax(axis=0)] # max freq
assert_allclose(st_max_freq, pulse_freq, atol=1.0)
assert_true(onset < t[st_pulse.max(axis=0).argmax(axis=0)] < offset)
# test inversion to FFT, by averaging local spectra, see eq. 5 in
# Moukadem, A., <NAME>., <NAME>. and <NAME>.
# "Stockwell transform optimization applied on the detection of split in
# heart sounds."
width = 1.0
start_f, stop_f = 0, len(pulse)
W = _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width)
y = _st(pulse, start_f, W)
# invert stockwell
y_inv = fftpack.ifft(np.sum(y, axis=1)).real
assert_array_almost_equal(pulse, y_inv)
def test_stockwell_api():
"""Test stockwell functions."""
raw = read_raw_fif(raw_fname)
event_id, tmin, tmax = 1, -0.2, 0.5
event_name = op.join(base_dir, 'test-eve.fif')
events = read_events(event_name)
epochs = Epochs(raw, events, # XXX pick 2 has epochs of zeros.
event_id, tmin, tmax, picks=[0, 1, 3])
for fmin, fmax in [(None, 50), (5, 50), (5, None)]:
with warnings.catch_warnings(record=True): # zero papdding
power, itc = tfr_stockwell(epochs, fmin=fmin, fmax=fmax,
return_itc=True)
if fmax is not None:
assert_true(power.freqs.max() <= fmax)
with warnings.catch_warnings(record=True): # padding
power_evoked = tfr_stockwell(epochs.average(), fmin=fmin,
fmax=fmax, return_itc=False)
# for multitaper these don't necessarily match, but they seem to
# for stockwell... if this fails, this maybe could be changed
# just to check the shape
assert_array_almost_equal(power_evoked.data, power.data)
assert_true(isinstance(power, AverageTFR))
assert_true(isinstance(itc, AverageTFR))
assert_equal(power.data.shape, itc.data.shape)
assert_true(itc.data.min() >= 0.0)
assert_true(itc.data.max() <= 1.0)
assert_true(np.log(power.data.max()) * 20 <= 0.0)
assert_true(np.log(power.data.max()) * 20 <= 0.0)
run_tests_if_main()
| [
"mne.time_frequency._stockwell._precompute_st_windows",
"mne.utils.run_tests_if_main",
"nose.tools.assert_equal",
"numpy.arange",
"numpy.testing.assert_array_almost_equal",
"mne.io.read_raw_fif",
"numpy.testing.assert_allclose",
"mne.time_frequency._stockwell._st_power_itc",
"numpy.abs",
"mne.time... | [((781, 814), 'os.path.join', 'op.join', (['base_dir', '"""test_raw.fif"""'], {}), "(base_dir, 'test_raw.fif')\n", (788, 814), True, 'import os.path as op\n'), ((4656, 4675), 'mne.utils.run_tests_if_main', 'run_tests_if_main', ([], {}), '()\n', (4673, 4675), False, 'from mne.utils import run_tests_if_main\n'), ((712, 732), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (722, 732), True, 'import os.path as op\n'), ((1373, 1392), 'numpy.zeros', 'np.zeros', (['(20, 128)'], {}), '((20, 128))\n', (1381, 1392), True, 'import numpy as np\n'), ((1462, 1531), 'mne.time_frequency._stockwell._precompute_st_windows', '_precompute_st_windows', (['data.shape[-1]', 'start_f', 'stop_f', 'sfreq', 'width'], {}), '(data.shape[-1], start_f, stop_f, sfreq, width)\n', (1484, 1531), False, 'from mne.time_frequency._stockwell import tfr_stockwell, _st, _precompute_st_windows, _check_input_st, _st_power_itc\n'), ((1536, 1574), 'mne.time_frequency._stockwell._st_power_itc', '_st_power_itc', (['data', '(10)', '(True)', '(0)', '(1)', 'W'], {}), '(data, 10, True, 0, 1, W)\n', (1549, 1574), False, 'from mne.time_frequency._stockwell import tfr_stockwell, _st, _precompute_st_windows, _check_input_st, _st_power_itc\n'), ((1954, 1990), 'numpy.cos', 'np.cos', (['(2.0 * np.pi * pulse_freq * t)'], {}), '(2.0 * np.pi * pulse_freq * t)\n', (1960, 1990), True, 'import numpy as np\n'), ((2324, 2385), 'mne.time_frequency._stockwell._precompute_st_windows', '_precompute_st_windows', (['n_samp', 'start_f', 'stop_f', 'sfreq', 'width'], {}), '(n_samp, start_f, stop_f, sfreq, width)\n', (2346, 2385), False, 'from mne.time_frequency._stockwell import tfr_stockwell, _st, _precompute_st_windows, _check_input_st, _st_power_itc\n'), ((2402, 2424), 'mne.time_frequency._stockwell._st', '_st', (['pulse', 'start_f', 'W'], {}), '(pulse, start_f, W)\n', (2405, 2424), False, 'from mne.time_frequency._stockwell import tfr_stockwell, _st, _precompute_st_windows, _check_input_st, 
_st_power_itc\n'), ((2588, 2638), 'numpy.testing.assert_allclose', 'assert_allclose', (['st_max_freq', 'pulse_freq'], {'atol': '(1.0)'}), '(st_max_freq, pulse_freq, atol=1.0)\n', (2603, 2638), False, 'from numpy.testing import assert_array_almost_equal, assert_allclose\n'), ((2991, 3052), 'mne.time_frequency._stockwell._precompute_st_windows', '_precompute_st_windows', (['n_samp', 'start_f', 'stop_f', 'sfreq', 'width'], {}), '(n_samp, start_f, stop_f, sfreq, width)\n', (3013, 3052), False, 'from mne.time_frequency._stockwell import tfr_stockwell, _st, _precompute_st_windows, _check_input_st, _st_power_itc\n'), ((3061, 3083), 'mne.time_frequency._stockwell._st', '_st', (['pulse', 'start_f', 'W'], {}), '(pulse, start_f, W)\n', (3064, 3083), False, 'from mne.time_frequency._stockwell import tfr_stockwell, _st, _precompute_st_windows, _check_input_st, _st_power_itc\n'), ((3160, 3199), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['pulse', 'y_inv'], {}), '(pulse, y_inv)\n', (3185, 3199), False, 'from numpy.testing import assert_array_almost_equal, assert_allclose\n'), ((3274, 3297), 'mne.io.read_raw_fif', 'read_raw_fif', (['raw_fname'], {}), '(raw_fname)\n', (3286, 3297), False, 'from mne.io import read_raw_fif\n'), ((3355, 3388), 'os.path.join', 'op.join', (['base_dir', '"""test-eve.fif"""'], {}), "(base_dir, 'test-eve.fif')\n", (3362, 3388), True, 'import os.path as op\n'), ((3402, 3425), 'mne.read_events', 'read_events', (['event_name'], {}), '(event_name)\n', (3413, 3425), False, 'from mne import read_events, Epochs\n'), ((3439, 3497), 'mne.Epochs', 'Epochs', (['raw', 'events', 'event_id', 'tmin', 'tmax'], {'picks': '[0, 1, 3]'}), '(raw, events, event_id, tmin, tmax, picks=[0, 1, 3])\n', (3445, 3497), False, 'from mne import read_events, Epochs\n'), ((4422, 4468), 'nose.tools.assert_equal', 'assert_equal', (['power.data.shape', 'itc.data.shape'], {}), '(power.data.shape, itc.data.shape)\n', (4434, 4468), False, 'from nose.tools import 
assert_true, assert_equal\n'), ((1002, 1029), 'numpy.zeros', 'np.zeros', (['(2, 10, last_dim)'], {}), '((2, 10, last_dim))\n', (1010, 1029), True, 'import numpy as np\n'), ((1169, 1207), 'nose.tools.assert_equal', 'assert_equal', (['x_in.shape', '(2, 10, 128)'], {}), '(x_in.shape, (2, 10, 128))\n', (1181, 1207), False, 'from nose.tools import assert_true, assert_equal\n'), ((1216, 1240), 'nose.tools.assert_equal', 'assert_equal', (['n_fft', '(128)'], {}), '(n_fft, 128)\n', (1228, 1240), False, 'from nose.tools import assert_true, assert_equal\n'), ((1249, 1287), 'nose.tools.assert_equal', 'assert_equal', (['zero_pad', '(128 - last_dim)'], {}), '(zero_pad, 128 - last_dim)\n', (1261, 1287), False, 'from nose.tools import assert_true, assert_equal\n'), ((1868, 1885), 'numpy.arange', 'np.arange', (['n_samp'], {}), '(n_samp)\n', (1877, 1885), True, 'import numpy as np\n'), ((2440, 2456), 'numpy.abs', 'np.abs', (['st_pulse'], {}), '(st_pulse)\n', (2446, 2456), True, 'import numpy as np\n'), ((4269, 4325), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['power_evoked.data', 'power.data'], {}), '(power_evoked.data, power.data)\n', (4294, 4325), False, 'from numpy.testing import assert_array_almost_equal, assert_allclose\n'), ((1043, 1079), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (1066, 1079), False, 'import warnings\n'), ((1132, 1159), 'mne.time_frequency._stockwell._check_input_st', '_check_input_st', (['data', 'None'], {}), '(data, None)\n', (1147, 1159), False, 'from mne.time_frequency._stockwell import tfr_stockwell, _st, _precompute_st_windows, _check_input_st, _st_power_itc\n'), ((3132, 3149), 'numpy.sum', 'np.sum', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (3138, 3149), True, 'import numpy as np\n'), ((3622, 3658), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (3645, 3658), False, 'import warnings\n'), ((3702, 3762), 
'mne.time_frequency._stockwell.tfr_stockwell', 'tfr_stockwell', (['epochs'], {'fmin': 'fmin', 'fmax': 'fmax', 'return_itc': '(True)'}), '(epochs, fmin=fmin, fmax=fmax, return_itc=True)\n', (3715, 3762), False, 'from mne.time_frequency._stockwell import tfr_stockwell, _st, _precompute_st_windows, _check_input_st, _st_power_itc\n'), ((3895, 3931), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (3918, 3931), False, 'import warnings\n'), ((2266, 2283), 'numpy.abs', 'np.abs', (['(freqs - f)'], {}), '(freqs - f)\n', (2272, 2283), True, 'import numpy as np\n')] |
from pathlib import Path
import numpy as np
import pytest
from helpers import get_expected_if_it_exists
from nanomesh.image2mesh import plane2mesh
from nanomesh.image2mesh._mesher2d import Polygon
def block_image(shape=(10, 10)):
"""Generate test array with 4 block quadrants filled with 1 or 0."""
i, j = (np.array(shape) / 2).astype(int)
image = np.zeros(shape)
image[:i, :j] = 1
image[-i:, -j:] = 1
return image
@pytest.mark.xfail(pytest.OS_DOES_NOT_MATCH_DATA_GEN,
raises=AssertionError,
reason=('https://github.com/hpgem/nanomesh/issues/144'))
def test_plane2mesh(segmented_image):
"""Test 2D mesh generation and plot."""
np.random.seed(1234) # set seed for reproducible clustering
mesh = plane2mesh(segmented_image,
max_edge_dist=4,
plot=True,
opts='q30a100')
fn = Path('segmented_mesh_2d.msh')
expected_mesh = get_expected_if_it_exists(fn, result=mesh)
assert mesh.points.shape[1] == 2
assert mesh.points.shape == expected_mesh.points.shape
np.testing.assert_allclose(mesh.points, expected_mesh.points)
cell_types = mesh.cells_dict.keys()
assert cell_types == expected_mesh.cells_dict.keys()
for cell_type in cell_types:
cells = mesh.cells_dict[cell_type]
expected_cells = expected_mesh.cells_dict[cell_type]
assert cells.shape == expected_cells.shape
np.testing.assert_allclose(cells, expected_cells)
data_keys = mesh.cell_data_dict.keys()
for data_key in data_keys:
for cell_type in cell_types:
data = mesh.cell_data_dict[data_key][cell_type]
expected_data = expected_mesh.cell_data_dict[data_key][cell_type]
np.testing.assert_allclose(data, expected_data)
def test_subdivide_polygon():
"""Test polygon subdivision."""
polygon = Polygon(np.array([[0, 0], [0, 6], [2, 6], [2, 0], [0, 0]]))
ret = polygon.subdivide(max_dist=2)
expected_points = np.array([[0., 0.], [0., 2.], [0., 4.], [0.,
6.], [2., 6.],
[2., 4.], [2., 2.], [2., 0.], [0., 0.]])
assert np.all(ret.points == expected_points)
@pytest.mark.parametrize(
'coords,expected_corner',
(
([[0, 3], [5, 5], [0, 7]], None),
([[0, 3], [5, 5], [3, 0]], [0, 0]), # bottom, left
([[3, 0], [5, 5], [0, 3]], [0, 0]), # bottom, left
([[9, 17], [5, 15], [17, 19]], None),
([[9, 5], [7, 4], [4, 0]], [9, 0]), # bottom, right
([[0, 17], [5, 15], [3, 19]], [0, 19]), # top, left
([[9, 17], [5, 15], [3, 19]], [9, 19]), # top, right
([[5, 5], [5, 7], [6, 6]], None),
))
def test_close_contour(coords, expected_corner):
image_shape = 10, 20
polygon = Polygon(np.array(coords))
n_rows = polygon.points.shape[1]
ret = polygon.close_corner(image_shape)
is_corner = (expected_corner is not None)
if is_corner:
ret.points.shape[1] == n_rows + 1
corner = ret.points[-1]
np.testing.assert_equal(corner, expected_corner)
else:
ret.points.shape[1] == n_rows
| [
"numpy.testing.assert_equal",
"pathlib.Path",
"pytest.mark.xfail",
"numpy.testing.assert_allclose",
"nanomesh.image2mesh.plane2mesh",
"pytest.mark.parametrize",
"numpy.zeros",
"numpy.array",
"numpy.random.seed",
"helpers.get_expected_if_it_exists",
"numpy.all"
] | [((446, 580), 'pytest.mark.xfail', 'pytest.mark.xfail', (['pytest.OS_DOES_NOT_MATCH_DATA_GEN'], {'raises': 'AssertionError', 'reason': '"""https://github.com/hpgem/nanomesh/issues/144"""'}), "(pytest.OS_DOES_NOT_MATCH_DATA_GEN, raises=AssertionError,\n reason='https://github.com/hpgem/nanomesh/issues/144')\n", (463, 580), False, 'import pytest\n'), ((2291, 2654), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""coords,expected_corner"""', '(([[0, 3], [5, 5], [0, 7]], None), ([[0, 3], [5, 5], [3, 0]], [0, 0]), ([[3,\n 0], [5, 5], [0, 3]], [0, 0]), ([[9, 17], [5, 15], [17, 19]], None), ([[\n 9, 5], [7, 4], [4, 0]], [9, 0]), ([[0, 17], [5, 15], [3, 19]], [0, 19]),\n ([[9, 17], [5, 15], [3, 19]], [9, 19]), ([[5, 5], [5, 7], [6, 6]], None))'], {}), "('coords,expected_corner', (([[0, 3], [5, 5], [0, 7]\n ], None), ([[0, 3], [5, 5], [3, 0]], [0, 0]), ([[3, 0], [5, 5], [0, 3]],\n [0, 0]), ([[9, 17], [5, 15], [17, 19]], None), ([[9, 5], [7, 4], [4, 0]\n ], [9, 0]), ([[0, 17], [5, 15], [3, 19]], [0, 19]), ([[9, 17], [5, 15],\n [3, 19]], [9, 19]), ([[5, 5], [5, 7], [6, 6]], None)))\n", (2314, 2654), False, 'import pytest\n'), ((364, 379), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (372, 379), True, 'import numpy as np\n'), ((703, 723), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (717, 723), True, 'import numpy as np\n'), ((775, 846), 'nanomesh.image2mesh.plane2mesh', 'plane2mesh', (['segmented_image'], {'max_edge_dist': '(4)', 'plot': '(True)', 'opts': '"""q30a100"""'}), "(segmented_image, max_edge_dist=4, plot=True, opts='q30a100')\n", (785, 846), False, 'from nanomesh.image2mesh import plane2mesh\n'), ((923, 952), 'pathlib.Path', 'Path', (['"""segmented_mesh_2d.msh"""'], {}), "('segmented_mesh_2d.msh')\n", (927, 952), False, 'from pathlib import Path\n'), ((973, 1015), 'helpers.get_expected_if_it_exists', 'get_expected_if_it_exists', (['fn'], {'result': 'mesh'}), '(fn, result=mesh)\n', (998, 1015), False, 'from helpers 
import get_expected_if_it_exists\n'), ((1117, 1178), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['mesh.points', 'expected_mesh.points'], {}), '(mesh.points, expected_mesh.points)\n', (1143, 1178), True, 'import numpy as np\n'), ((2042, 2164), 'numpy.array', 'np.array', (['[[0.0, 0.0], [0.0, 2.0], [0.0, 4.0], [0.0, 6.0], [2.0, 6.0], [2.0, 4.0], [\n 2.0, 2.0], [2.0, 0.0], [0.0, 0.0]]'], {}), '([[0.0, 0.0], [0.0, 2.0], [0.0, 4.0], [0.0, 6.0], [2.0, 6.0], [2.0,\n 4.0], [2.0, 2.0], [2.0, 0.0], [0.0, 0.0]])\n', (2050, 2164), True, 'import numpy as np\n'), ((2250, 2287), 'numpy.all', 'np.all', (['(ret.points == expected_points)'], {}), '(ret.points == expected_points)\n', (2256, 2287), True, 'import numpy as np\n'), ((1475, 1524), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cells', 'expected_cells'], {}), '(cells, expected_cells)\n', (1501, 1524), True, 'import numpy as np\n'), ((1926, 1976), 'numpy.array', 'np.array', (['[[0, 0], [0, 6], [2, 6], [2, 0], [0, 0]]'], {}), '([[0, 0], [0, 6], [2, 6], [2, 0], [0, 0]])\n', (1934, 1976), True, 'import numpy as np\n'), ((2889, 2905), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (2897, 2905), True, 'import numpy as np\n'), ((3138, 3186), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['corner', 'expected_corner'], {}), '(corner, expected_corner)\n', (3161, 3186), True, 'import numpy as np\n'), ((1788, 1835), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['data', 'expected_data'], {}), '(data, expected_data)\n', (1814, 1835), True, 'import numpy as np\n'), ((319, 334), 'numpy.array', 'np.array', (['shape'], {}), '(shape)\n', (327, 334), True, 'import numpy as np\n')] |
import pandas as pnd
import numpy as np
import JMPEstadisticas as jmp
import os
current_dir = os.path.dirname(os.path.realpath(__file__))
filename = os.path.join(current_dir, 'datos.csv')
raw_data = open(filename)
data = np.loadtxt(raw_data, delimiter=";",skiprows=1)
data=pnd.DataFrame({'Pesos':data})
stats = jmp.JMPEstadisticas(data["Pesos"])
stats.analisisCaracteristica()
| [
"os.path.join",
"JMPEstadisticas.JMPEstadisticas",
"os.path.realpath",
"pandas.DataFrame",
"numpy.loadtxt"
] | [((150, 188), 'os.path.join', 'os.path.join', (['current_dir', '"""datos.csv"""'], {}), "(current_dir, 'datos.csv')\n", (162, 188), False, 'import os\n'), ((222, 269), 'numpy.loadtxt', 'np.loadtxt', (['raw_data'], {'delimiter': '""";"""', 'skiprows': '(1)'}), "(raw_data, delimiter=';', skiprows=1)\n", (232, 269), True, 'import numpy as np\n'), ((274, 304), 'pandas.DataFrame', 'pnd.DataFrame', (["{'Pesos': data}"], {}), "({'Pesos': data})\n", (287, 304), True, 'import pandas as pnd\n'), ((312, 346), 'JMPEstadisticas.JMPEstadisticas', 'jmp.JMPEstadisticas', (["data['Pesos']"], {}), "(data['Pesos'])\n", (331, 346), True, 'import JMPEstadisticas as jmp\n'), ((111, 137), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (127, 137), False, 'import os\n')] |
#!/usr/bin/env python3
from __future__ import division, print_function
import string, time, random, msvcrt
import sounddevice as sd
import numpy as np
from scipy import io
import scipy.io.wavfile
import csv
from collections import Counter
import morse
SPS = 8000
LETTERS = string.ascii_uppercase
FREQ = 750
WPM = 25
FS = 10
AUDIO_PADDING = 0.5 # Seconds
CLICK_SMOOTH = 2 # Tone periods
def main(freq, wpm, fs, prompt, force, limit, length, outFile, inFile):
messages = wordFinder(limit, length)
print('Message =', messages)
if prompt:
# Load spoken letter WAV files
letterNames = loadLetterNames()
sps = letterNames[LETTERS[0]][0]
else:
sps = SPS
print('Audio samples per second =', sps)
print('Tone period =', round(1000/freq, 1), 'ms')
dps = morse.wpmToDps(wpm) # Dots per second
mspd = 1000/dps # Dot duration in milliseconds
farnsworthScale = morse.farnsworthScaleFactor(wpm, fs)
print('Dot width =', round(mspd, 1), 'ms')
print('Dash width =', int(round(mspd * morse.DASH_WIDTH)), 'ms')
print('Character space =', int(round(mspd * morse.CHAR_SPACE * farnsworthScale)), 'ms')
print('Word space =', int(round(mspd * morse.WORD_SPACE * farnsworthScale)), 'ms')
print()
print("Hit <ENTER> to start.")
input()
continue_with_test = True
while continue_with_test:
missed_count = 0
continue_with_test = False
retest_messages = []
for message in messages:
# Compute morse code audio from plain text
playMessage(message, sps, wpm, fs, freq)
print('Enter message:')
start = time.time()
check = input()
if check.upper() == message.upper():
end = time.time()
print('Correct! [', '{:.2f}'.format(end-start), 's]')
else:
print('Wrong. The correct answer is ', message)
retest_messages.append(message)
continue_with_test = True
missed_count = missed_count + 1
print('You missed ', missed_count, '. ')
if force:
print('Retesting missed message...')
message = retest_messages
else:
continue_with_test = False
def wordFinder(limit, length):
messages = []
with open('dictionary.txt') as fin:
lines = (word.strip().upper() for word in fin)
words = [(word, Counter(word)) for word in lines]
rack = Counter(''.join(list(string.ascii_uppercase)))
for training_word, letter_count in words:
# Using length here to limit output for example purposes
if len(training_word) == length and not (letter_count - rack):
messages.append(training_word)
random.shuffle(messages)
messages = random.choices(messages, k=limit)
return messages
def playMessage(message, sps, wpm, fs, freq):
audio = stringToMorseAudio(message, sps, wpm, fs, freq, 0.5, None, promptVolume=0.3)
audio /= 2
playBlock(audio, sps)
time.sleep(0.1)
def addAudio(base, new, offset):
if base is None:
base = np.array([], dtype=np.float32)
assert offset >= 0
lenBase, lenNew = len(base), len(new)
if offset+lenNew > lenBase:
# Make base longer by padding with zeros
base = np.append(base, np.zeros(offset+lenNew-lenBase))
base[offset:offset+lenNew] += new
return base
def boolArrToSwitchedTone(boolArr, freq, sps, volume=1.0):
''' Create the tone audio from a bool array representation of morse code. '''
weightLen = int(CLICK_SMOOTH*sps/freq)
if weightLen % 2 == 0:
weightLen += 1 # Make sure the weight array is odd length
smoothingWeights = np.concatenate((np.arange(1, weightLen//2+1), np.arange(weightLen//2+1, 0, -1)))
smoothingWeights = smoothingWeights / np.sum(smoothingWeights)
numSamplesPadding = int(sps*AUDIO_PADDING) + int((weightLen-1)/2)
padding = np.zeros(numSamplesPadding, dtype=bool)
boolArr = np.concatenate((padding, boolArr, padding)).astype(np.float32)
if CLICK_SMOOTH <= 0:
smoothBoolArr = boolArr
else:
smoothBoolArr = np.correlate(boolArr, smoothingWeights, 'valid')
numSamples = len(smoothBoolArr)
x = np.arange(numSamples)
toneArr = np.sin(x * (freq*2*np.pi/sps)) * volume
toneArr *= smoothBoolArr
return toneArr
def stringToMorseAudio(message, sps=SPS, wpm=WPM, fs=FS, freq=FREQ, volume=1.0, letterPrompts=None, promptVolume=1.0):
    """Convert ``message`` to a morse-code tone audio array.

    sps: samples per second; wpm: words per minute; fs: Farnsworth speed;
    freq: tone frequency.  If ``letterPrompts`` is given it maps letters to
    (rate, audio) pairs (see loadLetterNames); each prompt is mixed in at
    ``promptVolume`` aligned with that letter's morse audio.
    """
    message = message.upper()
    code = morse.stringToMorse(message)
    boolArr = morse.morseToBoolArr(code, sps, wpm, fs)
    audio = boolArrToSwitchedTone(boolArr, freq, sps, volume)
    numSamplesPadding = int(sps*AUDIO_PADDING)
    if letterPrompts is not None:
        for i in range(len(message)):
            l = message[i]
            if l in letterPrompts:
                # Cumulative duration through letter i, minus this letter's
                # own duration, gives where this letter's morse begins.
                offsetPlus = morse.morseSampleDuration(morse.stringToMorse(message[:i+1]), sps, wpm, fs)
                letterDuration = morse.morseSampleDuration(morse.letterToMorse(message[i]), sps, wpm, fs)
                offset = numSamplesPadding + offsetPlus - letterDuration
                audio = addAudio(audio, letterPrompts[l][1]*promptVolume, offset)
    return audio
def loadLetterNames(pathTemplate='audio/letter-names/%s_.wav', letters=LETTERS):
    """Load a spoken-name WAV per letter; returns {letter: loadWav(path)}."""
    return {letter: loadWav(pathTemplate % letter) for letter in letters}
def loadWav(fName):
    """Read a WAV file.

    Returns (sample_rate, samples) with samples converted to float32 and
    scaled into [-1, 1] by the dtype's maximum magnitude.
    """
    # Fixed: scipy.io.wavfile has no 'getHistory'; 'read' is the loading API.
    rate, data = io.wavfile.read(fName, mmap=True)
    dataScale = data.astype(np.float32) / maxDtypeVolume(data.dtype)
    return rate, dataScale
def maxDtypeVolume(dtype):
    """Largest representable magnitude for ``dtype`` (1.0 for float types)."""
    try:
        # Integer dtypes: use the type's maximum value.
        return np.iinfo(dtype).max
    except ValueError:
        # np.iinfo rejects non-integer dtypes; floats are already in [-1, 1].
        return 1.0
def playLetterNamesBlock(letterNames):
    """Play every letter's spoken name back-to-back, blocking until done."""
    # All prompts are assumed to share the first letter's sample rate —
    # TODO confirm the source WAVs really do.
    sps = letterNames[LETTERS[0]][0]
    clips = [letterNames[letter][1] for letter in LETTERS]
    playBlock(np.concatenate(clips), sps)
def genTone(frequency, duration, sps=SPS, volume=1.0):
    """Generate ``duration`` seconds of a sine tone at ``frequency`` Hz."""
    t = np.arange(sps * duration)
    return np.sin(t * (frequency * 2 * np.pi / sps)) * volume
def playTone(*args, **kwargs):
    """Generate a tone (arguments as for genTone) and start playing it."""
    tone = genTone(*args, **kwargs)
    play(tone)
def play(array, sps=SPS):
    """Start asynchronous playback of ``array`` at ``sps`` samples/second."""
    samples = array.astype(np.float32)
    sd.play(samples, sps)
def waitFor(array, sps=SPS):
    """Sleep for the playback duration of ``array`` at ``sps`` samples/second."""
    time.sleep(len(array) / sps)
def playBlock(array, sps=SPS):
    """Play ``array`` and block until playback should have finished."""
    # Playback is asynchronous; sleeping for the clip's duration makes
    # this call effectively blocking.
    play(array, sps)
    waitFor(array, sps)
if __name__ == '__main__':
    import sys, argparse
    # Command-line front end; defaults come from the module-level
    # FREQ/WPM/FS constants.
    parser = argparse.ArgumentParser(description='Convert text to morse code audio.')
    parser.add_argument('-f', type=float, default=FREQ, help='Tone frequency')
    parser.add_argument('--wpm', type=float, default=WPM, help='Words per minute')
    parser.add_argument('--fs', type=float, default=FS, help='Farnsworth speed')
    parser.add_argument('-p', action='store_true', default=False, help='Say letters along with morse code')
    parser.add_argument('--force', action='store_true', default=False, help='Force user to get the answer correct before completing')
    parser.add_argument('--limit', type=int, default=0, help='Limit to X queries')
    parser.add_argument('--length', type=int, default=0, help='Length of the word')
    parser.add_argument('-o', type=str, default='', help='Output to given WAV file instead of playing sound')
    parser.add_argument('-i', type=str, default='', help='Input from text file')
    args = parser.parse_args()
    main(args.f, args.wpm, args.fs, args.p, args.force, args.limit, args.length, args.o, args.i)
| [
"numpy.iinfo",
"time.sleep",
"numpy.array",
"random.choices",
"numpy.sin",
"scipy.io.wavfile.getHistory",
"numpy.arange",
"argparse.ArgumentParser",
"morse.stringToMorse",
"numpy.concatenate",
"morse.farnsworthScaleFactor",
"morse.wpmToDps",
"random.shuffle",
"morse.morseToBoolArr",
"num... | [((788, 807), 'morse.wpmToDps', 'morse.wpmToDps', (['wpm'], {}), '(wpm)\n', (802, 807), False, 'import morse\n'), ((897, 933), 'morse.farnsworthScaleFactor', 'morse.farnsworthScaleFactor', (['wpm', 'fs'], {}), '(wpm, fs)\n', (924, 933), False, 'import morse\n'), ((2590, 2614), 'random.shuffle', 'random.shuffle', (['messages'], {}), '(messages)\n', (2604, 2614), False, 'import string, time, random, msvcrt\n'), ((2628, 2661), 'random.choices', 'random.choices', (['messages'], {'k': 'limit'}), '(messages, k=limit)\n', (2642, 2661), False, 'import string, time, random, msvcrt\n'), ((2855, 2870), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2865, 2870), False, 'import string, time, random, msvcrt\n'), ((3728, 3767), 'numpy.zeros', 'np.zeros', (['numSamplesPadding'], {'dtype': 'bool'}), '(numSamplesPadding, dtype=bool)\n', (3736, 3767), True, 'import numpy as np\n'), ((4012, 4033), 'numpy.arange', 'np.arange', (['numSamples'], {}), '(numSamples)\n', (4021, 4033), True, 'import numpy as np\n'), ((4287, 4315), 'morse.stringToMorse', 'morse.stringToMorse', (['message'], {}), '(message)\n', (4306, 4315), False, 'import morse\n'), ((4328, 4368), 'morse.morseToBoolArr', 'morse.morseToBoolArr', (['code', 'sps', 'wpm', 'fs'], {}), '(code, sps, wpm, fs)\n', (4348, 4368), False, 'import morse\n'), ((5173, 5212), 'scipy.io.wavfile.getHistory', 'io.wavfile.getHistory', (['fName'], {'mmap': '(True)'}), '(fName, mmap=True)\n', (5194, 5212), False, 'from scipy import io\n'), ((5594, 5624), 'numpy.concatenate', 'np.concatenate', (['letterNameList'], {}), '(letterNameList)\n', (5608, 5624), True, 'import numpy as np\n'), ((5977, 5997), 'time.sleep', 'time.sleep', (['duration'], {}), '(duration)\n', (5987, 5997), False, 'import string, time, random, msvcrt\n'), ((6134, 6206), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Convert text to morse code audio."""'}), "(description='Convert text to morse code audio.')\n", (6157, 6206), 
False, 'import sys, argparse\n'), ((2935, 2965), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float32'}), '([], dtype=np.float32)\n', (2943, 2965), True, 'import numpy as np\n'), ((3623, 3647), 'numpy.sum', 'np.sum', (['smoothingWeights'], {}), '(smoothingWeights)\n', (3629, 3647), True, 'import numpy as np\n'), ((3923, 3971), 'numpy.correlate', 'np.correlate', (['boolArr', 'smoothingWeights', '"""valid"""'], {}), "(boolArr, smoothingWeights, 'valid')\n", (3935, 3971), True, 'import numpy as np\n'), ((4046, 4082), 'numpy.sin', 'np.sin', (['(x * (freq * 2 * np.pi / sps))'], {}), '(x * (freq * 2 * np.pi / sps))\n', (4052, 4082), True, 'import numpy as np\n'), ((1599, 1610), 'time.time', 'time.time', ([], {}), '()\n', (1608, 1610), False, 'import string, time, random, msvcrt\n'), ((3129, 3164), 'numpy.zeros', 'np.zeros', (['(offset + lenNew - lenBase)'], {}), '(offset + lenNew - lenBase)\n', (3137, 3164), True, 'import numpy as np\n'), ((3518, 3550), 'numpy.arange', 'np.arange', (['(1)', '(weightLen // 2 + 1)'], {}), '(1, weightLen // 2 + 1)\n', (3527, 3550), True, 'import numpy as np\n'), ((3548, 3584), 'numpy.arange', 'np.arange', (['(weightLen // 2 + 1)', '(0)', '(-1)'], {}), '(weightLen // 2 + 1, 0, -1)\n', (3557, 3584), True, 'import numpy as np\n'), ((3780, 3823), 'numpy.concatenate', 'np.concatenate', (['(padding, boolArr, padding)'], {}), '((padding, boolArr, padding))\n', (3794, 3823), True, 'import numpy as np\n'), ((5350, 5365), 'numpy.iinfo', 'np.iinfo', (['dtype'], {}), '(dtype)\n', (5358, 5365), True, 'import numpy as np\n'), ((1691, 1702), 'time.time', 'time.time', ([], {}), '()\n', (1700, 1702), False, 'import string, time, random, msvcrt\n'), ((2285, 2298), 'collections.Counter', 'Counter', (['word'], {}), '(word)\n', (2292, 2298), False, 'from collections import Counter\n'), ((5729, 5754), 'numpy.arange', 'np.arange', (['(sps * duration)'], {}), '(sps * duration)\n', (5738, 5754), True, 'import numpy as np\n'), ((4637, 4673), 
'morse.stringToMorse', 'morse.stringToMorse', (['message[:i + 1]'], {}), '(message[:i + 1])\n', (4656, 4673), False, 'import morse\n'), ((4738, 4769), 'morse.letterToMorse', 'morse.letterToMorse', (['message[i]'], {}), '(message[i])\n', (4757, 4769), False, 'import morse\n')] |
from __future__ import print_function, division
import unittest, numpy as np
from pyscf import gto, tddft, scf
from pyscf.nao import bse_iter
from pyscf.nao import polariz_freq_osc_strength
from pyscf.data.nist import HARTREE2EV
class KnowValues(unittest.TestCase):

  def test_0147_bse_h2o_rks_pz(self):
    """ Interacting case """
    # Water molecule, cc-pVDZ basis; reference TDDFT via pyscf RKS.
    mol = gto.M(verbose=0, atom='O 0 0 0;H 0 0.489 1.074;H 0 0.489 -1.074', basis='cc-pvdz',)
    mf = scf.RKS(mol)
    mf.kernel()
    td = tddft.TDDFT(mf)
    td.nstates = 95
    td.kernel()
    # Complex frequency grid (small imaginary broadening).
    freqs = np.arange(0.0, 2.0, 0.01) + 1j*0.03
    # Reference polarizability from pyscf oscillator strengths.
    p_ave = -polariz_freq_osc_strength(td.e, td.oscillator_strength(), freqs).imag
    data = np.array([freqs.real*HARTREE2EV, p_ave])
    np.savetxt('test_0147_bse_h2o_rks_pz_pyscf.txt', data.T, fmt=['%f','%f'])
    data_ref = np.loadtxt('test_0147_bse_h2o_rks_pz_pyscf.txt-ref').T
    self.assertTrue(np.allclose(data_ref, data, atol=1e-6, rtol=1e-3))
    # Iterative NAO/BSE polarizability must match its stored reference.
    nao_td = bse_iter(mf=mf, gto=mol, verbosity=0, xc_code='LDA',)
    p_iter = -nao_td.comp_polariz_inter_ave(freqs).imag
    data = np.array([freqs.real*HARTREE2EV, p_iter])
    np.savetxt('test_0147_bse_h2o_rks_pz_nao.txt', data.T, fmt=['%f','%f'])
    data_ref = np.loadtxt('test_0147_bse_h2o_rks_pz_nao.txt-ref').T
    self.assertTrue(np.allclose(data_ref, data, atol=1e-6, rtol=1e-3))
if __name__ == "__main__": unittest.main()  # allow running this test module directly
| [
"numpy.allclose",
"pyscf.gto.M",
"numpy.array",
"pyscf.tddft.TDDFT",
"numpy.savetxt",
"pyscf.nao.bse_iter",
"unittest.main",
"pyscf.scf.RKS",
"numpy.loadtxt",
"numpy.arange"
] | [((1398, 1413), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1411, 1413), False, 'import unittest, numpy as np\n'), ((343, 430), 'pyscf.gto.M', 'gto.M', ([], {'verbose': '(0)', 'atom': '"""O 0 0 0;H 0 0.489 1.074;H 0 0.489 -1.074"""', 'basis': '"""cc-pvdz"""'}), "(verbose=0, atom='O 0 0 0;H 0 0.489 1.074;H 0 0.489 -1.074', basis=\n 'cc-pvdz')\n", (348, 430), False, 'from pyscf import gto, tddft, scf\n'), ((438, 450), 'pyscf.scf.RKS', 'scf.RKS', (['mol'], {}), '(mol)\n', (445, 450), False, 'from pyscf import gto, tddft, scf\n'), ((484, 503), 'pyscf.tddft.TDDFT', 'tddft.TDDFT', (['gto_hf'], {}), '(gto_hf)\n', (495, 503), False, 'from pyscf import gto, tddft, scf\n'), ((701, 744), 'numpy.array', 'np.array', (['[omegas.real * HARTREE2EV, p_ave]'], {}), '([omegas.real * HARTREE2EV, p_ave])\n', (709, 744), True, 'import unittest, numpy as np\n'), ((747, 821), 'numpy.savetxt', 'np.savetxt', (['"""test_0147_bse_h2o_rks_pz_pyscf.txt"""', 'data.T'], {'fmt': "['%f', '%f']"}), "('test_0147_bse_h2o_rks_pz_pyscf.txt', data.T, fmt=['%f', '%f'])\n", (757, 821), True, 'import unittest, numpy as np\n'), ((981, 1037), 'pyscf.nao.bse_iter', 'bse_iter', ([], {'mf': 'gto_hf', 'gto': 'mol', 'verbosity': '(0)', 'xc_code': '"""LDA"""'}), "(mf=gto_hf, gto=mol, verbosity=0, xc_code='LDA')\n", (989, 1037), False, 'from pyscf.nao import bse_iter\n'), ((1108, 1152), 'numpy.array', 'np.array', (['[omegas.real * HARTREE2EV, p_iter]'], {}), '([omegas.real * HARTREE2EV, p_iter])\n', (1116, 1152), True, 'import unittest, numpy as np\n'), ((1155, 1227), 'numpy.savetxt', 'np.savetxt', (['"""test_0147_bse_h2o_rks_pz_nao.txt"""', 'data.T'], {'fmt': "['%f', '%f']"}), "('test_0147_bse_h2o_rks_pz_nao.txt', data.T, fmt=['%f', '%f'])\n", (1165, 1227), True, 'import unittest, numpy as np\n'), ((562, 587), 'numpy.arange', 'np.arange', (['(0.0)', '(2.0)', '(0.01)'], {}), '(0.0, 2.0, 0.01)\n', (571, 587), True, 'import unittest, numpy as np\n'), ((836, 888), 'numpy.loadtxt', 'np.loadtxt', 
(['"""test_0147_bse_h2o_rks_pz_pyscf.txt-ref"""'], {}), "('test_0147_bse_h2o_rks_pz_pyscf.txt-ref')\n", (846, 888), True, 'import unittest, numpy as np\n'), ((911, 962), 'numpy.allclose', 'np.allclose', (['data_ref', 'data'], {'atol': '(1e-06)', 'rtol': '(0.001)'}), '(data_ref, data, atol=1e-06, rtol=0.001)\n', (922, 962), True, 'import unittest, numpy as np\n'), ((1242, 1292), 'numpy.loadtxt', 'np.loadtxt', (['"""test_0147_bse_h2o_rks_pz_nao.txt-ref"""'], {}), "('test_0147_bse_h2o_rks_pz_nao.txt-ref')\n", (1252, 1292), True, 'import unittest, numpy as np\n'), ((1315, 1366), 'numpy.allclose', 'np.allclose', (['data_ref', 'data'], {'atol': '(1e-06)', 'rtol': '(0.001)'}), '(data_ref, data, atol=1e-06, rtol=0.001)\n', (1326, 1366), True, 'import unittest, numpy as np\n')] |
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from stock import Stock
# Apply the FiveThirtyEight style to every figure produced below.
plt.style.use('fivethirtyeight')
def solve(A, r_F, mu_R, sigma_R, verbose=False):
    """Closed-form optimal complete portfolio for a mean-variance investor.

    A: risk-aversion coefficient; r_F: risk-free rate; mu_R, sigma_R:
    expected return and volatility of the risky asset.
    Returns (mu, sigma, utility, sharpe) at the optimum.
    """
    excess = mu_R - r_F
    mu = r_F + excess**2 / (A * sigma_R**2)
    sigma = excess / (A * sigma_R)
    u = mu - A * sigma**2 / 2
    sr = (mu - r_F) / sigma
    if verbose:
        print("===== NUMERICAL RESULT =====")
        print(f"mu*\t= {mu:.4%}")
        print(f"sigma*\t= {sigma:.4%}")
        print(f"U*\t= {u:.4%}")
        print(f"sharpe\t= {sr:.4f}")
        print()
    return mu, sigma, u, sr
def plot(A, r_F, mu_R, sigma_R):
    """Plot the indifference curve, the CAL, and the optimal point; save to allocation.png."""
    mu, sigma, u, sr = solve(A, r_F, mu_R, sigma_R)

    def utility_curve(x):
        # Indifference curve through the optimal utility level u.
        return A / 2 * x**2 + u

    def capital_allocation_line(x):
        return r_F + x * (mu_R - r_F) / sigma_R

    fig, ax = plt.subplots()
    xs = np.linspace(-0.005, 0.015, 100)
    ax.plot(xs, utility_curve(xs), linewidth=3)
    ax.plot(xs, capital_allocation_line(xs), linewidth=3)
    ax.scatter(sigma, mu, marker='x', c='black')
    ax.legend(['Utility Function', 'CAL', 'Optimal Point'])
    ax.set_title('Optimal Allocation')
    ax.set_xlabel('Risk (%)')
    ax.set_ylabel('Expected Return (%)')
    # Re-label the ticks as percentages.
    ax.set_xticklabels([f'{100*x:.2}' for x in ax.get_xticks()])
    ax.set_yticklabels([f'{100*y:.2}' for y in ax.get_yticks()])
    fig.tight_layout()
    fig.savefig('./allocation.png', dpi=300)
def main():
    """Load the Moutai stock data, solve the allocation problem, and plot it."""
    moutai = Stock("./data", '600519.XSHG', 2021)
    # Model constants: risk aversion and annual risk-free rate.
    risk_aversion = 28 / 9
    annual_rf = 0.025
    # Convert the annual risk-free rate to its daily equivalent.
    daily_rf = (annual_rf + 1)**(1 / 365) - 1
    solve(risk_aversion, daily_rf, moutai.mean, moutai.std, verbose=True)
    plot(risk_aversion, daily_rf, moutai.mean, moutai.std)
if __name__ == "__main__":
    # Run the full analysis when executed as a script.
    main()
| [
"matplotlib.pyplot.subplots",
"stock.Stock",
"numpy.linspace",
"matplotlib.pyplot.style.use"
] | [((114, 146), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (127, 146), True, 'import matplotlib.pyplot as plt\n'), ((816, 830), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (828, 830), True, 'import matplotlib.pyplot as plt\n'), ((843, 874), 'numpy.linspace', 'np.linspace', (['(-0.005)', '(0.015)', '(100)'], {}), '(-0.005, 0.015, 100)\n', (854, 874), True, 'import numpy as np\n'), ((1435, 1471), 'stock.Stock', 'Stock', (['"""./data"""', '"""600519.XSHG"""', '(2021)'], {}), "('./data', '600519.XSHG', 2021)\n", (1440, 1471), False, 'from stock import Stock\n')] |
# Following architecture file copied from InferSent source code for load pretraining model
# In the later stage, this architecture will be modified to train a binary classifier (not 3 way softmax)
# We use the original code first to make sure the inference pipeline is good
# so that we don't have to worry about output capatibility when re-train this model with Quora
import numpy as np
import time
import pandas as pd
import random
# import torch
# import torch.nn as nn
import streamlit as st
import urllib
import re
import pickle
import requests
import io
# import nltk
# nltk.download('punkt')
# import warnings
# warnings.filterwarnings("ignore")
# Page title of the Streamlit app.
st.title('Covid-19 Twitter Search')
def pd_load(inp):
    """Read a CSV (local path or URL) into a pandas DataFrame."""
    return pd.read_csv(inp)
def load_marix(inp):
    """Download a .npy file from URL ``inp`` and return the loaded array.

    (Name kept for caller compatibility; 'marix' is presumably a typo
    for 'matrix'.)
    """
    resp = requests.get(inp)
    resp.raise_for_status()
    buffer = io.BytesIO(resp.content)
    return np.load(buffer)
# Fetch the tweet table and the precomputed pairwise similarity matrix.
df = pd_load("https://raw.githubusercontent.com/CMU-IDS-2020/fp-ctqa/main/tweets.csv")
all_scores = load_marix('https://github.com/CMU-IDS-2020/fp-ctqa/raw/main/adjacency_matrix.npy')
tweets = df.text.tolist()
num_tweets = len(tweets)
st.subheader("Here are some randomly selected most recent tweets about Covid-19")
# Show five randomly chosen tweets, numbered [1]..[5] for later selection.
indices = random.sample(range(num_tweets), 5)
for i, tweet in enumerate([tweets[i] for i in indices]):
    st.write("[{}] ".format(i+1) + tweet)
def get_top_n_idx(A, N):
    """Per-row indices and scores of the N+1 largest entries of A.

    One extra column is kept because each row's top match is expected
    to be itself; results are in ascending-score order.
    """
    n_keep = N + 1  # not self
    row_selector = np.arange(A.shape[0])[:, None]
    order = np.argsort(A, axis=1)
    top_idx = order[:, A.shape[1] - n_keep:]
    return top_idx, A[row_selector, top_idx]
# Interactive part: pick one of the five displayed tweets and show its
# nearest neighbours by similarity score.
sample_ids = [1,2,3,4,5]
st.subheader("Which tweet would you like to get information on?")
st.write("Please select the tweet id based on the number inside [] above")
tweet_option = st.selectbox('', sample_ids)
# Convert the 1-based display id to a 0-based index.
tweet_option -= 1
st.write("Here is the tweet you selected!")
st.write([tweets[i] for i in indices][tweet_option])
st.subheader("How many similar tweets would you like to retrieve?")
n_opt = st.slider("", min_value=1, max_value=5, value=3, step=1)
sorted_row_idx, best_scores = get_top_n_idx(all_scores[indices], n_opt)
# Reverse to descending order and drop the first entry (the tweet itself).
sorted_row_idx = sorted_row_idx[tweet_option].tolist()[::-1][1:]
best_scores = best_scores[tweet_option].tolist()[::-1][1:]
st.write('Here are the ordered top ' + str(n_opt) + ' tweets similar to this tweet:')
for tweet_idx, score in zip(sorted_row_idx, best_scores):
    st.write(tweets[tweet_idx])
    st.write("with similarity score " + str(score))
    st.write("\n")
| [
"pandas.read_csv",
"streamlit.write",
"io.BytesIO",
"requests.get",
"numpy.argsort",
"streamlit.subheader",
"streamlit.selectbox",
"streamlit.slider",
"numpy.arange",
"streamlit.title"
] | [((654, 689), 'streamlit.title', 'st.title', (['"""Covid-19 Twitter Search"""'], {}), "('Covid-19 Twitter Search')\n", (662, 689), True, 'import streamlit as st\n'), ((1138, 1224), 'streamlit.subheader', 'st.subheader', (['"""Here are some randomly selected most recent tweets about Covid-19"""'], {}), "(\n 'Here are some randomly selected most recent tweets about Covid-19')\n", (1150, 1224), True, 'import streamlit as st\n'), ((1629, 1694), 'streamlit.subheader', 'st.subheader', (['"""Which tweet would you like to get information on?"""'], {}), "('Which tweet would you like to get information on?')\n", (1641, 1694), True, 'import streamlit as st\n'), ((1695, 1769), 'streamlit.write', 'st.write', (['"""Please select the tweet id based on the number inside [] above"""'], {}), "('Please select the tweet id based on the number inside [] above')\n", (1703, 1769), True, 'import streamlit as st\n'), ((1785, 1813), 'streamlit.selectbox', 'st.selectbox', (['""""""', 'sample_ids'], {}), "('', sample_ids)\n", (1797, 1813), True, 'import streamlit as st\n'), ((1832, 1875), 'streamlit.write', 'st.write', (['"""Here is the tweet you selected!"""'], {}), "('Here is the tweet you selected!')\n", (1840, 1875), True, 'import streamlit as st\n'), ((1876, 1928), 'streamlit.write', 'st.write', (['[tweets[i] for i in indices][tweet_option]'], {}), '([tweets[i] for i in indices][tweet_option])\n', (1884, 1928), True, 'import streamlit as st\n'), ((1929, 1996), 'streamlit.subheader', 'st.subheader', (['"""How many similar tweets would you like to retrieve?"""'], {}), "('How many similar tweets would you like to retrieve?')\n", (1941, 1996), True, 'import streamlit as st\n'), ((2005, 2061), 'streamlit.slider', 'st.slider', (['""""""'], {'min_value': '(1)', 'max_value': '(5)', 'value': '(3)', 'step': '(1)'}), "('', min_value=1, max_value=5, value=3, step=1)\n", (2014, 2061), True, 'import streamlit as st\n'), ((719, 735), 'pandas.read_csv', 'pd.read_csv', (['inp'], {}), '(inp)\n', (730, 
735), True, 'import pandas as pd\n'), ((773, 790), 'requests.get', 'requests.get', (['inp'], {}), '(inp)\n', (785, 790), False, 'import requests\n'), ((2411, 2438), 'streamlit.write', 'st.write', (['tweets[tweet_idx]'], {}), '(tweets[tweet_idx])\n', (2419, 2438), True, 'import streamlit as st\n'), ((2495, 2509), 'streamlit.write', 'st.write', (['"""\n"""'], {}), "('\\n')\n", (2503, 2509), True, 'import streamlit as st\n'), ((848, 876), 'io.BytesIO', 'io.BytesIO', (['response.content'], {}), '(response.content)\n', (858, 876), False, 'import io\n'), ((1428, 1449), 'numpy.arange', 'np.arange', (['A.shape[0]'], {}), '(A.shape[0])\n', (1437, 1449), True, 'import numpy as np\n'), ((1479, 1500), 'numpy.argsort', 'np.argsort', (['A'], {'axis': '(1)'}), '(A, axis=1)\n', (1489, 1500), True, 'import numpy as np\n')] |
""" Implementation of Cosmic RIM estimator"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# Require at least one GPU and enable on-demand memory growth on each,
# so TensorFlow does not grab all GPU memory up front.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
print("\nphysical_devices\n", physical_devices)
world_size = len(physical_devices)
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
for device in physical_devices:
    config = tf.config.experimental.set_memory_growth(device, True)
import tensorflow_probability as tfp
import numpy as np
import os, sys, argparse, time
from scipy.interpolate import InterpolatedUnivariateSpline as iuspline
import matplotlib
# Select the non-interactive Agg backend before pyplot is imported.
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from modelpoisson import PoissonData, check_2pt, check_im, get_ps
from rim_utils import build_rim_parallel, myAdam
import flowpm
from flowpm import linear_field, lpt_init, nbody, cic_paint
from flowpm.utils import r2c3d, c2r3d
# Make the shared project utilities (the `tools` module below) importable.
sys.path.append('../../utils/')
import tools
def str2bool(v):
    """Parse a boolean-ish command-line string into a bool.

    Accepts actual bools unchanged; raises argparse.ArgumentTypeError
    for anything unrecognised (for use as an argparse `type=`).
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
# Command-line configuration for the RIM training run.
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--nc', type=int, default=32, help='Grid size')
parser.add_argument('--bs', type=float, default=200, help='Box Size')
parser.add_argument('--nsteps', type=int, default=3, help='')
parser.add_argument('--niter', type=int, default=200, help='Number of iterations/Max iterations')
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
parser.add_argument('--optimizer', type=str, default='adam', help='Which optimizer to use')
parser.add_argument('--batch_size', type=int, default=8, help='Batch size')
parser.add_argument('--nsims', type=int, default=100, help='Number of simulations')
parser.add_argument('--nbody', type=str2bool, default=False, help='Number of simulationss')
parser.add_argument('--lpt_order', type=int, default=2, help='Order of LPT Initial conditions')
parser.add_argument('--input_size', type=int, default=16, help='Input layer channel size')
parser.add_argument('--cell_size', type=int, default=16, help='Cell channel size')
parser.add_argument('--rim_iter', type=int, default=10, help='Optimization iteration')
parser.add_argument('--epochs', type=int, default=20, help='Number of epochs')
parser.add_argument('--suffix', type=str, default='', help='Suffix for folder pathname')
parser.add_argument('--batch_in_epoch', type=int, default=20, help='Number of batches in epochs')
parser.add_argument('--plambda', type=float, default=0.10, help='Poisson probability')
parser.add_argument('--parallel', type=str2bool, default=True, help='Parallel or Split')
args = parser.parse_args()
# Simulation constants: grid size, box size, scale-factor schedule.
nc, bs = args.nc, args.bs
niter = args.niter
lr = args.lr
a0, af, nsteps = 0.1, 1.0, args.nsteps
stages = np.linspace(a0, af, nsteps, endpoint=True)
plambda = args.plambda
args.stages = stages
args.a0, args.af = a0, af
args.world_size = world_size
#
# Linear matter power spectrum (Planck 2015), interpolated with a spline.
klin = np.loadtxt('../../data/Planck15_a1p00.txt').T[0]
plin = np.loadtxt('../../data//Planck15_a1p00.txt').T[1]
ipklin = iuspline(klin, plin)
# Compute necessary Fourier kernels
kvec = tools.fftk((nc, nc, nc), boxsize=nc, symmetric=False)
kmesh = (sum(k**2 for k in kvec)**0.5).astype(np.float32)
priorwt = ipklin(kmesh)
args.ipklin = ipklin
args.priorwt = priorwt
datamodel = PoissonData(args)
############################
#RIM Params
# Hyper-parameters for the recurrent inference machine, plus two
# plain-Adam baselines (one and ten times the RIM iteration budget).
params = {}
params['input_size'] = args.input_size
params['cell_size'] = args.cell_size
params['strides'] = 2
params['middle_size'] = args.input_size // params['strides'] #lets divide by strides
params['cell_kernel_size'] = 5
params['input_kernel_size'] = 5
params['middle_kernel_size'] = 5
params['output_kernel_size'] = 5
params['rim_iter'] = args.rim_iter
params['input_activation'] = 'tanh'
params['output_activation'] = 'linear'
params['nc'] = nc
params['batch_size'] = args.batch_size
params['epoch'] = args.epochs
adam = myAdam(params['rim_iter'])
adam10 = myAdam(10*params['rim_iter'])
#fid_recon = Recon_Poisson(nc, bs, plambda=plambda, a0=a0, af=af, nsteps=nsteps, nbody=args.nbody, lpt_order=args.lpt_order, anneal=True)
#strategy = tf.distribute.MirroredStrategy(devices=["/device:GPU:0", "/device:GPU:1"])
# Multi-GPU data-parallel strategy; the global batch is split per replica.
strategy = tf.distribute.MirroredStrategy()
print ('\nNumber of devices: {}\n'.format(strategy.num_replicas_in_sync))
BATCH_SIZE_PER_REPLICA = params['batch_size'] // strategy.num_replicas_in_sync
GLOBAL_BATCH_SIZE = params['batch_size']
#################################################
# Output folder name encodes the Poisson rate and run configuration.
if args.parallel: suffpath = '_p%03d_w%d'%(plambda*100, world_size) + args.suffix
else: suffpath = '_p%03d_split'%(plambda*100) + args.suffix
if args.nbody: ofolder = './models/poisson_L%04d_N%03d_T%02d%s/'%(bs, nc, nsteps, suffpath)
else: ofolder = './models/poisson_L%04d_N%03d_LPT%d%s/'%(bs, nc, args.lpt_order, suffpath)
try: os.makedirs(ofolder)
except Exception as e: print(e)
# Build the train/test pipelines and distribute them across replicas.
train_dataset = tf.data.Dataset.range(args.batch_in_epoch)
train_dataset = train_dataset.map(datamodel.pm_data)
train_dataset = train_dataset.prefetch(-1)
test_dataset = tf.data.Dataset.range(strategy.num_replicas_in_sync).map(datamodel.pm_data_test).prefetch(-1)
train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset)
test_dist_dataset = strategy.experimental_distribute_dataset(test_dataset)
###############################################
# Model, optimizer and checkpoint must be created under the strategy
# scope so their variables are mirrored across replicas.
with strategy.scope():
    rim = build_rim_parallel(params)
    grad_fn = datamodel.recon_grad
    optimizer = tf.keras.optimizers.Adam(learning_rate=args.lr)
    checkpoint = tf.train.Checkpoint(model=rim)
#
def train_step(inputs):
    """One per-replica training step: MSE between RIM output and truth.

    `inputs` is an (x_true, y) pair; the RIM starts from a random guess.
    """
    x_true, y = inputs
    x_init = tf.random.normal(x_true.shape)
    with tf.GradientTape() as tape:
        x_pred = rim(x_init, y, grad_fn)
        res = (x_true - x_pred)
        loss = tf.reduce_mean(tf.square(res)) ##This is not advised, come back to this
    gradients = tape.gradient(loss, rim.trainable_variables)
    #optimizer = get_opt(lr)
    optimizer.apply_gradients(zip(gradients, rim.trainable_variables))
    return loss
def test_step(inputs):
    """Reconstruct one batch without updating weights.

    Returns (prediction, initial guess, truth, observation).
    """
    truth, observation = inputs
    guess = tf.random.normal(truth.shape)
    reconstruction = rim(guess, observation, grad_fn)
    return reconstruction, guess, truth, observation
# `run` replicates the provided computation and runs it
# with the distributed input.
@tf.function
def distributed_train_step(dataset_inputs):
    # Sum the per-replica losses into a single scalar.
    per_replica_losses = strategy.run(train_step, args=(dataset_inputs,))
    return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses,
                           axis=None)

@tf.function
def distributed_test_step(dataset_inputs):
    return strategy.run(test_step, args=(dataset_inputs,))
###########################################
####Train
###
#Training
# Training loop: one pass over the distributed training set per epoch,
# followed by one qualitative test batch and a weight checkpoint.
losses = []
for epoch in range(args.epochs):
    print("\nFor epoch %d\n"%epoch)
    #TRAIN LOOP
    total_loss = 0.0
    num_batches = 0
    starte = time.time()
    for x in train_dist_dataset:
        startb = time.time()
        loss = distributed_train_step(x)
        losses.append(loss.numpy())
        total_loss += loss
        print("epoch %d, num batch %d, loss : "%(epoch, num_batches), loss)
        print("Time taken : ", time.time() - startb)
        num_batches += 1
    train_loss = total_loss / num_batches
    print("Train loss for epoch %d "%epoch, train_loss)
    print("Time taken for epoch %d: "%epoch, time.time() - starte)
    plt.plot(losses)
    plt.savefig(ofolder + 'losses.png')

    ##Test Epoch Training
    for x in test_dist_dataset:
        print('Testing')
        a, b, c, d = distributed_test_step(x)
        #print(a.values[0].shape, b.values[0].shape, c.values[0].shape, d.values[0].shape)
        # Multi-replica runs return PerReplica containers (`.values`);
        # single-replica runs return plain tensors.  Fixed: was a bare
        # `except:`, which would also swallow KeyboardInterrupt/SystemExit.
        try: pred, x_init, xx, yy = a.values[0][-1], b.values[0], c.values[0], d.values[0]
        except Exception: pred, x_init, xx, yy = a[-1], b, c, d
        pred_adam = adam(x_init, yy, grad_fn)
        pred_adam10 = adam10(x_init, yy, grad_fn)
        print(x_init.shape, xx.shape, yy.shape, pred.shape, pred_adam.shape, pred_adam10.shape)
        check_im(xx[0].numpy(), x_init[0].numpy(), pred[0].numpy(), ofolder + 'rim-im-%d.png'%epoch)
        check_2pt(datamodel, xx, yy, x_init, pred, pred_adam, pred_adam10, ofolder + 'rim-2pt-%d.png'%epoch)
        break

    rim.save_weights(ofolder + '/%d'%epoch)
#$@
#$@ print(len(x), x[0].values[0].shape)
#$@ a, b, c, d = distributed_test_step(x)
#$@ print(a.values[0].shape, b.values[0].shape, c.values[0].shape, d.values[0].shape)
#$@ pred, x_init, xx, yy = a.values[0], b.values[0], c.values[0], d.values[0]
#$@ pred = pred[-1]
#$@ pred_adam = adam(x_init, yy, grad_fn)
#$@ pred_adam10 = adam10(x_init, yy, grad_fn)
#$@
#$@ fig, ax = plt.subplots(1, 3, figsize = (12, 4))
#$@ vmin, vmax = xx[0].numpy().sum(axis=0).min(), xx[0].numpy().sum(axis=0).max()
#$@ ax[0].imshow(xx[0].numpy().sum(axis=0), vmin=vmin, vmax=vmax)
#$@ ax[0].set_title('Truth')
#$@ ax[1].imshow(x_init[0].numpy().sum(axis=0), vmin=vmin, vmax=vmax)
#$@ ax[1].set_title('initial point')
#$@ ax[2].imshow(pred[0].numpy().sum(axis=0), vmin=vmin, vmax=vmax)
#$@ ax[2].set_title('RIM %d step'%(params['rim_iter']))
#$@ plt.savefig(ofolder + 'rim-im-%d.png'%epoch)
#$@ plt.close()
#$@
#$@ ##
#$@ fig, ax = plt.subplots(1, 2, figsize=(9, 4))
#$@ print(x_init.shape, xx.shape, yy.shape, pred.shape, pred_adam.shape, pred_adam10.shape)
#$@ k, pks = get_ps([x_init.numpy(), gal_sample(pm(x_init)).numpy()], [xx.numpy(), yy.numpy()])
#$@ for i in range(2):
#$@ ax[0].plot(k, pks[i][2]/(pks[i][0]*pks[i][1])**0.5, 'C%d--'%i, lw=0.5)
#$@ ax[1].plot(k, (pks[i][0]/pks[i][1])**0.5, 'C%d--'%i, lw=0.5)
#$@
#$@ k, pks = get_ps([pred.numpy(), gal_sample(pm(pred)).numpy()], [xx.numpy(), yy.numpy()])
#$@ for i in range(2):
#$@ ax[0].plot(k, pks[i][2]/(pks[i][0]*pks[i][1])**0.5, 'C%d'%i)
#$@ ax[1].plot(k, (pks[i][0]/pks[i][1])**0.5, 'C%d'%i)
#$@
#$@ k, pks = get_ps([pred_adam.numpy(), gal_sample(pm(pred_adam)).numpy()], [xx.numpy(), yy.numpy()])
#$@ for i in range(2):
#$@ ax[0].plot(k, pks[i][2]/(pks[i][0]*pks[i][1])**0.5, 'C%d-.'%i, lw=0.5)
#$@ ax[1].plot(k, (pks[i][0]/pks[i][1])**0.5, 'C%d-.'%i, lw=0.5)
#$@
#$@ k, pks = get_ps([pred_adam10.numpy(), gal_sample(pm(pred_adam10)).numpy()], [xx.numpy(), yy.numpy()])
#$@ for i in range(2):
#$@ ax[0].plot(k, pks[i][2]/(pks[i][0]*pks[i][1])**0.5, 'C%d:'%i)
#$@ ax[1].plot(k, (pks[i][0]/pks[i][1])**0.5, 'C%d:'%i)
#$@
#$@ for axis in ax:
#$@ axis.semilogx()
#$@ axis.grid(which='both')
#$@ ax[0].set_ylim(-0.1, 1.2)
#$@ ax[1].set_ylim(-0.5, 2.5)
#$@ plt.savefig(ofolder + 'rim-2pt-%d.png'%epoch)
#$@ plt.close()
#$@
#$@ break
#$@
#
| [
"tensorflow.train.Checkpoint",
"tensorflow.GradientTape",
"sys.path.append",
"tensorflow.random.normal",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"modelpoisson.check_2pt",
"numpy.linspace",
"rim_utils.build_rim_parallel",
"tensorflow.square",
"tensorflow.distribute.MirroredStrategy",... | [((199, 250), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (243, 250), True, 'import tensorflow as tf\n'), ((689, 710), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (703, 710), False, 'import matplotlib\n'), ((977, 1008), 'sys.path.append', 'sys.path.append', (['"""../../utils/"""'], {}), "('../../utils/')\n", (992, 1008), False, 'import os, sys, argparse, time\n'), ((1326, 1387), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process some integers."""'}), "(description='Process some integers.')\n", (1349, 1387), False, 'import os, sys, argparse, time\n'), ((3044, 3086), 'numpy.linspace', 'np.linspace', (['a0', 'af', 'nsteps'], {'endpoint': '(True)'}), '(a0, af, nsteps, endpoint=True)\n', (3055, 3086), True, 'import numpy as np\n'), ((3311, 3331), 'scipy.interpolate.InterpolatedUnivariateSpline', 'iuspline', (['klin', 'plin'], {}), '(klin, plin)\n', (3319, 3331), True, 'from scipy.interpolate import InterpolatedUnivariateSpline as iuspline\n'), ((3465, 3518), 'tools.fftk', 'tools.fftk', (['(nc, nc, nc)'], {'boxsize': 'nc', 'symmetric': '(False)'}), '((nc, nc, nc), boxsize=nc, symmetric=False)\n', (3475, 3518), False, 'import tools\n'), ((3660, 3677), 'modelpoisson.PoissonData', 'PoissonData', (['args'], {}), '(args)\n', (3671, 3677), False, 'from modelpoisson import PoissonData, check_2pt, check_im, get_ps\n'), ((4252, 4278), 'rim_utils.myAdam', 'myAdam', (["params['rim_iter']"], {}), "(params['rim_iter'])\n", (4258, 4278), False, 'from rim_utils import build_rim_parallel, myAdam\n'), ((4288, 4319), 'rim_utils.myAdam', 'myAdam', (["(10 * params['rim_iter'])"], {}), "(10 * params['rim_iter'])\n", (4294, 4319), False, 'from rim_utils import build_rim_parallel, myAdam\n'), ((4556, 4588), 'tensorflow.distribute.MirroredStrategy', 'tf.distribute.MirroredStrategy', ([], {}), 
'()\n', (4586, 4588), True, 'import tensorflow as tf\n'), ((5237, 5279), 'tensorflow.data.Dataset.range', 'tf.data.Dataset.range', (['args.batch_in_epoch'], {}), '(args.batch_in_epoch)\n', (5258, 5279), True, 'import tensorflow as tf\n'), ((457, 511), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['device', '(True)'], {}), '(device, True)\n', (497, 511), True, 'import tensorflow as tf\n'), ((5166, 5186), 'os.makedirs', 'os.makedirs', (['ofolder'], {}), '(ofolder)\n', (5177, 5186), False, 'import os, sys, argparse, time\n'), ((5723, 5749), 'rim_utils.build_rim_parallel', 'build_rim_parallel', (['params'], {}), '(params)\n', (5741, 5749), False, 'from rim_utils import build_rim_parallel, myAdam\n'), ((5801, 5848), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'args.lr'}), '(learning_rate=args.lr)\n', (5825, 5848), True, 'import tensorflow as tf\n'), ((5866, 5896), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'model': 'rim'}), '(model=rim)\n', (5885, 5896), True, 'import tensorflow as tf\n'), ((5964, 5994), 'tensorflow.random.normal', 'tf.random.normal', (['x_true.shape'], {}), '(x_true.shape)\n', (5980, 5994), True, 'import tensorflow as tf\n'), ((6430, 6460), 'tensorflow.random.normal', 'tf.random.normal', (['x_true.shape'], {}), '(x_true.shape)\n', (6446, 6460), True, 'import tensorflow as tf\n'), ((7206, 7217), 'time.time', 'time.time', ([], {}), '()\n', (7215, 7217), False, 'import os, sys, argparse, time\n'), ((7707, 7723), 'matplotlib.pyplot.plot', 'plt.plot', (['losses'], {}), '(losses)\n', (7715, 7723), True, 'from matplotlib import pyplot as plt\n'), ((7728, 7763), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(ofolder + 'losses.png')"], {}), "(ofolder + 'losses.png')\n", (7739, 7763), True, 'from matplotlib import pyplot as plt\n'), ((3196, 3239), 'numpy.loadtxt', 'np.loadtxt', (['"""../../data/Planck15_a1p00.txt"""'], {}), 
"('../../data/Planck15_a1p00.txt')\n", (3206, 3239), True, 'import numpy as np\n'), ((3252, 3296), 'numpy.loadtxt', 'np.loadtxt', (['"""../../data//Planck15_a1p00.txt"""'], {}), "('../../data//Planck15_a1p00.txt')\n", (3262, 3296), True, 'import numpy as np\n'), ((6004, 6021), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (6019, 6021), True, 'import tensorflow as tf\n'), ((7268, 7279), 'time.time', 'time.time', ([], {}), '()\n', (7277, 7279), False, 'import os, sys, argparse, time\n'), ((8432, 8538), 'modelpoisson.check_2pt', 'check_2pt', (['datamodel', 'xx', 'yy', 'x_init', 'pred', 'pred_adam', 'pred_adam10', "(ofolder + 'rim-2pt-%d.png' % epoch)"], {}), "(datamodel, xx, yy, x_init, pred, pred_adam, pred_adam10, ofolder +\n 'rim-2pt-%d.png' % epoch)\n", (8441, 8538), False, 'from modelpoisson import PoissonData, check_2pt, check_im, get_ps\n'), ((1256, 1309), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (1282, 1309), False, 'import os, sys, argparse, time\n'), ((6135, 6149), 'tensorflow.square', 'tf.square', (['res'], {}), '(res)\n', (6144, 6149), True, 'import tensorflow as tf\n'), ((7681, 7692), 'time.time', 'time.time', ([], {}), '()\n', (7690, 7692), False, 'import os, sys, argparse, time\n'), ((5391, 5443), 'tensorflow.data.Dataset.range', 'tf.data.Dataset.range', (['strategy.num_replicas_in_sync'], {}), '(strategy.num_replicas_in_sync)\n', (5412, 5443), True, 'import tensorflow as tf\n'), ((7491, 7502), 'time.time', 'time.time', ([], {}), '()\n', (7500, 7502), False, 'import os, sys, argparse, time\n')] |
import numpy as np
from numpy.testing import assert_allclose
from autumn.models.covid_19.mixing_matrix.mixing_adjusters.location_adjuster import LocationMixingAdjuster
# Canned 16x16 mixing matrices used as fixed inputs by the tests below:
# an all-ones baseline plus per-location variants scaled by distinct factors
# so each location's contribution is distinguishable in the expected outputs.
MM = np.ones([16, 16])
HOME_MM = MM * 0.1
OTHER_LOCATIONS_MM = MM * 0.2
SCHOOL_MM = MM * 0.3
WORK_MM = MM * 0.6
MIXING_MATRICES = {
    'all_locations': MM,
    'home': HOME_MM,
    'other_locations': OTHER_LOCATIONS_MM,
    'school': SCHOOL_MM,
    'work': WORK_MM
}
def test_location_adjuster__with_no_data():
    """
    The adjuster should leave the matrix untouched when neither mobility
    nor microdistancing data has been supplied.
    """
    adjuster = LocationMixingAdjuster(MIXING_MATRICES, {}, {})
    matrix = np.ones([16, 16])
    adjusted = adjuster.get_adjustment(0, matrix)
    assert_allclose(matrix, adjusted, atol=0.01, verbose=True)
def test_location_adjuster__with_only_mobility_data():
    """Mobility multipliers alone should rescale the work and school layers."""
    mobility = {"work": lambda t: 0.3 * t, "school": lambda t: 0.2 * t}
    adjuster = LocationMixingAdjuster(MIXING_MATRICES, mobility, {})
    adjusted = adjuster.get_adjustment(1, np.ones([16, 16]))
    # Each adjusted location contributes (multiplier - 1) times its matrix.
    expected = MM + WORK_MM * (0.3 - 1) + SCHOOL_MM * (0.2 - 1)
    assert_allclose(expected, adjusted, atol=0.01, verbose=True)
def test_location_adjuster__with_only_microdistancing_data():
    """Microdistancing functions alone should rescale the work and school layers."""
    microdistancing = {"work": lambda t: 0.3 * t, "school": lambda t: 0.2 * t}
    adjuster = LocationMixingAdjuster(MIXING_MATRICES, {}, microdistancing)
    adjusted = adjuster.get_adjustment(1, np.ones([16, 16]))
    # Microdistancing acts the same way as mobility when applied alone.
    expected = MM + WORK_MM * (0.3 - 1) + SCHOOL_MM * (0.2 - 1)
    assert_allclose(expected, adjusted, atol=0.01, verbose=True)
def test_location_adjuster__with_microdistancing_and_mobility_data():
    """Mobility and microdistancing effects combine multiplicatively per location."""
    mobility = {"work": lambda t: 0.3 * t, "home": lambda t: 0.5}
    microdistancing = {"school": lambda t: 0.2 * t, "home": lambda t: 0.7}
    adjuster = LocationMixingAdjuster(MIXING_MATRICES, mobility, microdistancing)
    adjusted = adjuster.get_adjustment(1, np.ones([16, 16]))
    # 'home' is present in both inputs, so its multipliers are multiplied together.
    expected = (
        MM
        + WORK_MM * (0.3 - 1)
        + SCHOOL_MM * (0.2 - 1)
        + HOME_MM * (0.5 * 0.7 - 1)
    )
    assert_allclose(expected, adjusted, atol=0.01, verbose=True)
def _get_country_mixing_matrix(sheet_type, iso3):
    """
    Test stub for a country mixing-matrix loader: return the canned matrix
    for the requested location sheet, ignoring the country code.

    Args:
        sheet_type: one of 'home', 'other_locations', 'school', 'work';
            any other value falls back to the all-locations matrix.
        iso3: country code, unused by this stub.
    Returns:
        The corresponding module-level matrix constant.
    """
    # Replaces an inconsistent if/if/if/if-else chain with a single lookup.
    matrices = {
        "home": HOME_MM,
        "other_locations": OTHER_LOCATIONS_MM,
        "school": SCHOOL_MM,
        "work": WORK_MM,
    }
    return matrices.get(sheet_type, MM)
| [
"numpy.testing.assert_allclose",
"autumn.models.covid_19.mixing_matrix.mixing_adjusters.location_adjuster.LocationMixingAdjuster",
"numpy.ones"
] | [((175, 192), 'numpy.ones', 'np.ones', (['[16, 16]'], {}), '([16, 16])\n', (182, 192), True, 'import numpy as np\n'), ((636, 714), 'autumn.models.covid_19.mixing_matrix.mixing_adjusters.location_adjuster.LocationMixingAdjuster', 'LocationMixingAdjuster', (['MIXING_MATRICES', 'mobility_funcs', 'microdistancing_funcs'], {}), '(MIXING_MATRICES, mobility_funcs, microdistancing_funcs)\n', (658, 714), False, 'from autumn.models.covid_19.mixing_matrix.mixing_adjusters.location_adjuster import LocationMixingAdjuster\n'), ((724, 741), 'numpy.ones', 'np.ones', (['[16, 16]'], {}), '([16, 16])\n', (731, 741), True, 'import numpy as np\n'), ((790, 842), 'numpy.testing.assert_allclose', 'assert_allclose', (['mm', 'adj_mm'], {'atol': '(0.01)', 'verbose': '(True)'}), '(mm, adj_mm, atol=0.01, verbose=True)\n', (805, 842), False, 'from numpy.testing import assert_allclose\n'), ((1024, 1102), 'autumn.models.covid_19.mixing_matrix.mixing_adjusters.location_adjuster.LocationMixingAdjuster', 'LocationMixingAdjuster', (['MIXING_MATRICES', 'mobility_funcs', 'microdistancing_funcs'], {}), '(MIXING_MATRICES, mobility_funcs, microdistancing_funcs)\n', (1046, 1102), False, 'from autumn.models.covid_19.mixing_matrix.mixing_adjusters.location_adjuster import LocationMixingAdjuster\n'), ((1112, 1129), 'numpy.ones', 'np.ones', (['[16, 16]'], {}), '([16, 16])\n', (1119, 1129), True, 'import numpy as np\n'), ((1319, 1378), 'numpy.testing.assert_allclose', 'assert_allclose', (['expect_mm', 'adj_mm'], {'atol': '(0.01)', 'verbose': '(True)'}), '(expect_mm, adj_mm, atol=0.01, verbose=True)\n', (1334, 1378), False, 'from numpy.testing import assert_allclose\n'), ((1567, 1645), 'autumn.models.covid_19.mixing_matrix.mixing_adjusters.location_adjuster.LocationMixingAdjuster', 'LocationMixingAdjuster', (['MIXING_MATRICES', 'mobility_funcs', 'microdistancing_funcs'], {}), '(MIXING_MATRICES, mobility_funcs, microdistancing_funcs)\n', (1589, 1645), False, 'from 
autumn.models.covid_19.mixing_matrix.mixing_adjusters.location_adjuster import LocationMixingAdjuster\n'), ((1655, 1672), 'numpy.ones', 'np.ones', (['[16, 16]'], {}), '([16, 16])\n', (1662, 1672), True, 'import numpy as np\n'), ((1862, 1921), 'numpy.testing.assert_allclose', 'assert_allclose', (['expect_mm', 'adj_mm'], {'atol': '(0.01)', 'verbose': '(True)'}), '(expect_mm, adj_mm, atol=0.01, verbose=True)\n', (1877, 1921), False, 'from numpy.testing import assert_allclose\n'), ((2162, 2240), 'autumn.models.covid_19.mixing_matrix.mixing_adjusters.location_adjuster.LocationMixingAdjuster', 'LocationMixingAdjuster', (['MIXING_MATRICES', 'mobility_funcs', 'microdistancing_funcs'], {}), '(MIXING_MATRICES, mobility_funcs, microdistancing_funcs)\n', (2184, 2240), False, 'from autumn.models.covid_19.mixing_matrix.mixing_adjusters.location_adjuster import LocationMixingAdjuster\n'), ((2250, 2267), 'numpy.ones', 'np.ones', (['[16, 16]'], {}), '([16, 16])\n', (2257, 2267), True, 'import numpy as np\n'), ((2521, 2580), 'numpy.testing.assert_allclose', 'assert_allclose', (['expect_mm', 'adj_mm'], {'atol': '(0.01)', 'verbose': '(True)'}), '(expect_mm, adj_mm, atol=0.01, verbose=True)\n', (2536, 2580), False, 'from numpy.testing import assert_allclose\n')] |
import numpy as np
from math import sin, cos
from ..manager import ConfigManager
# Example geometry descriptors for each supported 'type'.  Every descriptor
# is a plain dict with a 'type' tag, optional style keys ('color', 'fcolor',
# 'lw', 'fill', 'style', ...) and a 'body' payload whose shape depends on
# the type.  These serve as documentation/smoke-test inputs for the draw
# functions below.
# NOTE(review): 'clolor' in the layer/layers examples looks like a typo for
# 'color' -- as written these layers never set a pen colour; confirm intent.
point = {'type':'point', 'color':(255,0,0), 'lw':1, 'body':(10,10)}
points = {'type':'points', 'color':(255,0,0), 'lw':1, 'body':[(10,10),(100,200)]}
line = {'type':'line', 'color':(255,0,0), 'lw':1, 'style':'-', 'body':[(10,10),(100,200),(200,200)]}
lines = {'type':'lines', 'color':(255,0,0), 'lw':1, 'style':'-', 'body':[[(10,10),(100,200),(200,200)],[(150,10),(50,250)]]}
polygon = {'type':'polygon', 'color':(255,0,0), 'fcolor':(255,255,0), 'lw':1, 'style':'o', 'body':[(10,10),(100,200),(200,200)]}
polygons = {'type':'polygons', 'color':(255,0,0), 'fcolor':(255,255,0,30), 'fill':False, 'lw':1, 'style':'o', 'body':[[(10,10),(100,200),(200,200)],[(150,10),(50,250),(288,0)]]}
circle = {'type':'circle', 'color':(255,0,0), 'fcolor':(255,255,0), 'fill':False, 'body':(100,100,50)}
circles = {'type':'circles', 'color':(255,0,0), 'fcolor':(255,255,0), 'fill':False, 'body':[(100,100,50),(300,300,100)]}
ellipse = {'type':'ellipse', 'color':(255,0,0), 'fcolor':(255,255,0), 'fill':False, 'body':(100,100,100,50,1)}
ellipses = {'type':'ellipses', 'color':(255,0,0), 'fcolor':(255,255,0), 'fill':False, 'body':[(100,100,100,50,1),(200,250,50,100,3.14)]}
rectangle = {'type':'rectangle', 'color':(255,0,0), 'fcolor':(255,255,0), 'fill':True, 'body':(100,100,80,50)}
rectangles = {'type':'rectangles', 'color':(255,0,0), 'fcolor':(255,255,0), 'fill':False, 'body':[(100,100,80,50),(200,200,80,100)]}
text = {'type':'text', 'color':(255,255,0), 'fcolor':(0,0,0), 'size':8, 'pt':True, 'body':(100,200,'id=0')}
texts = {'type':'texts', 'color':(255,255,0), 'fcolor':(0,0,0), 'size':8, 'pt':True, 'body':[(100,200,'id=0'),(180,250,'id=1')]}
layer = {'type':'layer', 'num':-1, 'clolor':(255,255,0), 'fcolor':(255,255,255), 'fill':False,
         'body':[point, points, line, lines, polygon, polygons, circle, circles, ellipse, ellipses, rectangle, rectangles, text, texts]}
layers = {'type':'layers', 'num':-1, 'clolor':(255,255,0), 'fcolor':(255,255,255), 'fill':False,
          'body':{1:points, 2:line, 3:layer}}
def plot(pts, dc, f, **key):
    """Render point/line/polygon style descriptors onto a device context.

    Parameters
    ----------
    pts : dict
        Descriptor with a 'type' key ('point', 'points', 'line', 'polygon',
        'lines' or 'polygons') plus optional style keys
        ('color', 'fcolor', 'lw', 'fill', 'style', 'r').
    dc : drawing context (presumably a wx.DC -- confirm)
        Its pen/brush state is saved on entry and restored on exit.
    f : callable
        Maps data coordinates to device coordinates, called as f(x, y).
    """
    pen, brush = dc.GetPen(), dc.GetBrush()
    # Remember the incoming pen/brush state so it can be restored on exit.
    width, color = pen.GetWidth(), pen.GetColour()
    fcolor, style = brush.GetColour(), brush.GetStyle()
    if 'color' in pts:
        pen.SetColour(pts['color'])
    if 'fcolor' in pts:
        brush.SetColour(pts['fcolor'])
    if 'lw' in pts:
        pen.SetWidth(pts['lw'])
    if 'fill' in pts:
        # 106/100 appear to be the wx brush-style codes for transparent/solid
        # respectively -- TODO confirm against the wx enums.
        brush.SetStyle((106,100)[pts['fill']])
    dc.SetPen(pen)
    dc.SetBrush(brush)
    if pts['type'] == 'point':
        # Single point: a small solid circle in the pen colour.
        pen.SetWidth(1)
        brush.SetStyle(100)
        brush.SetColour(pen.GetColour())
        dc.SetPen(pen)
        dc.SetBrush(brush)
        r = pts['r'] if 'r' in pts else 2  # marker radius in pixels
        x, y = f(*pts['body'])
        dc.DrawEllipse (x-r,y-r,r*2,r*2)
        # Undo the per-marker tweaks made just above.
        pen.SetWidth(pts['lw'] if 'lw' in pts else width)
        brush.SetStyle((106,100)[pts['fill']] if 'fill' in pts else style)
        # NOTE(review): reads key 'fc' here but 'fcolor' everywhere else --
        # possible typo; confirm.
        brush.SetColour(pts['fc'] if 'fc' in pts else fcolor)
        dc.SetPen(pen)
        dc.SetBrush(brush)
    elif pts['type'] in {'points','line','polygon'}:
        # lst collects marker bounding boxes, plst the device-space vertices.
        lst, plst = [], []
        r = pts['r'] if 'r' in pts else 2
        for p in pts['body']:
            x, y = f(*p)
            lst.append((x-r,y-r,r*2,r*2))
            plst.append((x,y))
        # '-' in 'style' requests connecting lines, 'o' requests point markers.
        isline = 'style' in pts and '-' in pts['style']
        ispoint = 'style' in pts and 'o' in pts['style']
        if pts['type'] == 'polygon':
            dc.DrawPolygon(plst)
        if isline or pts['type'] == 'line':
            dc.DrawLines(plst)
        if pts['type']=='points' or ispoint:
            # Draw the markers with a thin solid brush, then restore.
            pen.SetWidth(1)
            brush.SetStyle(100)
            brush.SetColour(pen.GetColour())
            dc.SetPen(pen)
            dc.SetBrush(brush)
            dc.DrawEllipseList(lst)
            pen.SetWidth(pts['lw'] if 'lw' in pts else width)
            brush.SetStyle((106,100)[pts['fill']] if 'fill' in pts else style)
            brush.SetColour(pts['fc'] if 'fc' in pts else fcolor)
            dc.SetPen(pen)
            dc.SetBrush(brush)
    elif pts['type'] in {'lines','polygons'}:
        # Same as the branch above, but 'body' is a list of point sequences.
        lst, plst = [], []
        r = pts['r'] if 'r' in pts else 2
        for i in pts['body']:
            line = []
            for p in i:
                x, y = f(*p)
                lst.append((x-r,y-r,r*2,r*2))
                line.append((x,y))
            plst.append(line)
        isline = 'style' in pts and '-' in pts['style']
        ispoint = 'style' in pts and 'o' in pts['style']
        if pts['type'] == 'polygons':
            dc.DrawPolygonList(plst)
        if isline or pts['type'] == 'line':
            for line in plst:
                dc.DrawLines(line)
        if pts['type']=='points' or ispoint:
            pen.SetWidth(1)
            brush.SetStyle(100)
            brush.SetColour(pen.GetColour())
            dc.SetPen(pen)
            dc.SetBrush(brush)
            dc.DrawEllipseList(lst)
            pen.SetWidth(pts['lw'] if 'lw' in pts else width)
            brush.SetStyle((106,100)[pts['fill']] if 'fill' in pts else style)
            brush.SetColour(pts['fc'] if 'fc' in pts else fcolor)
            dc.SetPen(pen)
            dc.SetBrush(brush)
    # Restore the caller's pen/brush state.
    pen.SetWidth(width)
    pen.SetColour(color)
    brush.SetColour(fcolor)
    brush.SetStyle(style)
    dc.SetPen(pen)
    dc.SetBrush(brush)
def draw_circle(pts, dc, f, **key):
    """Draw a 'circle' or 'circles' descriptor on *dc*.

    Bodies are (x, y, r) tuples in data coordinates; *f* maps the centre to
    device coordinates and key['k'] scales the radius.  The pen/brush state
    of *dc* is restored before returning.
    """
    pen, brush = dc.GetPen(), dc.GetBrush()
    # Remember the incoming pen/brush state so it can be restored below.
    width, color = pen.GetWidth(), pen.GetColour()
    fcolor, style = brush.GetColour(), brush.GetStyle()
    if 'color' in pts:
        pen.SetColour(pts['color'])
    if 'fcolor' in pts:
        brush.SetColour(pts['fcolor'])
    if 'lw' in pts:
        pen.SetWidth(pts['lw'])
    if 'fill' in pts:
        # 106/100 appear to be wx transparent/solid brush codes -- confirm.
        brush.SetStyle((106,100)[pts['fill']])
    dc.SetPen(pen)
    dc.SetBrush(brush)
    if pts['type'] == 'circle':
        x, y ,r = pts['body']
        x, y = f(x, y)
        dc.DrawCircle(x, y, r*key['k'])
    if pts['type'] == 'circles':
        # Batch form: build bounding boxes and draw all at once.
        lst = []
        for x, y ,r in pts['body']:
            x, y = f(x, y)
            r *= key['k']
            lst.append((x-r,y-r,r*2,r*2))
        dc.DrawEllipseList(lst)
    # Restore the caller's pen/brush state.
    pen.SetWidth(width)
    pen.SetColour(color)
    brush.SetColour(fcolor)
    brush.SetStyle(style)
    dc.SetPen(pen)
    dc.SetBrush(brush)
def make_ellipse(l1, l2, ang):
    """Return 36 (x, y) vertices approximating an origin-centred ellipse.

    *l1* and *l2* are the semi-axis lengths and *ang* is the rotation angle
    in radians (applied with a negated sign, matching screen coordinates).
    The result is a (36, 2) array suitable for DrawPolygon.
    """
    theta = np.linspace(0, 2 * np.pi, 36)
    unit_circle = np.array((np.cos(theta), np.sin(theta)))
    # Combined scale-and-rotate matrix applied to the unit circle.
    transform = np.array([[l1 * cos(-ang), -l2 * sin(-ang)],
                          [l1 * sin(-ang), l2 * cos(-ang)]])
    return (transform @ unit_circle).T
def draw_ellipse(pts, dc, f, **key):
    """Draw an 'ellipse' or 'ellipses' descriptor on *dc*.

    Bodies are (x, y, l1, l2, ang) tuples: centre in data coordinates,
    semi-axis lengths and rotation angle.  The outline is approximated by
    a 36-vertex polygon from make_ellipse, scaled by key['k'].  Pen/brush
    state is restored before returning.
    """
    pen, brush = dc.GetPen(), dc.GetBrush()
    # Remember the incoming pen/brush state so it can be restored below.
    width, color = pen.GetWidth(), pen.GetColour()
    fcolor, style = brush.GetColour(), brush.GetStyle()
    if 'color' in pts:
        pen.SetColour(pts['color'])
    if 'fcolor' in pts:
        brush.SetColour(pts['fcolor'])
    if 'lw' in pts:
        pen.SetWidth(pts['lw'])
    if 'fill' in pts:
        # 106/100 appear to be wx transparent/solid brush codes -- confirm.
        brush.SetStyle((106,100)[pts['fill']])
    dc.SetPen(pen)
    dc.SetBrush(brush)
    if pts['type'] == 'ellipse':
        x, y ,l1, l2, a = pts['body']
        elp = make_ellipse(l1,l2,a)
        # Scale to pixels and translate to the device-space centre.
        elp = elp*key['k']+f(x,y)
        dc.DrawPolygon(elp)
    if pts['type'] == 'ellipses':
        lst = []
        for x, y, l1, l2, a in pts['body']:
            elp = make_ellipse(l1,l2,a)
            lst.append(elp*key['k']+f(x,y))
        dc.DrawPolygonList(lst)
    # Restore the caller's pen/brush state.
    pen.SetWidth(width)
    pen.SetColour(color)
    brush.SetColour(fcolor)
    brush.SetStyle(style)
    dc.SetPen(pen)
    dc.SetBrush(brush)
def draw_rectangle(pts, dc, f, **key):
    """Draw a 'rectangle' or 'rectangles' descriptor on *dc*.

    Bodies are (x, y, w, h) with (x, y) the centre in data coordinates;
    *f* maps data to device coordinates and key['k'] scales the extents.
    Pen/brush state is restored before returning.
    """
    pen, brush = dc.GetPen(), dc.GetBrush()
    # Snapshot incoming drawing state so it can be restored afterwards.
    saved_width, saved_color = pen.GetWidth(), pen.GetColour()
    saved_fcolor, saved_style = brush.GetColour(), brush.GetStyle()
    if 'color' in pts:
        pen.SetColour(pts['color'])
    if 'fcolor' in pts:
        brush.SetColour(pts['fcolor'])
    if 'lw' in pts:
        pen.SetWidth(pts['lw'])
    if 'fill' in pts:
        # 106/100: transparent vs solid brush style codes.
        brush.SetStyle((106, 100)[pts['fill']])
    dc.SetPen(pen)
    dc.SetBrush(brush)
    scale = key['k']
    if pts['type'] == 'rectangle':
        cx, cy, w, h = pts['body']
        cx, cy = f(cx, cy)
        w, h = w * scale, h * scale
        dc.DrawRectangle(cx - w / 2, cy - h / 2, w, h)
    if pts['type'] == 'rectangles':
        rects = []
        for cx, cy, w, h in pts['body']:
            cx, cy = f(cx, cy)
            sw, sh = w * scale, h * scale
            rects.append((cx - sw / 2, cy - sh / 2, sw, sh))
        dc.DrawRectangleList(rects)
    # Restore the caller's pen/brush state.
    pen.SetWidth(saved_width)
    pen.SetColour(saved_color)
    brush.SetColour(saved_fcolor)
    brush.SetStyle(saved_style)
    dc.SetPen(pen)
    dc.SetBrush(brush)
def draw_text(pts, dc, f, **key):
    """Draw a 'text' or 'texts' descriptor on *dc*.

    Bodies are (x, y, string) tuples in data coordinates.  The label is
    offset by (3, 3) pixels from the anchor and, unless pts['pt'] is False,
    a small dot marks the anchor itself.  Pen/brush/font and text colours
    are restored before returning.
    """
    pen, brush, font = dc.GetPen(), dc.GetBrush(), dc.GetFont()
    # Snapshot the incoming state so it can be restored on exit.
    # (Removed the unused 'width' capture -- pen width is never changed here.)
    color = pen.GetColour()
    fcolor, style = brush.GetColour(), brush.GetStyle()
    size = font.GetPointSize()
    tcolor = dc.GetTextForeground()
    bcolor = dc.GetTextBackground()
    if 'color' in pts:
        pen.SetColour(pts['color'])
        dc.SetTextForeground(pts['color'])
        # Anchor dots are drawn solid in the text colour.
        brush.SetColour(pen.GetColour())
        brush.SetStyle(100)
    if 'fcolor' in pts:
        # BUG FIX: removed leftover debug print('hahaha').
        dc.SetTextBackground(pts['fcolor'])
    if 'size' in pts:
        font.SetPointSize(pts['size'])
    dc.SetPen(pen)
    dc.SetBrush(brush)
    dc.SetFont(font)
    if pts['type'] == 'text':
        x, y, text = pts['body']
        x, y = f(x, y)
        dc.DrawText(text, x+3, y+3)
        if 'pt' not in pts or pts['pt']:
            dc.DrawEllipse(x-2,y-2,4,4)
    if pts['type'] == 'texts':
        tlst, clst, elst = [], [], []
        for x, y, text in pts['body']:
            x, y = f(x, y)
            tlst.append(text)
            clst.append((x+3, y+3))
            elst.append((x-2, y-2, 4, 4))
        dc.DrawTextList(tlst, clst)
        if 'pt' not in pts or pts['pt']:
            dc.DrawEllipseList(elst)
    # Restore the caller's state.
    font.SetPointSize(size)
    pen.SetColour(color)
    brush.SetColour(fcolor)
    brush.SetStyle(style)
    dc.SetPen(pen)
    dc.SetBrush(brush)
    dc.SetFont(font)
    dc.SetTextForeground(tcolor)
    dc.SetTextBackground(bcolor)
# Dispatch table: maps a descriptor's 'type' tag to the renderer for it.
# Extended below with 'layer'/'layers' once those renderers are defined.
draw_dic = {'points':plot, 'point':plot, 'line':plot, 'polygon':plot, 'lines':plot, 'polygons':plot,
            'circle':draw_circle, 'circles':draw_circle, 'ellipse':draw_ellipse, 'ellipses':draw_ellipse,
            'rectangle':draw_rectangle, 'rectangles':draw_rectangle, 'text':draw_text, 'texts':draw_text}

# Render any descriptor by dispatching on its 'type' key.
def draw(obj, dc, f, **key): draw_dic[obj['type']](obj, dc, f, **key)
def draw_layer(pts, dc, f, **key):
    """Draw every child descriptor in a 'layer' on *dc*.

    Layer-level style overrides ('color', 'fcolor', 'lw', 'fill') are
    applied before drawing the children and the pen/brush state is
    restored afterwards.
    """
    pen, brush = dc.GetPen(), dc.GetBrush()
    # Remember the incoming pen/brush state so it can be restored below.
    width, color = pen.GetWidth(), pen.GetColour()
    fcolor, style = brush.GetColour(), brush.GetStyle()
    if 'color' in pts:
        pen.SetColour(pts['color'])
    if 'fcolor' in pts:
        brush.SetColour(pts['fcolor'])
    if 'lw' in pts:
        pen.SetWidth(pts['lw'])
    if 'fill' in pts:
        # 106/100 appear to be wx transparent/solid brush codes -- confirm.
        brush.SetStyle((106,100)[pts['fill']])
    dc.SetPen(pen)
    dc.SetBrush(brush)
    # Delegate each child descriptor to its type-specific renderer.
    for i in pts['body']:draw(i, dc, f, **key)
    # Restore the caller's pen/brush state.
    pen.SetWidth(width)
    pen.SetColour(color)
    brush.SetColour(fcolor)
    brush.SetStyle(style)
    dc.SetPen(pen)
    dc.SetBrush(brush)

# Register the layer renderer in the dispatch table.
draw_dic['layer'] = draw_layer
def draw_layers(pts, dc, f, **key):
    """Draw the single layer selected by key['cur'] from a 'layers' descriptor.

    pts['body'] maps layer identifiers to drawable descriptors; only the
    entry matching the current layer key (key['cur']) is rendered, after
    applying the layers-level style overrides.  Pen/brush state is
    restored before returning.
    """
    pen, brush = dc.GetPen(), dc.GetBrush()
    # Remember the incoming pen/brush state so it can be restored below.
    width, color = pen.GetWidth(), pen.GetColour()
    fcolor, style = brush.GetColour(), brush.GetStyle()
    if 'color' in pts:
        pen.SetColour(pts['color'])
    if 'fcolor' in pts:
        brush.SetColour(pts['fcolor'])
    if 'lw' in pts:
        pen.SetWidth(pts['lw'])
    if 'fill' in pts:
        # 106/100: transparent vs solid brush style codes.
        brush.SetStyle((106,100)[pts['fill']])
    dc.SetPen(pen)
    dc.SetBrush(brush)
    # BUG FIX: removed leftover debug print(pts['body'].keys()).
    if key['cur'] in pts['body']:
        draw(pts['body'][key['cur']], dc, f, **key)
    # Restore the caller's pen/brush state.
    pen.SetWidth(width)
    pen.SetColour(color)
    brush.SetColour(fcolor)
    brush.SetStyle(style)
    dc.SetPen(pen)
    dc.SetBrush(brush)

# Register the layers renderer in the dispatch table.
draw_dic['layers'] = draw_layers
class GeometryMark:
    """Wraps a drawable descriptor and renders it with the globally
    configured 'mark_*' style settings from ConfigManager."""
    def __init__(self, body):
        # body: any descriptor accepted by draw() (point/line/layer/...).
        self.body = body
    def draw(self, dc, f, **key):
        """Apply the configured mark style to *dc*, then draw self.body.

        Each style falls back to a hard-coded default when the
        corresponding ConfigManager key is unset/falsy.  Note: unlike the
        module-level draw functions, this does NOT restore the previous
        pen/brush/font state.
        """
        pen, brush, font = dc.GetPen(), dc.GetBrush(), dc.GetFont()
        pen.SetColour(ConfigManager.get('mark_color') or (255,255,0))
        brush.SetColour(ConfigManager.get('mark_fcolor') or (255,255,255))
        brush.SetStyle((106,100)[ConfigManager.get('mark_fill') or False])
        pen.SetWidth(ConfigManager.get('mark_lw') or 1)
        dc.SetTextForeground(ConfigManager.get('mark_tcolor') or (255,0,0))
        font.SetPointSize(ConfigManager.get('mark_tsize') or 8)
        dc.SetPen(pen); dc.SetBrush(brush); dc.SetFont(font);
        draw(self.body, dc, f, **key)
if __name__ == '__main__':
    # Smoke test.  BUG FIX: make_ellipse takes (l1, l2, ang); the previous
    # call make_ellipse(0, 0, 2, 1, 0) passed five arguments and raised a
    # TypeError.
    print(make_ellipse(2, 1, 0))
"math.cos",
"numpy.dot",
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"math.sin"
] | [((5759, 5788), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi * 2)', '(36)'], {}), '(0, np.pi * 2, 36)\n', (5770, 5788), True, 'import numpy as np\n'), ((5835, 5849), 'numpy.dot', 'np.dot', (['m', 'xys'], {}), '(m, xys)\n', (5841, 5849), True, 'import numpy as np\n'), ((5804, 5813), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (5810, 5813), True, 'import numpy as np\n'), ((5815, 5824), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (5821, 5824), True, 'import numpy as np\n'), ((5693, 5702), 'math.cos', 'cos', (['(-ang)'], {}), '(-ang)\n', (5696, 5702), False, 'from math import sin, cos\n'), ((5707, 5716), 'math.sin', 'sin', (['(-ang)'], {}), '(-ang)\n', (5710, 5716), False, 'from math import sin, cos\n'), ((5728, 5737), 'math.sin', 'sin', (['(-ang)'], {}), '(-ang)\n', (5731, 5737), False, 'from math import sin, cos\n'), ((5741, 5750), 'math.cos', 'cos', (['(-ang)'], {}), '(-ang)\n', (5744, 5750), False, 'from math import sin, cos\n')] |
import xarray as xr
import pandas as pd
from xgcm import Grid
import numpy as np
import matplotlib.pyplot as plt
from dask.diagnostics import ProgressBar
import os
import bsose.preprocess as pp
# Build a closed DIC tracer budget from B-SOSE output: each term is stored
# as a thickness-weighted tendency (concentration per unit time times layer
# thickness) in the `budget` dataset.
ds,xgrid = pp.load_bsose()
# Define time metric
# HACK: trouble with time difference metric, so here just setting up own array
# with 5-days in seconds (432000 s) for all 438 output intervals.
dt = xr.DataArray(432000*np.ones(shape=(438)),dims='time')
# Reference density (kg/m^3), used to convert the freshwater flux below.
rho0 = 1035.0
# Define some masks
# Mask to set surface velocity point to zero
tmp = np.ones(len(ds['Zl']))
tmp[0]=0
maskZl = xr.DataArray(tmp,dims=['Zl'],coords={'Zl':ds['Zl']})
# Mask to set surface tracer point to one (everything else to zero)
tmp = np.ones(len(ds['Z']))
tmp[1:]=0
maskZ = xr.DataArray(tmp,dims=['Z'],coords={'Z':ds['Z']})
budget = xr.Dataset()
# Thicknesses: cell thickness plus the free-surface anomaly in the top cell.
h = ds['drC']+ds['ETAN']*maskZ
eta = ds['ETAN']*maskZ
# TERMS
# Tendency: time difference of thickness-weighted tracer from snapshots.
h_snaps = ds['ETAN_snaps']*maskZ + ds['drC']
hPhi = ds['TRAC01_snaps']*h_snaps
budget['TEND'] = xgrid.diff(hPhi,'T')/dt
# Advection: flux divergence of the advective tracer fluxes
# (vertical diff sign flipped because Zl increases downward in index).
ADVc = -(xgrid.diff(ds['ADVxTr01'],'X')+
         xgrid.diff(ds['ADVyTr01'],'Y',boundary='extend')+
         (-1*xgrid.diff(ds['ADVrTr01'],'Z',boundary='extend')))/ds['vC']
budget['ADV'] = h*ADVc
# Diffusion: flux divergence of the diffusive tracer fluxes.
DIFFc = -(xgrid.diff(ds['DFxETr01'],'X')+
          xgrid.diff(ds['DFyETr01'],'Y',boundary='extend')+
          (-1*xgrid.diff(ds['DFrITr01'],'Z',boundary='extend')))/ds['vC']
budget['DIFF'] = h*DIFFc
# Air-sea flux: applied to the surface cell only via maskZ.
SURFc = maskZ*(ds['BLGCFLX']/ds['drC'])
budget['SURF'] = h*SURFc
# Biology
BIOc = ds['BLGBIOC']
budget['BIO'] = h*BIOc
# Correction and Forcing (surface-only terms weighted by eta).
CORRc = maskZ*ds['WTRAC01']/ds['drC']
budget['CORR'] = eta*CORRc
FORCc = ds['ForcTr01']
budget['FORC'] = eta*FORCc
# Pressure solver correction: mismatch between freshwater flux, surface
# vertical velocity, and the actual free-surface tendency.
epsilon = ds['oceFWflx']/rho0 + ds['WVEL'].isel(Zl=0) - xgrid.diff(ds['ETAN_snaps'],'T')/dt
budget['EPS'] = maskZ*epsilon*ds['TRAC01']
# Signs in closed budget: the signed sum of all terms should vanish.
signs = {'TEND':-1,'ADV':1,'DIFF':1,'SURF':1,'BIO':1,'CORR':-1,'FORC':1,'EPS':-1}
# Residual: diagnostic of budget closure (should be ~0 everywhere).
budget['RES'] = (signs['TEND']*budget['TEND']
                 + signs['ADV']*budget['ADV'] + signs['DIFF']*budget['DIFF']
                 + signs['SURF']*budget['SURF'] + signs['BIO']*budget['BIO']
                 + signs['CORR']*budget['CORR'] + signs['FORC']*budget['FORC']
                 + signs['EPS']*budget['EPS'])
# Transpose variables to be the same orientation and chunk for lazy writing.
budget = budget.transpose('time', 'Z', 'YC', 'XC')
budget = budget.chunk({'time':1,'Z':52,'YC':588,'XC':int(2160/4)})

# Save one netCDF file per timestep, skipping files that are already
# complete and re-writing partial files from interrupted runs.
# Expected size in bytes of a complete single-timestep file; anything
# smaller is assumed to be a partial write.
COMPLETE_FILE_SIZE = 5029554312
savedir = '/local/projects/bSOSE_carbon/budget-DIC/netcdf/'


def _save_timestep(time, path, outfile):
    """Select one timestep of the budget and write it to *path* as netCDF."""
    print('Saving : '+outfile)
    dsnow = budget.sel({'time': time}).expand_dims(dim='time')
    with ProgressBar():
        dsnow.to_netcdf(path)
    dsnow.close()


for time in budget['time']:
    timepd = pd.to_datetime(time.values)
    outfile = 'bsose_i133_2013to2018_5day_'+str(timepd.date())+'_budget-DIC'
    path = savedir+outfile+'.nc'
    if os.path.isfile(path):
        if os.stat(path).st_size == COMPLETE_FILE_SIZE:
            print('Already saved : '+outfile)
            continue
        # BUG FIX: the previous os.system("rm ' + path") passed the literal
        # string "rm ' + path" to the shell and never deleted the file.
        print('Deleting partial file : '+outfile)
        os.remove(path)
    _save_timestep(time, path, outfile)
"numpy.ones",
"os.stat",
"xarray.Dataset",
"os.path.isfile",
"bsose.preprocess.load_bsose",
"xarray.DataArray",
"dask.diagnostics.ProgressBar",
"os.system",
"pandas.to_datetime"
] | [((207, 222), 'bsose.preprocess.load_bsose', 'pp.load_bsose', ([], {}), '()\n', (220, 222), True, 'import bsose.preprocess as pp\n'), ((554, 609), 'xarray.DataArray', 'xr.DataArray', (['tmp'], {'dims': "['Zl']", 'coords': "{'Zl': ds['Zl']}"}), "(tmp, dims=['Zl'], coords={'Zl': ds['Zl']})\n", (566, 609), True, 'import xarray as xr\n'), ((722, 774), 'xarray.DataArray', 'xr.DataArray', (['tmp'], {'dims': "['Z']", 'coords': "{'Z': ds['Z']}"}), "(tmp, dims=['Z'], coords={'Z': ds['Z']})\n", (734, 774), True, 'import xarray as xr\n'), ((782, 794), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (792, 794), True, 'import xarray as xr\n'), ((2587, 2614), 'pandas.to_datetime', 'pd.to_datetime', (['time.values'], {}), '(time.values)\n', (2601, 2614), True, 'import pandas as pd\n'), ((2732, 2752), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (2746, 2752), False, 'import os\n'), ((373, 391), 'numpy.ones', 'np.ones', ([], {'shape': '(438)'}), '(shape=438)\n', (380, 391), True, 'import numpy as np\n'), ((2926, 2950), 'os.system', 'os.system', (['"""rm \' + path"""'], {}), '("rm \' + path")\n', (2935, 2950), False, 'import os\n'), ((3349, 3362), 'dask.diagnostics.ProgressBar', 'ProgressBar', ([], {}), '()\n', (3360, 3362), False, 'from dask.diagnostics import ProgressBar\n'), ((2765, 2778), 'os.stat', 'os.stat', (['path'], {}), '(path)\n', (2772, 2778), False, 'import os\n'), ((3105, 3118), 'dask.diagnostics.ProgressBar', 'ProgressBar', ([], {}), '()\n', (3116, 3118), False, 'from dask.diagnostics import ProgressBar\n')] |
import numpy as np
from skimage.morphology import skeletonize
from skan import Skeleton, summarize
import networkx as nx
import toolz as tz
def branch_classification(thres):
    """Predict the extent of branching.

    Parameters
    ----------
    thres: array
        thresholded (binary) image to be analysed
    Returns
    -------
    skel: skan.Skeleton
        skeletonised image
    is_main: array
        per-branch flag (1/0): whether the branch lies on the main trunk
        (the longest weighted path of its connected component) or is a
        side branch
    BLF: int/float
        branch length fraction: fraction of total skeleton length that is
        NOT part of a main trunk
    """
    skeleton = skeletonize(thres)
    skel = Skeleton(skeleton, source_image=thres)
    summary = summarize(skel)
    is_main = np.zeros(summary.shape[0])
    us = summary['node-id-src']
    vs = summary['node-id-dst']
    ws = summary['branch-distance']

    # Map each (undirected) node pair to its row index in the summary table,
    # so trunk edges found on the graph can be flagged back in `is_main`.
    edge2idx = {
        (u, v): i
        for i, (u, v) in enumerate(zip(us, vs))
    }

    edge2idx.update({
        (v, u): i
        for i, (u, v) in enumerate(zip(us, vs))
    })

    g = nx.Graph()

    g.add_weighted_edges_from(
        zip(us, vs, ws)
    )

    # For every connected component, find its weighted diameter (the pair of
    # nodes with the largest shortest-path distance) and mark every edge on
    # that path as the component's main trunk.
    for conn in nx.connected_components(g):
        curr_val = 0
        curr_pair = None
        h = g.subgraph(conn)
        p = dict(nx.all_pairs_dijkstra_path_length(h))
        for src in p:
            for dst in p[src]:
                val = p[src][dst]
                if (val is not None
                        and np.isfinite(val)
                        and val > curr_val):
                    curr_val = val
                    curr_pair = (src, dst)
        # Walk consecutive node pairs along the diameter path.
        for i, j in tz.sliding_window(
                2,
                nx.shortest_path(
                    h, source=curr_pair[0], target=curr_pair[1], weight='weight'
                )
        ):
            is_main[edge2idx[(i, j)]] = 1
    summary['main'] = is_main

    # Branch Length Fraction: total skeleton length is approximated by the
    # pixel count; trunk length by summing main-branch Euclidean distances.
    total_length = np.sum(skeleton)
    trunk_length = 0
    for i in range(summary.shape[0]):
        if summary['main'][i]:
            trunk_length += summary['branch-distance'][i]
    branch_length = total_length - trunk_length
    BLF = branch_length/total_length
    return skel, is_main, BLF
| [
"skan.summarize",
"networkx.Graph",
"networkx.connected_components",
"numpy.sum",
"numpy.zeros",
"skan.Skeleton",
"networkx.shortest_path",
"numpy.isfinite",
"networkx.all_pairs_dijkstra_path_length",
"skimage.morphology.skeletonize"
] | [((599, 617), 'skimage.morphology.skeletonize', 'skeletonize', (['thres'], {}), '(thres)\n', (610, 617), False, 'from skimage.morphology import skeletonize\n'), ((629, 667), 'skan.Skeleton', 'Skeleton', (['skeleton'], {'source_image': 'thres'}), '(skeleton, source_image=thres)\n', (637, 667), False, 'from skan import Skeleton, summarize\n'), ((682, 697), 'skan.summarize', 'summarize', (['skel'], {}), '(skel)\n', (691, 697), False, 'from skan import Skeleton, summarize\n'), ((713, 739), 'numpy.zeros', 'np.zeros', (['summary.shape[0]'], {}), '(summary.shape[0])\n', (721, 739), True, 'import numpy as np\n'), ((1036, 1046), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1044, 1046), True, 'import networkx as nx\n'), ((1126, 1152), 'networkx.connected_components', 'nx.connected_components', (['g'], {}), '(g)\n', (1149, 1152), True, 'import networkx as nx\n'), ((1884, 1900), 'numpy.sum', 'np.sum', (['skeleton'], {}), '(skeleton)\n', (1890, 1900), True, 'import numpy as np\n'), ((1246, 1282), 'networkx.all_pairs_dijkstra_path_length', 'nx.all_pairs_dijkstra_path_length', (['h'], {}), '(h)\n', (1279, 1282), True, 'import networkx as nx\n'), ((1641, 1719), 'networkx.shortest_path', 'nx.shortest_path', (['h'], {'source': 'curr_pair[0]', 'target': 'curr_pair[1]', 'weight': '"""weight"""'}), "(h, source=curr_pair[0], target=curr_pair[1], weight='weight')\n", (1657, 1719), True, 'import networkx as nx\n'), ((1435, 1451), 'numpy.isfinite', 'np.isfinite', (['val'], {}), '(val)\n', (1446, 1451), True, 'import numpy as np\n')] |
import random as rn
import multiprocessing
import platform
import sys
import os
from pathlib import Path
import numpy as np
import pytest
# On POSIX platforms with Python >= 3.8, force the 'fork' start method --
# presumably because newer Pythons default to 'spawn' on macOS, which would
# break the multiprocessing-based tests; confirm.  Failure to set it (e.g.
# already set) is deliberately ignored.
if platform.system() != 'Windows':
    if sys.version_info[1] >= 8:
        try:
            #multiprocessing.get_start_method() != 'fork'
            multiprocessing.set_start_method("fork")
        except: #pylint:disable=bare-except # noqa: E722
            pass

# Make the current working directory importable so the quanguru package
# resolves when tests are run from a repository checkout.
path = str(Path(os.getcwd()))
sys.path.insert(0, path)
from quanguru.QuantumToolbox import states#pylint: disable=import-error,wrong-import-position
from quanguru.QuantumToolbox import operators as ops #pylint: disable=import-error,wrong-import-position
class Helpers:
    # used for the helper function fixture, put any helper function for testing as a static method in here and use
    # helpers fixture (below) in the testing
    @staticmethod
    def generateRndDimAndExc(minval, dim=None):
        """Return a (dimension, excitation) pair of random integers."""
        # generates a random integer btw 2 to 20 to be used as dimension (unless given), and another integer btw
        # minval to dim-1 to be used either as the excitation or number of components in a super-position state
        if dim is None:
            dim = rn.randint(2, 20)
        return dim, rn.randint(minval, dim-1)

    @staticmethod
    def generateRndStateParams(dim=None):
        """Return (dim, excs): a dimension and a normalised {level: population} dict."""
        # using a randomly generated dimension and number of components, create a dictionary of random excitation
        # positions and corresponding (random populations) as a key:value combination. populations are drawn from a
        # Dirichlet distribution, so they already sum to one (normalised); duplicate positions are de-duplicated.
        dim, ncom = Helpers.generateRndDimAndExc(0, dim)
        comps = list(dict.fromkeys([rn.randint(0, dim-1) for k in range(ncom+1)]))
        pops = np.random.dirichlet(np.ones(len(comps)), size=1)[0]
        excs = dict(zip(comps, pops))
        return dim, excs

    @staticmethod
    def generateRndPureState(po=1, dim=None):
        """Return (state, dim, excs) for a random normalised ket built from basis states."""
        # generate a random ket state: sum of basis kets weighted by sqrt(population)**po
        dim, excs = Helpers.generateRndStateParams(dim)
        state = sum([(np.sqrt(v)**po)*states.basis(dim, k) for k, v in excs.items()])
        return state, dim, excs
@pytest.fixture
def helpers():
    """Fixture exposing the static helper methods above to the tests."""
    return Helpers
@pytest.fixture
def referenceValues():
    """Fixture with hand-written reference matrices (ladder and Pauli
    operators) and room for other reference constants used in tests."""
    refs = {}
    refs['sigmaMinusReference'] = np.array([[0, 0], [1, 0]])
    refs['sigmaPlusReference'] = np.array([[0, 1], [0, 0]])
    refs['sigmaXReference'] = np.array([[0, 1], [1, 0]])
    refs['sigmaYReference'] = np.array([[0, -1j], [1j, 0]])
    refs['sigmaZReference'] = np.array([[1, 0], [0, -1]])
    return refs
# Column-vector representations of special single-qubit states (computational,
# X/Y eigenstates) and the four two-qubit Bell/product states.
_INV_SQRT2 = 1 / np.sqrt(2)
qubitStates = {
    '0': np.array([[0], [1]]),
    '1': np.array([[1], [0]]),
    'x+': _INV_SQRT2 * np.array([[1], [1]]),
    'x-': _INV_SQRT2 * np.array([[1], [-1]]),
    'y+': _INV_SQRT2 * np.array([[1], [1j]]),
    'y-': _INV_SQRT2 * np.array([[1], [-1j]]),
    'BellPhi+': _INV_SQRT2 * np.array([[1], [0], [0], [1]]),
    'BellPhi-': _INV_SQRT2 * np.array([[1], [0], [0], [-1]]),
    'BellPsi+': _INV_SQRT2 * np.array([[0], [1], [1], [0]]),
    'BellPsi-': _INV_SQRT2 * np.array([[0], [1], [-1], [0]]),
    'product1': np.array([[1], [0], [0], [0]]),
    'product2': np.array([[0], [1], [0], [0]]),
    'product3': np.array([[0], [0], [1], [0]]),
    'product4': np.array([[0], [0], [0], [1]]),
}
@pytest.fixture
def specialQubitStates():
    # a fixture returning a dictionary of some special qubit states, plus each
    # state's density matrix under the same key suffixed with 'dm'
    return {**qubitStates, **{k+'dm':states.densityMatrix(v) for k, v in qubitStates.items()}}
@pytest.fixture
def singleQubitOperators():
    # a fixture returning the single-qubit operators (Pauli z/y/x and the
    # raising/lowering operators) generated by our QuantumToolbox functions
    return {
        'sz':ops.sigmaz(), 'sy':ops.sigmay(), 'sx':ops.sigmax(), 'sp':ops.sigmap(), 'sm':ops.sigmam()
    }
| [
"quanguru.QuantumToolbox.states.basis",
"quanguru.QuantumToolbox.operators.sigmaz",
"sys.path.insert",
"numpy.sqrt",
"quanguru.QuantumToolbox.states.densityMatrix",
"os.getcwd",
"numpy.array",
"platform.system",
"quanguru.QuantumToolbox.operators.sigmam",
"quanguru.QuantumToolbox.operators.sigmax"... | [((436, 460), 'sys.path.insert', 'sys.path.insert', (['(0)', 'path'], {}), '(0, path)\n', (451, 460), False, 'import sys\n'), ((142, 159), 'platform.system', 'platform.system', ([], {}), '()\n', (157, 159), False, 'import platform\n'), ((2639, 2659), 'numpy.array', 'np.array', (['[[0], [1]]'], {}), '([[0], [1]])\n', (2647, 2659), True, 'import numpy as np\n'), ((2666, 2686), 'numpy.array', 'np.array', (['[[1], [0]]'], {}), '([[1], [0]])\n', (2674, 2686), True, 'import numpy as np\n'), ((3170, 3200), 'numpy.array', 'np.array', (['[[1], [0], [0], [0]]'], {}), '([[1], [0], [0], [0]])\n', (3178, 3200), True, 'import numpy as np\n'), ((3222, 3252), 'numpy.array', 'np.array', (['[[0], [1], [0], [0]]'], {}), '([[0], [1], [0], [0]])\n', (3230, 3252), True, 'import numpy as np\n'), ((3274, 3304), 'numpy.array', 'np.array', (['[[0], [0], [1], [0]]'], {}), '([[0], [0], [1], [0]])\n', (3282, 3304), True, 'import numpy as np\n'), ((3326, 3356), 'numpy.array', 'np.array', (['[[0], [0], [0], [1]]'], {}), '([[0], [0], [0], [1]])\n', (3334, 3356), True, 'import numpy as np\n'), ((422, 433), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (431, 433), False, 'import os\n'), ((2365, 2391), 'numpy.array', 'np.array', (['[[0, 0], [1, 0]]'], {}), '([[0, 0], [1, 0]])\n', (2373, 2391), True, 'import numpy as np\n'), ((2415, 2441), 'numpy.array', 'np.array', (['[[0, 1], [0, 0]]'], {}), '([[0, 1], [0, 0]])\n', (2423, 2441), True, 'import numpy as np\n'), ((2470, 2496), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (2478, 2496), True, 'import numpy as np\n'), ((2517, 2550), 'numpy.array', 'np.array', (['[[0, -1.0j], [1.0j, 0]]'], {}), '([[0, -1.0j], [1.0j, 0]])\n', (2525, 2550), True, 'import numpy as np\n'), ((2575, 2602), 'numpy.array', 'np.array', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (2583, 2602), True, 'import numpy as np\n'), ((2717, 2737), 'numpy.array', 'np.array', (['[[1], [1]]'], {}), 
'([[1], [1]])\n', (2725, 2737), True, 'import numpy as np\n'), ((2760, 2781), 'numpy.array', 'np.array', (['[[1], [-1]]'], {}), '([[1], [-1]])\n', (2768, 2781), True, 'import numpy as np\n'), ((2812, 2835), 'numpy.array', 'np.array', (['[[1], [1.0j]]'], {}), '([[1], [1.0j]])\n', (2820, 2835), True, 'import numpy as np\n'), ((2856, 2880), 'numpy.array', 'np.array', (['[[1], [-1.0j]]'], {}), '([[1], [-1.0j]])\n', (2864, 2880), True, 'import numpy as np\n'), ((2915, 2945), 'numpy.array', 'np.array', (['[[1], [0], [0], [1]]'], {}), '([[1], [0], [0], [1]])\n', (2923, 2945), True, 'import numpy as np\n'), ((2982, 3013), 'numpy.array', 'np.array', (['[[1], [0], [0], [-1]]'], {}), '([[1], [0], [0], [-1]])\n', (2990, 3013), True, 'import numpy as np\n'), ((3050, 3080), 'numpy.array', 'np.array', (['[[0], [1], [1], [0]]'], {}), '([[0], [1], [1], [0]])\n', (3058, 3080), True, 'import numpy as np\n'), ((3117, 3148), 'numpy.array', 'np.array', (['[[0], [1], [-1], [0]]'], {}), '([[0], [1], [-1], [0]])\n', (3125, 3148), True, 'import numpy as np\n'), ((3710, 3722), 'quanguru.QuantumToolbox.operators.sigmaz', 'ops.sigmaz', ([], {}), '()\n', (3720, 3722), True, 'from quanguru.QuantumToolbox import operators as ops\n'), ((3729, 3741), 'quanguru.QuantumToolbox.operators.sigmay', 'ops.sigmay', ([], {}), '()\n', (3739, 3741), True, 'from quanguru.QuantumToolbox import operators as ops\n'), ((3748, 3760), 'quanguru.QuantumToolbox.operators.sigmax', 'ops.sigmax', ([], {}), '()\n', (3758, 3760), True, 'from quanguru.QuantumToolbox import operators as ops\n'), ((3767, 3779), 'quanguru.QuantumToolbox.operators.sigmap', 'ops.sigmap', ([], {}), '()\n', (3777, 3779), True, 'from quanguru.QuantumToolbox import operators as ops\n'), ((3786, 3798), 'quanguru.QuantumToolbox.operators.sigmam', 'ops.sigmam', ([], {}), '()\n', (3796, 3798), True, 'from quanguru.QuantumToolbox import operators as ops\n'), ((290, 330), 'multiprocessing.set_start_method', 'multiprocessing.set_start_method', 
(['"""fork"""'], {}), "('fork')\n", (322, 330), False, 'import multiprocessing\n'), ((1149, 1166), 'random.randint', 'rn.randint', (['(2)', '(20)'], {}), '(2, 20)\n', (1159, 1166), True, 'import random as rn\n'), ((1187, 1214), 'random.randint', 'rn.randint', (['minval', '(dim - 1)'], {}), '(minval, dim - 1)\n', (1197, 1214), True, 'import random as rn\n'), ((2705, 2715), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2712, 2715), True, 'import numpy as np\n'), ((2748, 2758), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2755, 2758), True, 'import numpy as np\n'), ((2800, 2810), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2807, 2810), True, 'import numpy as np\n'), ((2844, 2854), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2851, 2854), True, 'import numpy as np\n'), ((2903, 2913), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2910, 2913), True, 'import numpy as np\n'), ((2970, 2980), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2977, 2980), True, 'import numpy as np\n'), ((3038, 3048), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3045, 3048), True, 'import numpy as np\n'), ((3105, 3115), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3112, 3115), True, 'import numpy as np\n'), ((3512, 3535), 'quanguru.QuantumToolbox.states.densityMatrix', 'states.densityMatrix', (['v'], {}), '(v)\n', (3532, 3535), False, 'from quanguru.QuantumToolbox import states\n'), ((1591, 1613), 'random.randint', 'rn.randint', (['(0)', '(dim - 1)'], {}), '(0, dim - 1)\n', (1601, 1613), True, 'import random as rn\n'), ((1965, 1985), 'quanguru.QuantumToolbox.states.basis', 'states.basis', (['dim', 'k'], {}), '(dim, k)\n', (1977, 1985), False, 'from quanguru.QuantumToolbox import states\n'), ((1949, 1959), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (1956, 1959), True, 'import numpy as np\n')] |
import gym
from gym.spaces import Discrete, Box
import numpy as np
class MemoryGame(gym.Env):
'''An instance of the memory game with noisy observations'''
def __init__(self, config={}):
self._length = config.get("length", 5)
self._num_cues =config.get("num_cues", 2)
self._noise = config.get("noise", 0.1)
self._image = config.get("image", False)
if self._image:
self._image_size = config.get("image_size", 100)
self.observation_space = Box(0, 2, shape=(1, self._image_size, self._image_size))
else:
self.observation_space = Box(0, 2, shape=(self._num_cues + 2,))
self.action_space = Discrete(self._num_cues)
self._current_step = 0
self._current_cue = 0
def _vector_obs(self):
obs = np.random.uniform(0, self._noise, self.observation_space.shape)
if 0 == self._current_step:
obs[-2] += 1
obs[self._current_cue] += 1
elif self._length == self._current_step:
obs[-1] += 1
return obs
def _image_obs(self):
obs = np.random.uniform(0, self._noise, self.observation_space.shape)
if 0 == self._current_step:
slope = self._current_cue * (2.0 / (self._num_cues - 1)) - 1.0
offset = self._image_size // 2
for x in range(self._image_size):
y = int((x - offset) * slope)
y = max(0, min(self._image_size - 1, y + offset))
obs[0, x, y] += 1.0
return obs
def _obs(self):
if self._image:
return self._image_obs()
else:
return self._vector_obs()
def reset(self):
self._current_step = 0
self._current_cue = np.random.randint(self._num_cues)
return self._obs()
def step(self, action):
if self._current_step < self._length:
self._current_step += 1
return self._obs(), 0, False, {}
else:
reward = (1 if action == self._current_cue else 0)
return self._obs(), reward, True, {}
if __name__ == "__main__":
env_config = {
"length": 40,
"num_cues": 2,
"noise": 0.1,
"image": True,
"image_size": 32,
}
env = MemoryGame(env_config)
obs = env.reset()
print(f"\nImage Observation ({obs.shape}):\n")
for y in range(obs.shape[2]):
row = []
for x in range(obs.shape[1]):
if obs[0][x][y] > 0.5:
row.append("#")
else:
row.append(".")
print(" ".join(row))
print("\n") | [
"numpy.random.randint",
"gym.spaces.Box",
"gym.spaces.Discrete",
"numpy.random.uniform"
] | [((696, 720), 'gym.spaces.Discrete', 'Discrete', (['self._num_cues'], {}), '(self._num_cues)\n', (704, 720), False, 'from gym.spaces import Discrete, Box\n'), ((825, 888), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'self._noise', 'self.observation_space.shape'], {}), '(0, self._noise, self.observation_space.shape)\n', (842, 888), True, 'import numpy as np\n'), ((1124, 1187), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'self._noise', 'self.observation_space.shape'], {}), '(0, self._noise, self.observation_space.shape)\n', (1141, 1187), True, 'import numpy as np\n'), ((1781, 1814), 'numpy.random.randint', 'np.random.randint', (['self._num_cues'], {}), '(self._num_cues)\n', (1798, 1814), True, 'import numpy as np\n'), ((512, 568), 'gym.spaces.Box', 'Box', (['(0)', '(2)'], {'shape': '(1, self._image_size, self._image_size)'}), '(0, 2, shape=(1, self._image_size, self._image_size))\n', (515, 568), False, 'from gym.spaces import Discrete, Box\n'), ((620, 658), 'gym.spaces.Box', 'Box', (['(0)', '(2)'], {'shape': '(self._num_cues + 2,)'}), '(0, 2, shape=(self._num_cues + 2,))\n', (623, 658), False, 'from gym.spaces import Discrete, Box\n')] |
from utils import copy_vocab
import run_setting
from train import train_lm, train_cls, get_model_name
from nltk import ToktokTokenizer as ToktokTokenizer_
from fastai.text.data import NumericalizeProcessor, TextList, ItemLists, TokenizeProcessor
from fastai.basic_data import DatasetType
from fastai.text.transform import Tokenizer, BaseTokenizer
from sklearn.metrics import f1_score, accuracy_score
import numpy as np
import pandas as pd
import torch
from pathlib import Path
from collections import defaultdict
import argparse
data_folder = Path('../data/text_cls')
class ToktokTokenizer(BaseTokenizer):
"Basic class for a tokenizer function."
def __init__(self, lang: str):
self.lang = lang
self.base = ToktokTokenizer_()
def tokenizer(self, t: str):
return self.base.tokenize(t)
def get_tokenizer(lang):
if lang == 'th':
from pythainlp.ulmfit import ThaiTokenizer, pre_rules_th, post_rules_th
return Tokenizer(tok_func=ThaiTokenizer, lang='th', pre_rules=pre_rules_th, post_rules=post_rules_th)
else:
return Tokenizer(tok_func=ToktokTokenizer, lang=lang)
def get_tokenizer_preprocesser(args):
tokenizer = get_tokenizer(args.target_lang)
return TokenizeProcessor(tokenizer=tokenizer, chunksize=10000, mark_fields=False)
def eval_multiclass(preds, y_true):
preds = np.argmax(preds, axis=-1)
f1_macro = f1_score(y_true, preds, average='macro')
acc = accuracy_score(y_true, preds)
scores = dict(F1_macro=f1_macro, Acc=acc)
return scores
def eval_multilabel(pred_probs, y_true):
preds = (pred_probs > 0.5).astype(int)
f1_macro = f1_score(y_true, preds, average='macro')
acc = accuracy_score(y_true, preds)
scores = dict(F1_macro=f1_macro, Acc=acc)
return scores
def print_scores(scores):
str_out = ', '.join(
f'{metric}: {value:.4f}' for metric, value in scores.items())
print(str_out)
def print_scores_summary(scores):
str_out = ', '.join(
f'{metric}: {np.mean(values):.4f}' for metric, values in scores.items())
print('Mean:', str_out)
str_out = ', '.join(
f'{metric}: {np.std(values):.4f}' for metric, values in scores.items())
print('Std :', str_out)
FALSY_STRINGS = {'off', 'false', '0'}
TRUTHY_STRINGS = {'on', 'true', '1'}
def bool_flag(s):
"""
Parse boolean arguments from the command line.
"""
if s.lower() in FALSY_STRINGS:
return False
elif s.lower() in TRUTHY_STRINGS:
return True
else:
raise argparse.ArgumentTypeError("Invalid value for a boolean flag!")
def none_or_str(s):
if s is None or s.lower() == 'none':
return None
return s
def none_or_int(s):
if s is None or s.lower() == 'none':
return None
return int(s)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('run', type=str)
parser.add_argument('--name', type=str, required=True)
parser.add_argument('--runs', type=int, default=5)
# DHG arguments
parser.add_argument('--langs', type=str, default='en')
parser.add_argument('--dict', type=str, default='word2word')
parser.add_argument('--directed', type=bool_flag, default=True)
parser.add_argument('--w2v', type=none_or_str, default='fasttext')
parser.add_argument('--reverse', type=bool_flag, default=False)
parser.add_argument('--add_from_dict', type=int, default=0)
parser.add_argument('--save_temp', type=bool_flag, default=True)
parser.add_argument('--use_temp_only', type=bool_flag, default=False)
# DHGNet arguments
parser.add_argument('--conv', type=str, default='hgnn')
parser.add_argument('--gnn_layers', type=int, default=2)
parser.add_argument('--layer_norm', type=bool_flag, default=True)
parser.add_argument('--residual', type=str, default=True)
parser.add_argument('--freeze_cross', type=bool_flag, default=True)
# Classifier->AWDLSTM arguments
parser.add_argument('--emb_sz', type=int, default=300)
parser.add_argument('--rnn_layers', type=int, default=3)
parser.add_argument('--tie_weights', type=bool_flag, default=True)
# Training arguments
parser.add_argument('--pretrain_align_epochs', type=int, default=50)
parser.add_argument('--pretrain_lm_epochs', type=int, default=200)
parser.add_argument('--train_cls_epochs', type=int, default=20)
parser.add_argument('--load_lm_gnn', type=none_or_str, default=None)
args = parser.parse_args()
args = run_setting.update(args)
print(args)
assert args.pretrain_lm_epochs or args.train_cls_epochs
model_name = get_model_name(args)
folder = data_folder / f'{args.dataset}'
model_path = folder
train_df = pd.read_csv(folder / 'train_df.csv')
valid_df = pd.read_csv(folder / 'valid_df.csv')
test_df = pd.read_csv(folder / 'test_df.csv')
if args.label_col is None and args.multi_label:
args.label_col = list(train_df.columns[1:])
text_col = args.text_col
label_col = args.label_col
vocab = None
vocab_path = folder / 'models' / f'{model_name}_vocab.pkl'
vocab_path.parent.mkdir(exist_ok=True)
# prepare vocab from dataset
data_prep = TextList.from_df(train_df, model_path, cols=[text_col], processor=[
get_tokenizer_preprocesser(args), NumericalizeProcessor(vocab=vocab)])
data_prep = data_prep.process()
vocab = data_prep.vocab
args.num_lm_vocab = len(vocab.itos)
del data_prep
is_print_model = True
numericalizer = NumericalizeProcessor(vocab=copy_vocab(vocab))
def _train_lm():
nonlocal is_print_model, numericalizer
if numericalizer is None:
numericalizer = NumericalizeProcessor(vocab=copy_vocab(vocab))
processor = [get_tokenizer_preprocesser(args), numericalizer]
train_data = TextList.from_df(train_df, model_path, cols=[
text_col], processor=processor)
val_data = TextList.from_df(valid_df, model_path, cols=[
text_col], processor=processor)
data_lm = (ItemLists(model_path, train=train_data, valid=val_data)
.label_for_lm()
.databunch(bs=64)
)
train_lm(data_lm, args, print_model=is_print_model)
data_lm.vocab.save(vocab_path)
is_print_model = False
del data_lm
torch.cuda.empty_cache()
def _train_cls():
nonlocal is_print_model, numericalizer
if numericalizer is None:
numericalizer = NumericalizeProcessor(vocab=copy_vocab(vocab))
processor = [get_tokenizer_preprocesser(args), numericalizer]
train_data = TextList.from_df(train_df, model_path, cols=[
text_col], processor=processor)
val_data = TextList.from_df(valid_df, model_path, cols=[
text_col], processor=processor)
data_cls = (ItemLists(model_path, train=train_data, valid=val_data)
.label_from_df(label_col, label_cls=None)
.add_test(TextList.from_df(test_df, model_path, cols=[text_col], processor=processor))
.databunch(bs=32)
)
learn = train_cls(data_cls, args, print_model=is_print_model)
is_print_model = False
return learn
if args.pretrain_lm_epochs > 0:
_train_lm()
if args.train_cls_epochs <= 0:
return
valid_scores = defaultdict(list)
test_scores = defaultdict(list)
for i_run in range(args.runs):
print(f'===== RUN #{i_run} =====')
learn = _train_cls()
for _m in learn.model.modules():
if hasattr(_m, '_need_update'):
_m._need_update = True
y_val, y_test = np.array(
valid_df[label_col]), np.array(test_df[label_col])
if not args.multi_label:
y_val, y_test = np.vectorize(learn.data.c2i.get)(
y_val), np.vectorize(learn.data.c2i.get)(y_test)
probs, y = learn.get_preds(DatasetType.Valid, ordered=True)
preds = probs.numpy()
if not args.multi_label:
scores = eval_multiclass(preds, y_val)
else:
scores = eval_multilabel(preds, y_val)
print_scores(scores)
for metric, value in scores.items():
valid_scores[metric].append(value)
probs, y = learn.get_preds(DatasetType.Test, ordered=True)
preds = probs.numpy()
if not args.multi_label:
scores = eval_multiclass(preds, y_test)
else:
scores = eval_multilabel(preds, y_test)
print_scores(scores)
for metric, value in scores.items():
test_scores[metric].append(value)
del learn
torch.cuda.empty_cache()
print('===== END RUN =====')
print()
print('Valid')
print_scores_summary(valid_scores)
print('Test')
print_scores_summary(test_scores)
if __name__ == '__main__':
main()
| [
"run_setting.update",
"pandas.read_csv",
"nltk.ToktokTokenizer",
"numpy.array",
"fastai.text.data.TextList.from_df",
"train.train_cls",
"numpy.mean",
"argparse.ArgumentParser",
"pathlib.Path",
"train.train_lm",
"fastai.text.transform.Tokenizer",
"train.get_model_name",
"fastai.text.data.Toke... | [((559, 583), 'pathlib.Path', 'Path', (['"""../data/text_cls"""'], {}), "('../data/text_cls')\n", (563, 583), False, 'from pathlib import Path\n'), ((1272, 1346), 'fastai.text.data.TokenizeProcessor', 'TokenizeProcessor', ([], {'tokenizer': 'tokenizer', 'chunksize': '(10000)', 'mark_fields': '(False)'}), '(tokenizer=tokenizer, chunksize=10000, mark_fields=False)\n', (1289, 1346), False, 'from fastai.text.data import NumericalizeProcessor, TextList, ItemLists, TokenizeProcessor\n'), ((1401, 1426), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(-1)'}), '(preds, axis=-1)\n', (1410, 1426), True, 'import numpy as np\n'), ((1443, 1483), 'sklearn.metrics.f1_score', 'f1_score', (['y_true', 'preds'], {'average': '"""macro"""'}), "(y_true, preds, average='macro')\n", (1451, 1483), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((1495, 1524), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'preds'], {}), '(y_true, preds)\n', (1509, 1524), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((1697, 1737), 'sklearn.metrics.f1_score', 'f1_score', (['y_true', 'preds'], {'average': '"""macro"""'}), "(y_true, preds, average='macro')\n", (1705, 1737), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((1749, 1778), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'preds'], {}), '(y_true, preds)\n', (1763, 1778), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((2927, 2952), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2950, 2952), False, 'import argparse\n'), ((4632, 4656), 'run_setting.update', 'run_setting.update', (['args'], {}), '(args)\n', (4650, 4656), False, 'import run_setting\n'), ((4757, 4777), 'train.get_model_name', 'get_model_name', (['args'], {}), '(args)\n', (4771, 4777), False, 'from train import train_lm, train_cls, get_model_name\n'), ((4867, 4903), 'pandas.read_csv', 'pd.read_csv', (["(folder / 
'train_df.csv')"], {}), "(folder / 'train_df.csv')\n", (4878, 4903), True, 'import pandas as pd\n'), ((4920, 4956), 'pandas.read_csv', 'pd.read_csv', (["(folder / 'valid_df.csv')"], {}), "(folder / 'valid_df.csv')\n", (4931, 4956), True, 'import pandas as pd\n'), ((4972, 5007), 'pandas.read_csv', 'pd.read_csv', (["(folder / 'test_df.csv')"], {}), "(folder / 'test_df.csv')\n", (4983, 5007), True, 'import pandas as pd\n'), ((7747, 7764), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7758, 7764), False, 'from collections import defaultdict\n'), ((7784, 7801), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7795, 7801), False, 'from collections import defaultdict\n'), ((757, 775), 'nltk.ToktokTokenizer', 'ToktokTokenizer_', ([], {}), '()\n', (773, 775), True, 'from nltk import ToktokTokenizer as ToktokTokenizer_\n'), ((999, 1097), 'fastai.text.transform.Tokenizer', 'Tokenizer', ([], {'tok_func': 'ThaiTokenizer', 'lang': '"""th"""', 'pre_rules': 'pre_rules_th', 'post_rules': 'post_rules_th'}), "(tok_func=ThaiTokenizer, lang='th', pre_rules=pre_rules_th,\n post_rules=post_rules_th)\n", (1008, 1097), False, 'from fastai.text.transform import Tokenizer, BaseTokenizer\n'), ((1121, 1167), 'fastai.text.transform.Tokenizer', 'Tokenizer', ([], {'tok_func': 'ToktokTokenizer', 'lang': 'lang'}), '(tok_func=ToktokTokenizer, lang=lang)\n', (1130, 1167), False, 'from fastai.text.transform import Tokenizer, BaseTokenizer\n'), ((6035, 6111), 'fastai.text.data.TextList.from_df', 'TextList.from_df', (['train_df', 'model_path'], {'cols': '[text_col]', 'processor': 'processor'}), '(train_df, model_path, cols=[text_col], processor=processor)\n', (6051, 6111), False, 'from fastai.text.data import NumericalizeProcessor, TextList, ItemLists, TokenizeProcessor\n'), ((6172, 6248), 'fastai.text.data.TextList.from_df', 'TextList.from_df', (['valid_df', 'model_path'], {'cols': '[text_col]', 'processor': 'processor'}), '(valid_df, model_path, 
cols=[text_col], processor=processor)\n', (6188, 6248), False, 'from fastai.text.data import NumericalizeProcessor, TextList, ItemLists, TokenizeProcessor\n'), ((6468, 6519), 'train.train_lm', 'train_lm', (['data_lm', 'args'], {'print_model': 'is_print_model'}), '(data_lm, args, print_model=is_print_model)\n', (6476, 6519), False, 'from train import train_lm, train_cls, get_model_name\n'), ((6622, 6646), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (6644, 6646), False, 'import torch\n'), ((6926, 7002), 'fastai.text.data.TextList.from_df', 'TextList.from_df', (['train_df', 'model_path'], {'cols': '[text_col]', 'processor': 'processor'}), '(train_df, model_path, cols=[text_col], processor=processor)\n', (6942, 7002), False, 'from fastai.text.data import NumericalizeProcessor, TextList, ItemLists, TokenizeProcessor\n'), ((7063, 7139), 'fastai.text.data.TextList.from_df', 'TextList.from_df', (['valid_df', 'model_path'], {'cols': '[text_col]', 'processor': 'processor'}), '(valid_df, model_path, cols=[text_col], processor=processor)\n', (7079, 7139), False, 'from fastai.text.data import NumericalizeProcessor, TextList, ItemLists, TokenizeProcessor\n'), ((7505, 7558), 'train.train_cls', 'train_cls', (['data_cls', 'args'], {'print_model': 'is_print_model'}), '(data_cls, args, print_model=is_print_model)\n', (7514, 7558), False, 'from train import train_lm, train_cls, get_model_name\n'), ((9095, 9119), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (9117, 9119), False, 'import torch\n'), ((2623, 2686), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Invalid value for a boolean flag!"""'], {}), "('Invalid value for a boolean flag!')\n", (2649, 2686), False, 'import argparse\n'), ((5740, 5757), 'utils.copy_vocab', 'copy_vocab', (['vocab'], {}), '(vocab)\n', (5750, 5757), False, 'from utils import copy_vocab\n'), ((8070, 8099), 'numpy.array', 'np.array', (['valid_df[label_col]'], {}), '(valid_df[label_col])\n', 
(8078, 8099), True, 'import numpy as np\n'), ((8115, 8143), 'numpy.array', 'np.array', (['test_df[label_col]'], {}), '(test_df[label_col])\n', (8123, 8143), True, 'import numpy as np\n'), ((5497, 5531), 'fastai.text.data.NumericalizeProcessor', 'NumericalizeProcessor', ([], {'vocab': 'vocab'}), '(vocab=vocab)\n', (5518, 5531), False, 'from fastai.text.data import NumericalizeProcessor, TextList, ItemLists, TokenizeProcessor\n'), ((2080, 2095), 'numpy.mean', 'np.mean', (['values'], {}), '(values)\n', (2087, 2095), True, 'import numpy as np\n'), ((2217, 2231), 'numpy.std', 'np.std', (['values'], {}), '(values)\n', (2223, 2231), True, 'import numpy as np\n'), ((5923, 5940), 'utils.copy_vocab', 'copy_vocab', (['vocab'], {}), '(vocab)\n', (5933, 5940), False, 'from utils import copy_vocab\n'), ((6812, 6829), 'utils.copy_vocab', 'copy_vocab', (['vocab'], {}), '(vocab)\n', (6822, 6829), False, 'from utils import copy_vocab\n'), ((7349, 7424), 'fastai.text.data.TextList.from_df', 'TextList.from_df', (['test_df', 'model_path'], {'cols': '[text_col]', 'processor': 'processor'}), '(test_df, model_path, cols=[text_col], processor=processor)\n', (7365, 7424), False, 'from fastai.text.data import NumericalizeProcessor, TextList, ItemLists, TokenizeProcessor\n'), ((8207, 8239), 'numpy.vectorize', 'np.vectorize', (['learn.data.c2i.get'], {}), '(learn.data.c2i.get)\n', (8219, 8239), True, 'import numpy as np\n'), ((8266, 8298), 'numpy.vectorize', 'np.vectorize', (['learn.data.c2i.get'], {}), '(learn.data.c2i.get)\n', (8278, 8298), True, 'import numpy as np\n'), ((6307, 6362), 'fastai.text.data.ItemLists', 'ItemLists', (['model_path'], {'train': 'train_data', 'valid': 'val_data'}), '(model_path, train=train_data, valid=val_data)\n', (6316, 6362), False, 'from fastai.text.data import NumericalizeProcessor, TextList, ItemLists, TokenizeProcessor\n'), ((7199, 7254), 'fastai.text.data.ItemLists', 'ItemLists', (['model_path'], {'train': 'train_data', 'valid': 'val_data'}), '(model_path, 
train=train_data, valid=val_data)\n', (7208, 7254), False, 'from fastai.text.data import NumericalizeProcessor, TextList, ItemLists, TokenizeProcessor\n')] |
from tensorflow import keras as k
from tensorflow.keras import layers, models
import numpy as np
from tensorflow.python.keras.models import Model
class MockModel:
@classmethod
def get_model(cls) -> Model:
# Create a fake model. Basically, we simulate a text classifier where we have 3 words which are represented with 3
# digits: 1, 2 and 3. 0 is reserved for padding.
# There is an embedding matrix that encode each word into a vector containing exactly one 1 representing the word itself.
# So word 2 is represented as [0, 1, 0]
# The classifier tells if there is the occurence of a given word. The output consists of a binary vector, where
# the position p_i of a 1 indicates that the word i was present in the input vector.
model = models.Sequential([
layers.Embedding(input_dim=4, output_dim=3, input_length=3),
layers.GlobalMaxPool1D(),
])
model.compile(loss=k.losses.BinaryCrossentropy())
model.layers[0].set_weights([np.array([[0,0,0],[1,0,0], [0, 1, 0], [0,0,1]])])
return model
| [
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.losses.BinaryCrossentropy",
"numpy.array",
"tensorflow.keras.layers.GlobalMaxPool1D"
] | [((835, 894), 'tensorflow.keras.layers.Embedding', 'layers.Embedding', ([], {'input_dim': '(4)', 'output_dim': '(3)', 'input_length': '(3)'}), '(input_dim=4, output_dim=3, input_length=3)\n', (851, 894), False, 'from tensorflow.keras import layers, models\n'), ((908, 932), 'tensorflow.keras.layers.GlobalMaxPool1D', 'layers.GlobalMaxPool1D', ([], {}), '()\n', (930, 932), False, 'from tensorflow.keras import layers, models\n'), ((973, 1002), 'tensorflow.keras.losses.BinaryCrossentropy', 'k.losses.BinaryCrossentropy', ([], {}), '()\n', (1000, 1002), True, 'from tensorflow import keras as k\n'), ((1042, 1096), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (1050, 1096), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import sys
# https://github.com/Callidon/pyHDT
import hdt
import numpy as np
from tqdm import tqdm
def generate_stats(doc):
n_edges = len(doc)
n_vertices = 0
# create integer mapping
vertices = set()
triples, c = doc.search_triples('', '', '')
for s, p, o in tqdm(triples, total=c):
vertices.add(s)
vertices.add(o)
n_vertices = len(vertices)
vertex_idx_map = {vertex:i for i, vertex in enumerate(vertices)}
# compute degrees
degree_array = np.zeros((n_vertices, 3), dtype=int)
triples, c = doc.search_triples('', '', '')
for s, p, o in tqdm(triples, total=c):
s_idx = vertex_idx_map[s]
o_idx = vertex_idx_map[o]
degree_array[s_idx, 0] += 1 # outdegree
degree_array[o_idx, 1] += 1 # indegree
# overal degree
degree_array[:,2] = degree_array[:, 0] + degree_array[:, 1]
sys.stdout.write('- degree: min %d / max %d / avg %f\n' % (np.min(degree_array[:,2]),
np.max(degree_array[:,2]),
np.mean(degree_array[:,2])))
sys.stdout.write('- in degree: min %d / max %d / avg %f\n' % (np.min(degree_array[:,1]),
np.max(degree_array[:,1]),
np.mean(degree_array[:,1])))
sys.stdout.write('- out degree: min %d / max %d / avg %f\n' % (np.min(degree_array[:,0]),
np.max(degree_array[:,0]),
np.mean(degree_array[:,0])))
# compute density
sys.stdout.write('- density: %f\n' % density(n_vertices, n_edges))
def density(num_vertices, num_edges):
return num_edges / (num_vertices * (num_vertices - 1))
if __name__ == "__main__":
args = sys.argv[1:]
if len(args) != 1:
print("USAGE: ./graphstats.py <graph_stripped.hdt>")
hdtfile = args[0]
doc = hdt.HDTDocument(hdtfile)
generate_stats(doc)
| [
"numpy.mean",
"tqdm.tqdm",
"hdt.HDTDocument",
"numpy.max",
"numpy.zeros",
"numpy.min"
] | [((309, 331), 'tqdm.tqdm', 'tqdm', (['triples'], {'total': 'c'}), '(triples, total=c)\n', (313, 331), False, 'from tqdm import tqdm\n'), ((524, 560), 'numpy.zeros', 'np.zeros', (['(n_vertices, 3)'], {'dtype': 'int'}), '((n_vertices, 3), dtype=int)\n', (532, 560), True, 'import numpy as np\n'), ((628, 650), 'tqdm.tqdm', 'tqdm', (['triples'], {'total': 'c'}), '(triples, total=c)\n', (632, 650), False, 'from tqdm import tqdm\n'), ((2078, 2102), 'hdt.HDTDocument', 'hdt.HDTDocument', (['hdtfile'], {}), '(hdtfile)\n', (2093, 2102), False, 'import hdt\n'), ((967, 993), 'numpy.min', 'np.min', (['degree_array[:, 2]'], {}), '(degree_array[:, 2])\n', (973, 993), True, 'import numpy as np\n'), ((1055, 1081), 'numpy.max', 'np.max', (['degree_array[:, 2]'], {}), '(degree_array[:, 2])\n', (1061, 1081), True, 'import numpy as np\n'), ((1143, 1170), 'numpy.mean', 'np.mean', (['degree_array[:, 2]'], {}), '(degree_array[:, 2])\n', (1150, 1170), True, 'import numpy as np\n'), ((1239, 1265), 'numpy.min', 'np.min', (['degree_array[:, 1]'], {}), '(degree_array[:, 1])\n', (1245, 1265), True, 'import numpy as np\n'), ((1327, 1353), 'numpy.max', 'np.max', (['degree_array[:, 1]'], {}), '(degree_array[:, 1])\n', (1333, 1353), True, 'import numpy as np\n'), ((1415, 1442), 'numpy.mean', 'np.mean', (['degree_array[:, 1]'], {}), '(degree_array[:, 1])\n', (1422, 1442), True, 'import numpy as np\n'), ((1512, 1538), 'numpy.min', 'np.min', (['degree_array[:, 0]'], {}), '(degree_array[:, 0])\n', (1518, 1538), True, 'import numpy as np\n'), ((1600, 1626), 'numpy.max', 'np.max', (['degree_array[:, 0]'], {}), '(degree_array[:, 0])\n', (1606, 1626), True, 'import numpy as np\n'), ((1688, 1715), 'numpy.mean', 'np.mean', (['degree_array[:, 0]'], {}), '(degree_array[:, 0])\n', (1695, 1715), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
from matplotlib.legend import Legend
from scipy.interpolate import interp1d
from scipy.optimize import curve_fit
from sklearn.metrics import mean_squared_error
"""
Função usada para fittar os dados.
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html
"""
"""
Atributos:
- As variáveis `pressure` e `volume` contém as pressões e volumes utilizados para fitar as funções sigmoid e exponencial. Espera-se que para um pulmão saudável o melhor fit seja exponencial, enquanto que para o doente espera-se um melhor fit da sigmoid. !!Entretanto vale notar que passamos somente os pontos de volume e pressão mínimo (PEEP) de cada passo!!.
- A variável `raw_data` contém uma cópia com todos os dados de pressão e volume.
- As váriaveis `subject`, `manobra`, e `qtd_steps` servem para identificação do caso, respectivamente, nome do porco, manobra e passo. Exemplo, na manobra D existem 5 passos, na C existem 4 e na B existem 3.
- A variável `pmin` contém a PEEP mínima utilzida no fit das sigmoids e exponenciais
- A variável `estimators` é uma lista de diferentes estimadores para o fit das sigmoids e exponenciais. Exemplo: "lm" e "dogbox".
- A variável `interpolate` !!!não foi testada!!! e está setada como False, isto é, !!!No momento não estamos interpolando!!!. Ela é responsável pela interpolação dos dados visando melhorar o fit por meio da criação de mais pontos.
Funções:
_interpolate: cria as versões interpoladas das variáveis `pressure` (`interp_pressures`) e `volumes` (`interp_volumes`).
_get_error: Calcula o "root mean square", volume fitado vs volume
_make_fit_report: Constrói o dataframe que contém a análise do caso (codificado por subject, manobra, qtd_steps). Cada linha do dataframe contém todas informações necessárias daquele caso em específico.
fit: Essa função funciona como uma interface para o usuário, chamando a função `_make_fit_report`, setando os parâmetros necessarios e retornando um DataFrame.
make_plot: Plota os "n" melhores casos do dataframe retornado pela função `fit`. Os melhores são definidos como os que têm os menores erros.
"""
class funcFitter:
def __init__(self, subject:str, manobra:str, raw_data, data:np.ndarray, qtd_steps:int=5, estimators:list=["lm"]):
"""
A variável `raw_data` está formatada de forma que a coluna 0 contém as pressões e a 1 contém os volumes.
A variável `data` contém somente os pontos minímos dos passos, selecionados a partir do raw da seguinte forma: data = raw_data[0::2,:].
"""
self.raw_data = raw_data.copy() # Copia dos dados raw.
self.qtd_steps = qtd_steps # Quantidade de Passos (Exemplo: Manobra C tem 4 passos).
self.manobra = manobra # Manobra.
self.subject = subject # Nome do porco.
self.estimators = estimators # Lista de estimadores.
self.pressures = data[:,0] # Seleciona somente as PEEP de cada passo.
self.volumes = data[:,1] # Seleciona somente os volumes minimos de cada passo
self.pmin = min(self.pressures) #
self.interpolate = False
def _interpolate(self, n_interp_point:int):
last_point = self.pressures[:self.qtd_steps][-1]
self.interp_pressures = np.linspace(self.pmin, last_point, n_interp_point, endpoint=True)
interp_func = interp1d(self.pressures[:self.qtd_steps], self.volumes[:self.qtd_steps], kind=self.interp_method)
self.interp_volumes = interp_func(self.interp_pressures)
def _get_error(self, func, parameters):
hat_volumes = func(self.pressures[:self.qtd_steps], *parameters)
return mean_squared_error(np.array(self.volumes[:self.qtd_steps]), np.array(hat_volumes),squared=False)
def _make_fit_report(self, models:list, estimators:list, n_interp_point:int):
subject = []
qtd_steps = []
interp_data = []
interp_run = []
data = []
run = []
cols = ["subject","manobra","qtd_steps","model", "function_name", "estimator", "error", "param", "raw_data"]
interp_cols = ["subject","manobra","qtd_steps", "model", "function_name", "estimator", "error", "param", "interp_point", "interp_pressure", "interp_volume","raw_data"]
for model in models:
for estimator in self.estimators:
if self.interpolate:
for point in range(5, n_interp_point, 5):
try:
self._interpolate(point)
parameters, pcov = curve_fit(f = model.function,
xdata = self.pressures[:self.qtd_steps],
ydata = self.volumes[:self.qtd_steps],
method = estimator,
p0 = model.inits,
bounds = model.bounds)
err = self._get_error(func=model.function, parameters=parameters)
interp_run.append(self.subject)
interp_run.append(self.manobra)
interp_run.append(self.qtd_steps)
interp_run.append(model)
interp_run.append(model.function.__name__)
interp_run.append(estimator)
interp_run.append(err)
interp_run.append(parameters)
interp_run.append(point)
interp_run.append(self.interp_pressures)
interp_run.append(self.interp_volumes)
interp_run.append(self.raw_data)
interp_data.append(interp_run)
interp_run = []
except Exception as e:
pass
else:
try:
parameters, pcov = curve_fit(f = model.function,
xdata = self.pressures[:self.qtd_steps],
ydata = self.volumes[:self.qtd_steps],
method = estimator,
p0 = model.inits,
bounds = model.bounds)
err = self._get_error(func=model.function, parameters=parameters)
run.append(self.subject)
run.append(self.manobra)
run.append(self.qtd_steps)
run.append(model)
run.append(model.function.__name__)
run.append(estimator)
run.append(err)
run.append(parameters)
run.append(self.raw_data)
data.append(run)
run = []
except Exception as e:
pass
if self.interpolate:
return pd.DataFrame(interp_data, columns=interp_cols)
else:
return pd.DataFrame(data, columns=cols)
def fit(self, models, interpolate:bool=False, n_interp_point:int=30, interp_method:str="linear"):
self.n_interp_point = n_interp_point
self.interp_method = interp_method
self.interpolate = interpolate
return self._make_fit_report(models=models, estimators=self.estimators, n_interp_point=n_interp_point)
def make_plot(self, df:pd.DataFrame, n_best = 6):
if len(df) == 0:
print("Does not exist available plot")
return None
n_col = 2
n_row = int(np.ceil(n_best/n_col))
colors = ["b","g","r","m","y"]
df.reset_index(drop = True, inplace = True)
best_fits= df["error"].nsmallest(n_best).index
fig, axs = plt.subplots(n_row, n_col, figsize = (5*n_col,4*n_row))
for row, ax in zip(df.iloc[best_fits].iterrows(), axs.flatten()):
new_pressures = range(0,100,7)
_, data = row
ax.set_title(f"Model: {data['function_name']} Error: {round(data['error'], 2)}")
for fst_run, c in zip(data["raw_data"][::2], colors):
ax.scatter(fst_run[0], fst_run[1], c=c)
if self.interpolate:
ax.scatter(data["interp_pressure"], data["interp_volume"], c = 'k', marker = '.', label = "Interpolated")
ax.text(0.98, 0.6, f"n interp points: {data['interp_point']}",
horizontalalignment='right',
verticalalignment='bottom',
transform = ax.transAxes)
ax.scatter(new_pressures, data["model"].function(new_pressures, *data["param"]), c = 'g', marker = '+', label = "Fit")
ax.set_xlabel('Pressure')
ax.set_ylabel('Volume')
ax.legend(fancybox=True, framealpha=1, shadow=True, borderpad=1)
ax.text(0.98, 0.5, f"Estimator: {data['estimator']}",
horizontalalignment='right',
verticalalignment='bottom',
transform = ax.transAxes)
plt.tight_layout()
plt.show() | [
"scipy.optimize.curve_fit",
"numpy.ceil",
"matplotlib.pyplot.style.use",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((71, 94), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (84, 94), True, 'import matplotlib.pyplot as plt\n'), ((3449, 3514), 'numpy.linspace', 'np.linspace', (['self.pmin', 'last_point', 'n_interp_point'], {'endpoint': '(True)'}), '(self.pmin, last_point, n_interp_point, endpoint=True)\n', (3460, 3514), True, 'import numpy as np\n'), ((3537, 3638), 'scipy.interpolate.interp1d', 'interp1d', (['self.pressures[:self.qtd_steps]', 'self.volumes[:self.qtd_steps]'], {'kind': 'self.interp_method'}), '(self.pressures[:self.qtd_steps], self.volumes[:self.qtd_steps],\n kind=self.interp_method)\n', (3545, 3638), False, 'from scipy.interpolate import interp1d\n'), ((8533, 8591), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_row', 'n_col'], {'figsize': '(5 * n_col, 4 * n_row)'}), '(n_row, n_col, figsize=(5 * n_col, 4 * n_row))\n', (8545, 8591), True, 'import matplotlib.pyplot as plt\n'), ((9844, 9862), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9860, 9862), True, 'import matplotlib.pyplot as plt\n'), ((9871, 9881), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9879, 9881), True, 'import matplotlib.pyplot as plt\n'), ((3868, 3907), 'numpy.array', 'np.array', (['self.volumes[:self.qtd_steps]'], {}), '(self.volumes[:self.qtd_steps])\n', (3876, 3907), True, 'import numpy as np\n'), ((3909, 3930), 'numpy.array', 'np.array', (['hat_volumes'], {}), '(hat_volumes)\n', (3917, 3930), True, 'import numpy as np\n'), ((7625, 7671), 'pandas.DataFrame', 'pd.DataFrame', (['interp_data'], {'columns': 'interp_cols'}), '(interp_data, columns=interp_cols)\n', (7637, 7671), True, 'import pandas as pd\n'), ((7709, 7741), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (7721, 7741), True, 'import pandas as pd\n'), ((8327, 8350), 'numpy.ceil', 'np.ceil', (['(n_best / n_col)'], {}), '(n_best / n_col)\n', (8334, 8350), True, 'import numpy as np\n'), ((6376, 6544), 
'scipy.optimize.curve_fit', 'curve_fit', ([], {'f': 'model.function', 'xdata': 'self.pressures[:self.qtd_steps]', 'ydata': 'self.volumes[:self.qtd_steps]', 'method': 'estimator', 'p0': 'model.inits', 'bounds': 'model.bounds'}), '(f=model.function, xdata=self.pressures[:self.qtd_steps], ydata=\n self.volumes[:self.qtd_steps], method=estimator, p0=model.inits, bounds\n =model.bounds)\n', (6385, 6544), False, 'from scipy.optimize import curve_fit\n'), ((4786, 4954), 'scipy.optimize.curve_fit', 'curve_fit', ([], {'f': 'model.function', 'xdata': 'self.pressures[:self.qtd_steps]', 'ydata': 'self.volumes[:self.qtd_steps]', 'method': 'estimator', 'p0': 'model.inits', 'bounds': 'model.bounds'}), '(f=model.function, xdata=self.pressures[:self.qtd_steps], ydata=\n self.volumes[:self.qtd_steps], method=estimator, p0=model.inits, bounds\n =model.bounds)\n', (4795, 4954), False, 'from scipy.optimize import curve_fit\n')] |
# This is the simulation of our evolving RS model under the SECOND framework of our assumptions on edge weights.
import numpy as np
import random
import matplotlib.pyplot as plt
import powerlaw
import pandas as pd
class assumption_2nd:
    """Simulation of an evolving weighted bipartite graph B(U, I) between
    users (U) and items (I) under the SECOND edge-weight assumption: each
    rating is drawn from a mixture over K user types and L item levels.

    Constructing an instance runs the whole simulation and stores the
    resulting degree/weight distributions for users and items.
    """
    # initializing the whole model
    def __init__(self, beta, iterations, rating_scale, Cu, Ci, Unum, Inum, K, L, C):
        """Run the full simulation and collect statistics.

        The sequence is: set parameters, build the initial graph, iterate
        the evolution, then pad the collected degree/weight histograms to
        a common length of `iterations + 1`.
        """
        self.init_paramter(beta, iterations, rating_scale, Cu, Ci, Unum, Inum, K, L, C)
        self.init_assumption()
        k = self.stat()
        self.iterate()
        res = self.stat()
        tdu = self.calcdegree_user()
        twu = self.calcweight_user()
        tdi = self.calcdegree_item()
        twi = self.calcweight_item()
        k = (res, tdu, twu, tdi, twi)
        # res is the rating-value pmf; its size is at most rating_scale
        # because every edge weight lies in [1, rating_scale].
        x = np.zeros(self.rating_scale)
        self.degseq_user = np.zeros(self.iterations + 1)
        self.weiseq_user = np.zeros(self.iterations + 1)
        self.degseq_item = np.zeros(self.iterations + 1)
        self.weiseq_item = np.zeros(self.iterations + 1)
        x[:res.size] = x[:res.size] + res
        self.degseq_user[:min(self.iterations+1,k[1].size)] = self.degseq_user[:min(self.iterations+1,k[1].size)] + k[1][:min(self.iterations+1,k[1].size)]
        self.weiseq_user[:min(self.iterations+1,k[2].size)] = self.weiseq_user[:min(self.iterations+1,k[2].size)] + k[2][:min(self.iterations+1,k[2].size)]
        self.degseq_item[:min(self.iterations+1,k[3].size)] = self.degseq_item[:min(self.iterations+1,k[3].size)] + k[3][:min(self.iterations+1,k[3].size)]
        self.weiseq_item[:min(self.iterations+1,k[4].size)] = self.weiseq_item[:min(self.iterations+1,k[4].size)] + k[4][:min(self.iterations+1,k[4].size)]
        np.set_printoptions(threshold=np.inf)
        # x-axis indices 0..iterations used when plotting the distributions.
        xind = np.zeros(self.iterations + 1)
        for i in range(1,self.iterations + 1):
            xind[i] = xind[i-1] + 1
        self.xind_user = xind
        self.xind_item = xind
        print("finish all the staff")
    #Initial settings of parameters in our weighted bipartite graph model B(U,I).
    def init_paramter(self, beta, iterations, rating_scale, Cu, Ci, Unum, Inum, K, L, C):
        # NOTE: "paramter" (sic) is kept as-is; callers depend on this name.
        #Initial settings of parameters in our weighted bipartite graph model B(U,I).
        self.beta = beta # the probability to add a new vertex in U
        self.iterations = iterations # the number of iterations to run the simulation
        self.rating_scale = rating_scale # the preassigned rating scale
        self.Cu = Cu # the least number of edges connected to vertices in U
        self.Ci = Ci # the least number of edges connected to vertices in I
        self.Unum = Unum # the number of vertices in U in the initial graph at t=0
        self.Inum = Inum # the number of vertices in I in the initial graph at t=1
        self.K = K # the number of basic user type in our assumption
        self.L = L # the number of basic item level in our assumption
        self.C = C # the number of adding edge
        self.Hui = np.zeros((rating_scale,K,L)) # the rating pmf for the pair of K user types and L item levels
        self.Fmean = np.zeros((K,)) # the mean of the distribution of users' weight vector (assumed to be Gaussian)
        self.Gmean = np.zeros((L,)) # the mean of the distribution of items' weight vector (assumed to be Gaussian)
        self.edges = np.zeros((iterations+50,iterations+50), dtype=int) # the matrix storing edge information
        self.Uweight = np.zeros((iterations+50,K)) # the matrix storing users' weight vectors
        self.Iweight = np.zeros((iterations+50,L)) # the matrix storing items' weight vectors
    # Initalization of the sampling of edge weights from the mixture distribution
    def init_weightgenerator(self):
        """Draw the random mixture components and normalize each of them so
        that every pmf (rows of Huser/Hitem, axis 0 of Hui, Fmean, Gmean)
        sums to one."""
        # include K,L,Huser,Hitem,Hui,rating_scale,Fmean,Gmean
        self.Hui = np.random.sample((self.rating_scale, self.K, self.L))
        Huisubsum = np.sum(self.Hui,axis=0)
        Huisubsum = np.array([Huisubsum] * self.rating_scale)
        self.Hui = self.Hui/Huisubsum
        self.Huser = np.random.sample((self.K, self.rating_scale))
        Husersubsum = np.sum(self.Huser, axis=1)
        Husersubsum = np.array([Husersubsum] * self.rating_scale)
        Husersubsum = np.transpose(Husersubsum)
        self.Huser = self.Huser/Husersubsum
        self.Hitem = np.random.sample((self.L, self.rating_scale))
        Hitemsubsum = np.sum(self.Hitem, axis=1)
        Hitemsubsum = np.array([Hitemsubsum] * self.rating_scale)
        Hitemsubsum = np.transpose(Hitemsubsum)
        self.Hitem = self.Hitem/Hitemsubsum
        self.Fmean = np.random.sample(self.K,)
        self.Fmean = self.Fmean/np.sum(self.Fmean)
        self.Gmean = np.random.sample(self.L,)
        self.Gmean = self.Gmean/np.sum(self.Gmean)
    #Sample edge weight for the edge between vertices (Uid, Iid)
    def weightgenerator(self, Uid, Iid):
        """Sample a rating in [1, rating_scale] for edge (Uid, Iid) from the
        mixture pmf weighted by the two vertices' weight vectors."""
        # include K,L,Huser,Hitem,Hui,rating_scale,Fmean,Gmean
        Uw = self.Uweight[Uid:Uid+1,:]
        Iw = self.Iweight[Iid:Iid+1,:]
        Uw = np.transpose(Uw)
        # Outer product of the (K,) and (L,) weight vectors -> mixture weights.
        Hr = np.dot(Uw,Iw)
        Hr = np.array([Hr] * self.rating_scale)
        Hr = self.Hui * Hr
        Hr = np.sum(Hr,axis=1)
        Hr = np.sum(Hr,axis=1)
        # +1 shifts the sampled index 0..rating_scale-1 to a rating 1..rating_scale.
        R = np.random.choice(self.rating_scale, 1, p=Hr) + 1
        return R
    # Initialization for the inital simple graph at t=0
    def init_assumption(self):
        """Build the initial complete bipartite graph over Unum users and
        Inum items, sampling each vertex weight vector around Fmean/Gmean."""
        # include edges,Unum,Inum,Uweight,Iweight,K,L,Fmean,Gmean
        print("Initializing...", end="")
        self.init_weightgenerator()
        for i in range(self.Unum):
            Utmp = np.random.normal(self.Fmean, 0.1)
            Utmp[Utmp<0]=0
            Utmp = Utmp/np.sum(Utmp)
            self.Uweight[i,:]=Utmp
        for i in range(self.Inum):
            Itmp = np.random.normal(self.Gmean, 0.1)
            Itmp[Itmp<0]=0
            Itmp = Itmp/np.sum(Itmp)
            self.Iweight[i,:]=Itmp
        self.edges = np.zeros((self.iterations+50, self.iterations+50), dtype=int)
        # We can assume that axis=1 is user sequence and the axis=0 is the item sequence
        for i in range(self.Unum):
            for j in range(self.Inum):
                self.edges[i,j] = self.weightgenerator(i,j)
        print("Done.")
    # Select "prototype" from the existing vertex group
    def prototype(self, arr, nb):
        # Index of the vertex whose cumulative weight first reaches nb
        # (weighted sampling proportional to vertex strength).
        return np.count_nonzero(arr.cumsum() < nb)
    # Conduct Edge-copy and assign new edge weights
    def copyedge(self, template, desired, p_prime):
        """Pick `desired` distinct neighbours of the prototype's edge row
        `template`, sampled proportionally to the existing edge weights."""
        ls = []
        new2old = template.nonzero()[0]
        tmp = template[new2old].astype(float)
        for i in range(desired):
            tmp /= tmp.sum()
            sampled = np.nonzero(np.random.multinomial(1, tmp))[0][0]
            ls.append(sampled)
            # Zero the chosen slot so it cannot be drawn again.
            tmp[sampled] = 0
        ls.sort()
        return new2old[ls]
    # Add new vertices to U (respectively. I)
    def addnode(self, nb_axis):
        """Add one vertex: a user if nb_axis == 1, otherwise an item.
        The new vertex copies edges from a strength-weighted prototype."""
        # include edges,Unum,Inum,Fmean,Gmean
        weightsum = np.sum(self.edges[:self.Unum,:self.Inum], axis=nb_axis)
        totalsum = np.sum(weightsum)
        randnum = np.random.randint(1, totalsum+1)
        p_prime = self.prototype(weightsum, randnum)
        weighted = np.zeros(1)
        if nb_axis == 1:
            Utmp = np.random.normal(self.Fmean, 0.1)
            Utmp[Utmp<0] = 0
            Utmp = Utmp/np.sum(Utmp)
            self.Uweight[self.Unum,:]=Utmp
            template = self.edges[p_prime,:self.Inum]
            desired = self.Cu
            idx = self.copyedge(template, desired, p_prime)
            new = np.zeros(template.shape[0], dtype=int)
            for i in range(idx.shape[0]):
                new[idx[i]]= self.weightgenerator(self.Unum, idx[i])
            self.edges[self.Unum,:self.Inum] = new
            self.Unum = self.Unum + 1
        else:
            Itmp = np.random.normal(self.Gmean, 0.1)
            Itmp[Itmp<0] = 0
            Itmp = Itmp/np.sum(Itmp)
            self.Iweight[self.Inum,:]=Itmp
            template = self.edges[:self.Unum, p_prime]
            desired = self.Ci
            idx = self.copyedge(template, desired, p_prime)
            new = np.zeros(template.shape[0], dtype=int)
            for i in range(idx.shape[0]):
                new[idx[i]]= self.weightgenerator(idx[i], self.Inum)
            self.edges[:self.Unum,self.Inum] = new
            self.Inum = self.Inum + 1
    # Add new edges to Graph
    def addedge(self):
        """Place one uniformly random edge with a uniformly random rating.
        NOTE(review): an existing edge may be overwritten here -- confirm
        whether that is intended."""
        # include edges,Unum,Inum
        randnum_user = random.randint(1,self.Unum-1)
        randnum_item = random.randint(1,self.Inum-1)
        self.edges[randnum_user,randnum_item] = random.randint(1, self.rating_scale)
    # Evolution of U (or I)
    def evolution(self):
        """One evolution step: add a user w.p. beta (else an item), then C edges."""
        randnum = np.random.rand()
        if randnum < self.beta:
            self.addnode(1)
        else:
            self.addnode(0)
        for i in range(self.C):
            self.addedge()
        # pass
    # Iterate
    def iterate(self):
        """Run `iterations` evolution steps."""
        print("Begin iteration...", end="")
        for i in range(self.iterations):
            self.evolution()
        print("Done")
    # Gather statistic information
    def stat(self):
        """Return the empirical pmf of edge weights 1..rating_scale
        (the zero entries of the matrix are discarded by `count[1:]`)."""
        # include edges
        tmps = self.edges.flatten().astype(int)
        count = np.bincount(tmps)
        count = count[1:]
        count = 1.0*count/count.sum()
        return count
    # Calculate user degree distributions
    def calcdegree_user(self):
        # include edges
        # astype(bool) counts nonzero edges, i.e. unweighted degree.
        sumdegree = self.edges.astype(bool).sum(axis=1)
        return np.bincount(sumdegree)
    # Calculate user weight distributions
    def calcweight_user(self):
        # include edges
        sumdegree = self.edges.sum(axis=1)
        return np.bincount(sumdegree)
    # Calculate item degree distributions
    def calcdegree_item(self):
        # include edges
        sumdegree = self.edges.astype(bool).sum(axis=0)
        return np.bincount(sumdegree)
    # Calculate item weight distributions
    def calcweight_item(self):
        # include edges
        sumdegree = self.edges.sum(axis=0)
        return np.bincount(sumdegree)
    def get_distribution(self, target="user"):
        """Return (degree histogram, weight histogram, x indices) for
        `target` in {"user", "item"}; anything else falls back to "user"."""
        if target == "item":
            return self.degseq_item, self.weiseq_item, self.xind_item
        else:
            return self.degseq_user, self.weiseq_user, self.xind_user
    def get_graph(self):
        """Return the raw edge matrix and the current item/user counts."""
        return self.edges, self.Inum, self.Unum
def get_pvalue_alpha_xmin_2(seq):
    """Fit a power law to `seq` and test it against a lognormal alternative.

    Returns (p_value, alpha, xmin): the significance of the likelihood-ratio
    comparison and the fitted power-law exponent and cutoff.
    """
    results = powerlaw.Fit(seq)
    alpha = results.power_law.alpha
    xmin = results.power_law.xmin
    # R > 0 favours the power law; p_value gives the comparison's significance.
    R, p_value = results.distribution_compare('power_law', 'lognormal')
    print("p_value:", p_value, "alpha:", alpha, "xmin:", xmin)
    return p_value, alpha, xmin | [
"numpy.random.normal",
"powerlaw.Fit",
"numpy.random.rand",
"numpy.random.choice",
"numpy.random.multinomial",
"numpy.sum",
"numpy.zeros",
"numpy.random.sample",
"numpy.array",
"numpy.dot",
"numpy.random.randint",
"numpy.transpose",
"numpy.bincount",
"random.randint",
"numpy.set_printopt... | [((10483, 10500), 'powerlaw.Fit', 'powerlaw.Fit', (['seq'], {}), '(seq)\n', (10495, 10500), False, 'import powerlaw\n'), ((756, 783), 'numpy.zeros', 'np.zeros', (['self.rating_scale'], {}), '(self.rating_scale)\n', (764, 783), True, 'import numpy as np\n'), ((811, 840), 'numpy.zeros', 'np.zeros', (['(self.iterations + 1)'], {}), '(self.iterations + 1)\n', (819, 840), True, 'import numpy as np\n'), ((868, 897), 'numpy.zeros', 'np.zeros', (['(self.iterations + 1)'], {}), '(self.iterations + 1)\n', (876, 897), True, 'import numpy as np\n'), ((925, 954), 'numpy.zeros', 'np.zeros', (['(self.iterations + 1)'], {}), '(self.iterations + 1)\n', (933, 954), True, 'import numpy as np\n'), ((982, 1011), 'numpy.zeros', 'np.zeros', (['(self.iterations + 1)'], {}), '(self.iterations + 1)\n', (990, 1011), True, 'import numpy as np\n'), ((1695, 1732), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (1714, 1732), True, 'import numpy as np\n'), ((1748, 1777), 'numpy.zeros', 'np.zeros', (['(self.iterations + 1)'], {}), '(self.iterations + 1)\n', (1756, 1777), True, 'import numpy as np\n'), ((2993, 3023), 'numpy.zeros', 'np.zeros', (['(rating_scale, K, L)'], {}), '((rating_scale, K, L))\n', (3001, 3023), True, 'import numpy as np\n'), ((3107, 3121), 'numpy.zeros', 'np.zeros', (['(K,)'], {}), '((K,))\n', (3115, 3121), True, 'import numpy as np\n'), ((3223, 3237), 'numpy.zeros', 'np.zeros', (['(L,)'], {}), '((L,))\n', (3231, 3237), True, 'import numpy as np\n'), ((3339, 3394), 'numpy.zeros', 'np.zeros', (['(iterations + 50, iterations + 50)'], {'dtype': 'int'}), '((iterations + 50, iterations + 50), dtype=int)\n', (3347, 3394), True, 'import numpy as np\n'), ((3451, 3481), 'numpy.zeros', 'np.zeros', (['(iterations + 50, K)'], {}), '((iterations + 50, K))\n', (3459, 3481), True, 'import numpy as np\n'), ((3545, 3575), 'numpy.zeros', 'np.zeros', (['(iterations + 50, L)'], {}), '((iterations + 50, L))\n', 
(3553, 3575), True, 'import numpy as np\n'), ((3821, 3874), 'numpy.random.sample', 'np.random.sample', (['(self.rating_scale, self.K, self.L)'], {}), '((self.rating_scale, self.K, self.L))\n', (3837, 3874), True, 'import numpy as np\n'), ((3895, 3919), 'numpy.sum', 'np.sum', (['self.Hui'], {'axis': '(0)'}), '(self.Hui, axis=0)\n', (3901, 3919), True, 'import numpy as np\n'), ((3939, 3980), 'numpy.array', 'np.array', (['([Huisubsum] * self.rating_scale)'], {}), '([Huisubsum] * self.rating_scale)\n', (3947, 3980), True, 'import numpy as np\n'), ((4049, 4094), 'numpy.random.sample', 'np.random.sample', (['(self.K, self.rating_scale)'], {}), '((self.K, self.rating_scale))\n', (4065, 4094), True, 'import numpy as np\n'), ((4117, 4143), 'numpy.sum', 'np.sum', (['self.Huser'], {'axis': '(1)'}), '(self.Huser, axis=1)\n', (4123, 4143), True, 'import numpy as np\n'), ((4166, 4209), 'numpy.array', 'np.array', (['([Husersubsum] * self.rating_scale)'], {}), '([Husersubsum] * self.rating_scale)\n', (4174, 4209), True, 'import numpy as np\n'), ((4232, 4257), 'numpy.transpose', 'np.transpose', (['Husersubsum'], {}), '(Husersubsum)\n', (4244, 4257), True, 'import numpy as np\n'), ((4332, 4377), 'numpy.random.sample', 'np.random.sample', (['(self.L, self.rating_scale)'], {}), '((self.L, self.rating_scale))\n', (4348, 4377), True, 'import numpy as np\n'), ((4400, 4426), 'numpy.sum', 'np.sum', (['self.Hitem'], {'axis': '(1)'}), '(self.Hitem, axis=1)\n', (4406, 4426), True, 'import numpy as np\n'), ((4449, 4492), 'numpy.array', 'np.array', (['([Hitemsubsum] * self.rating_scale)'], {}), '([Hitemsubsum] * self.rating_scale)\n', (4457, 4492), True, 'import numpy as np\n'), ((4515, 4540), 'numpy.transpose', 'np.transpose', (['Hitemsubsum'], {}), '(Hitemsubsum)\n', (4527, 4540), True, 'import numpy as np\n'), ((4615, 4639), 'numpy.random.sample', 'np.random.sample', (['self.K'], {}), '(self.K)\n', (4631, 4639), True, 'import numpy as np\n'), ((4713, 4737), 'numpy.random.sample', 
'np.random.sample', (['self.L'], {}), '(self.L)\n', (4729, 4737), True, 'import numpy as np\n'), ((5055, 5071), 'numpy.transpose', 'np.transpose', (['Uw'], {}), '(Uw)\n', (5067, 5071), True, 'import numpy as np\n'), ((5085, 5099), 'numpy.dot', 'np.dot', (['Uw', 'Iw'], {}), '(Uw, Iw)\n', (5091, 5099), True, 'import numpy as np\n'), ((5112, 5146), 'numpy.array', 'np.array', (['([Hr] * self.rating_scale)'], {}), '([Hr] * self.rating_scale)\n', (5120, 5146), True, 'import numpy as np\n'), ((5187, 5205), 'numpy.sum', 'np.sum', (['Hr'], {'axis': '(1)'}), '(Hr, axis=1)\n', (5193, 5205), True, 'import numpy as np\n'), ((5218, 5236), 'numpy.sum', 'np.sum', (['Hr'], {'axis': '(1)'}), '(Hr, axis=1)\n', (5224, 5236), True, 'import numpy as np\n'), ((5944, 6009), 'numpy.zeros', 'np.zeros', (['(self.iterations + 50, self.iterations + 50)'], {'dtype': 'int'}), '((self.iterations + 50, self.iterations + 50), dtype=int)\n', (5952, 6009), True, 'import numpy as np\n'), ((6991, 7047), 'numpy.sum', 'np.sum', (['self.edges[:self.Unum, :self.Inum]'], {'axis': 'nb_axis'}), '(self.edges[:self.Unum, :self.Inum], axis=nb_axis)\n', (6997, 7047), True, 'import numpy as np\n'), ((7066, 7083), 'numpy.sum', 'np.sum', (['weightsum'], {}), '(weightsum)\n', (7072, 7083), True, 'import numpy as np\n'), ((7102, 7136), 'numpy.random.randint', 'np.random.randint', (['(1)', '(totalsum + 1)'], {}), '(1, totalsum + 1)\n', (7119, 7136), True, 'import numpy as np\n'), ((7207, 7218), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (7215, 7218), True, 'import numpy as np\n'), ((8503, 8535), 'random.randint', 'random.randint', (['(1)', '(self.Unum - 1)'], {}), '(1, self.Unum - 1)\n', (8517, 8535), False, 'import random\n'), ((8556, 8588), 'random.randint', 'random.randint', (['(1)', '(self.Inum - 1)'], {}), '(1, self.Inum - 1)\n', (8570, 8588), False, 'import random\n'), ((8634, 8670), 'random.randint', 'random.randint', (['(1)', 'self.rating_scale'], {}), '(1, self.rating_scale)\n', (8648, 8670), False, 
'import random\n'), ((8751, 8767), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (8765, 8767), True, 'import numpy as np\n'), ((9267, 9284), 'numpy.bincount', 'np.bincount', (['tmps'], {}), '(tmps)\n', (9278, 9284), True, 'import numpy as np\n'), ((9539, 9561), 'numpy.bincount', 'np.bincount', (['sumdegree'], {}), '(sumdegree)\n', (9550, 9561), True, 'import numpy as np\n'), ((9718, 9740), 'numpy.bincount', 'np.bincount', (['sumdegree'], {}), '(sumdegree)\n', (9729, 9740), True, 'import numpy as np\n'), ((9910, 9932), 'numpy.bincount', 'np.bincount', (['sumdegree'], {}), '(sumdegree)\n', (9921, 9932), True, 'import numpy as np\n'), ((10089, 10111), 'numpy.bincount', 'np.bincount', (['sumdegree'], {}), '(sumdegree)\n', (10100, 10111), True, 'import numpy as np\n'), ((4673, 4691), 'numpy.sum', 'np.sum', (['self.Fmean'], {}), '(self.Fmean)\n', (4679, 4691), True, 'import numpy as np\n'), ((4771, 4789), 'numpy.sum', 'np.sum', (['self.Gmean'], {}), '(self.Gmean)\n', (4777, 4789), True, 'import numpy as np\n'), ((5248, 5292), 'numpy.random.choice', 'np.random.choice', (['self.rating_scale', '(1)'], {'p': 'Hr'}), '(self.rating_scale, 1, p=Hr)\n', (5264, 5292), True, 'import numpy as np\n'), ((5603, 5636), 'numpy.random.normal', 'np.random.normal', (['self.Fmean', '(0.1)'], {}), '(self.Fmean, 0.1)\n', (5619, 5636), True, 'import numpy as np\n'), ((5790, 5823), 'numpy.random.normal', 'np.random.normal', (['self.Gmean', '(0.1)'], {}), '(self.Gmean, 0.1)\n', (5806, 5823), True, 'import numpy as np\n'), ((7263, 7296), 'numpy.random.normal', 'np.random.normal', (['self.Fmean', '(0.1)'], {}), '(self.Fmean, 0.1)\n', (7279, 7296), True, 'import numpy as np\n'), ((7568, 7606), 'numpy.zeros', 'np.zeros', (['template.shape[0]'], {'dtype': 'int'}), '(template.shape[0], dtype=int)\n', (7576, 7606), True, 'import numpy as np\n'), ((7840, 7873), 'numpy.random.normal', 'np.random.normal', (['self.Gmean', '(0.1)'], {}), '(self.Gmean, 0.1)\n', (7856, 7873), True, 'import numpy as 
np\n'), ((8146, 8184), 'numpy.zeros', 'np.zeros', (['template.shape[0]'], {'dtype': 'int'}), '(template.shape[0], dtype=int)\n', (8154, 8184), True, 'import numpy as np\n'), ((5688, 5700), 'numpy.sum', 'np.sum', (['Utmp'], {}), '(Utmp)\n', (5694, 5700), True, 'import numpy as np\n'), ((5875, 5887), 'numpy.sum', 'np.sum', (['Itmp'], {}), '(Itmp)\n', (5881, 5887), True, 'import numpy as np\n'), ((7350, 7362), 'numpy.sum', 'np.sum', (['Utmp'], {}), '(Utmp)\n', (7356, 7362), True, 'import numpy as np\n'), ((7927, 7939), 'numpy.sum', 'np.sum', (['Itmp'], {}), '(Itmp)\n', (7933, 7939), True, 'import numpy as np\n'), ((6704, 6733), 'numpy.random.multinomial', 'np.random.multinomial', (['(1)', 'tmp'], {}), '(1, tmp)\n', (6725, 6733), True, 'import numpy as np\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def get_alignment(attn_probs, mel_lens, n_head):
    """Select the most focused attention head across all layers and derive
    per-token durations from it.

    Args:
        attn_probs: list of per-layer attention tensors; each exposes
            `.shape` and `.numpy()` with leading dim n_head * batch
            (assumed layout -- TODO confirm against the caller).
        mel_lens: number of mel frames to scan when computing durations.
        n_head: number of attention heads per layer.

    Returns:
        (alignment, max_attn): the duration array from compute_duration and
        the winning attention map.
    """
    # Start below any reachable score (score_F is a mean of probabilities,
    # hence >= 0) so the first head always initializes max_attn. The old
    # sentinel of 0 with a strict '<' left max_attn unbound when every
    # head scored exactly 0 (all-zero attention), raising NameError.
    max_F = -1.0
    max_attn = None
    assert attn_probs[0].shape[0] % n_head == 0
    batch_size = int(attn_probs[0].shape[0] // n_head)
    for i in range(len(attn_probs)):
        multi_attn = attn_probs[i].numpy()
        for j in range(n_head):
            # Slice out head j's maps for the whole batch.
            attn = multi_attn[j * batch_size:(j + 1) * batch_size]
            F = score_F(attn)
            if max_F < F:
                max_F = F
                max_attn = attn
    alignment = compute_duration(max_attn, mel_lens)
    return alignment, max_attn
def score_F(attn):
    """Return the focus score of an attention map.

    Takes the maximum attention probability along the last axis (one value
    per query position) and averages those maxima; a peaked, diagonal-like
    attention yields a higher score.
    """
    # Renamed from `max` to avoid shadowing the builtin of the same name.
    row_max = np.max(attn, axis=-1)
    return np.mean(row_max)
def compute_duration(attn, mel_lens):
    """Count, for each text position, how many mel frames attend to it most.

    Only the first attention map (`attn[0]`) is used; `mel_lens` frames are
    scanned and each frame votes for its argmax text position.
    """
    duration = np.zeros([attn.shape[2]])
    head = attn[0]
    for frame_idx in range(mel_lens):
        best_pos = np.argmax(head[frame_idx])
        duration[best_pos] += 1
    return duration
| [
"numpy.mean",
"numpy.zeros",
"numpy.argmax",
"numpy.max"
] | [((1205, 1226), 'numpy.max', 'np.max', (['attn'], {'axis': '(-1)'}), '(attn, axis=-1)\n', (1211, 1226), True, 'import numpy as np\n'), ((1238, 1250), 'numpy.mean', 'np.mean', (['max'], {}), '(max)\n', (1245, 1250), True, 'import numpy as np\n'), ((1323, 1348), 'numpy.zeros', 'np.zeros', (['[attn.shape[2]]'], {}), '([attn.shape[2]])\n', (1331, 1348), True, 'import numpy as np\n'), ((1435, 1456), 'numpy.argmax', 'np.argmax', (['attn[0, j]'], {}), '(attn[0, j])\n', (1444, 1456), True, 'import numpy as np\n')] |
"""
Supplementary Fig. 2
"""
"""This script is used to benchmark GLIPH's performance across a variety of clustering thresholds by
varying the hamming distance parameter. This script required a local installation of GLIPH. The output
of this script is saved as GLIPH.csv and can be found in the github repository."""
import pandas as pd
import os
import glob
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
directory = '../../Data/Glanville/'
antigens = os.listdir(directory)
seq = []
label = []
for antigen in antigens:
df = pd.read_csv(os.path.join(directory,antigen,antigen+'.tsv'),sep='\t')
seq.extend(df['aminoAcid'].tolist())
label.extend([antigen]*len(df))
df_out = pd.DataFrame()
df_out['Sequences'] = seq
df_out.to_csv('cdr3.txt',index=False)
df_ref = pd.DataFrame()
df_ref['Beta_Sequences'] = seq
df_ref['Labels'] = label
df_ref_dict = df_ref.set_index('Beta_Sequences').T.to_dict('list')
total_seq = len(seq)
num_clusters = []
variance = []
x=[]
y=[]
r = np.asarray(range(5))
for t in r:
#Erase previous GLIPH outputs
files = glob.glob('cdr3*')
for file in files:
if file != 'cdr3.txt':
os.remove(file)
#Run GLIPH
os.system('gliph/bin/gliph-group-discovery.pl --tcr cdr3.txt --gccutoff='+str(t))
df_in = pd.read_csv('cdr3-convergence-groups.txt',sep='\t',header=None)
#Collect Clusters
DFs = []
for c in range(len(df_in)):
df_temp = pd.DataFrame()
seq = df_in[2][c]
seq = seq.split()
label = []
for s in seq:
label.append(df_ref_dict[s][0])
df_temp['Beta_Sequences'] = seq
df_temp['Labels'] = label
DFs.append(df_temp)
#Determine Specificity
correct = 0
clustered = 0
df_clusters = []
for df in DFs:
if len(df) >= 3:
common = df['Labels'].value_counts()
if len(common) == 1:
most_common = df['Labels'].value_counts().index[0]
correct += np.sum(df['Labels'] == most_common)
clustered += len(df)
df_clusters.append(df)
elif (common[0] > common[1]):
most_common = df['Labels'].value_counts().index[0]
correct += np.sum(df['Labels'] == most_common)
clustered += len(df)
df_clusters.append(df)
x.append(clustered/total_seq)
y.append(correct/clustered)
#Save Data
df_out = pd.DataFrame()
df_out['Percent Clustered'] = 100*np.asarray(x)
df_out['Percent Correctly Clustered'] = 100*np.asarray(y)
df_out.to_csv('GLIPH.csv',index=False)
plt.figure()
sns.regplot(data=df_out,x='Percent Clustered',y='Percent Correctly Clustered',fit_reg=False)
| [
"seaborn.regplot",
"os.listdir",
"pandas.read_csv",
"numpy.asarray",
"os.path.join",
"numpy.sum",
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"glob.glob",
"os.remove"
] | [((482, 503), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (492, 503), False, 'import os\n'), ((715, 729), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (727, 729), True, 'import pandas as pd\n'), ((804, 818), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (816, 818), True, 'import pandas as pd\n'), ((2462, 2476), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2474, 2476), True, 'import pandas as pd\n'), ((2622, 2634), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2632, 2634), True, 'import matplotlib.pyplot as plt\n'), ((2635, 2735), 'seaborn.regplot', 'sns.regplot', ([], {'data': 'df_out', 'x': '"""Percent Clustered"""', 'y': '"""Percent Correctly Clustered"""', 'fit_reg': '(False)'}), "(data=df_out, x='Percent Clustered', y=\n 'Percent Correctly Clustered', fit_reg=False)\n", (2646, 2735), True, 'import seaborn as sns\n'), ((1089, 1107), 'glob.glob', 'glob.glob', (['"""cdr3*"""'], {}), "('cdr3*')\n", (1098, 1107), False, 'import glob\n'), ((1304, 1369), 'pandas.read_csv', 'pd.read_csv', (['"""cdr3-convergence-groups.txt"""'], {'sep': '"""\t"""', 'header': 'None'}), "('cdr3-convergence-groups.txt', sep='\\t', header=None)\n", (1315, 1369), True, 'import pandas as pd\n'), ((2511, 2524), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (2521, 2524), True, 'import numpy as np\n'), ((2569, 2582), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (2579, 2582), True, 'import numpy as np\n'), ((571, 621), 'os.path.join', 'os.path.join', (['directory', 'antigen', "(antigen + '.tsv')"], {}), "(directory, antigen, antigen + '.tsv')\n", (583, 621), False, 'import os\n'), ((1454, 1468), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1466, 1468), True, 'import pandas as pd\n'), ((1174, 1189), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (1183, 1189), False, 'import os\n'), ((2011, 2046), 'numpy.sum', 'np.sum', (["(df['Labels'] == most_common)"], {}), "(df['Labels'] == most_common)\n", 
(2017, 2046), True, 'import numpy as np\n'), ((2260, 2295), 'numpy.sum', 'np.sum', (["(df['Labels'] == most_common)"], {}), "(df['Labels'] == most_common)\n", (2266, 2295), True, 'import numpy as np\n')] |
# Author: <NAME>(ICSRL)
# Created: 4/14/2020, 7:15 AM
# Email: <EMAIL>
import tensorflow as tf
import numpy as np
from network.loss_functions import huber_loss, mse_loss
from network.network import *
from numpy import linalg as LA
class initialize_network_DeepQLearning():
    """TF1 graph wrapper for a Deep Q-Network used by one vehicle/agent.

    Builds the graph, session, optimizer and TensorBoard writers in
    __init__ and exposes train / inference / (de)serialization helpers.
    """
    def __init__(self, cfg, name, vehicle_name):
        """Build the DQN graph for `vehicle_name` from config `cfg`;
        `name` is only used to suffix the loss-writer directory."""
        self.g = tf.Graph()
        self.vehicle_name = vehicle_name
        self.first_frame = True
        self.last_frame = []
        with self.g.as_default():
            stat_writer_path = cfg.network_path + self.vehicle_name + '/return_plot/'
            loss_writer_path = cfg.network_path + self.vehicle_name + '/loss' + name + '/'
            self.stat_writer = tf.summary.FileWriter(stat_writer_path)
            # name_array = 'D:/train/loss'+'/'+name
            self.loss_writer = tf.summary.FileWriter(loss_writer_path)
            self.env_type = cfg.env_type
            self.input_size = cfg.input_size
            self.num_actions = cfg.num_actions
            # Placeholders
            self.batch_size = tf.placeholder(tf.int32, shape=())
            self.learning_rate = tf.placeholder(tf.float32, shape=())
            self.X1 = tf.placeholder(tf.float32, [None, cfg.input_size, cfg.input_size, 3], name='States')
            # self.X = tf.image.resize_images(self.X1, (227, 227))
            # Per-image standardization of every frame in the batch.
            self.X = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), self.X1)
            self.target = tf.placeholder(tf.float32, shape=[None], name='Qvals')
            self.actions = tf.placeholder(tf.int32, shape=[None], name='Actions')
            # self.model = AlexNetDuel(self.X, cfg.num_actions, cfg.train_fc)
            self.model = C3F2(self.X, cfg.num_actions, cfg.train_fc)
            self.predict = self.model.output
            # Select the Q-value of the taken action via a one-hot mask.
            ind = tf.one_hot(self.actions, cfg.num_actions)
            pred_Q = tf.reduce_sum(tf.multiply(self.model.output, ind), axis=1)
            self.loss = huber_loss(pred_Q, self.target)
            self.train = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9, beta2=0.99).minimize(
                self.loss, name="train")
            self.sess = tf.InteractiveSession()
            tf.global_variables_initializer().run()
            tf.local_variables_initializer().run()
            self.saver = tf.train.Saver()
            self.all_vars = tf.trainable_variables()
            # Freeze the graph: any later op creation would raise.
            self.sess.graph.finalize()
        # Load custom weights from custom_load_path if required
        if cfg.custom_load:
            print('Loading weights from: ', cfg.custom_load_path)
            self.load_network(cfg.custom_load_path)
    def get_vars(self):
        """Return the current values of all trainable variables."""
        return self.sess.run(self.all_vars)
    def initialize_graphs_with_average(self, agent, agent_on_same_network):
        """Load the element-wise average of the listed agents' trainable
        variables back into each agent's graph (parameter averaging)."""
        values = {}
        var = {}
        all_assign = {}
        for name_agent in agent_on_same_network:
            values[name_agent] = agent[name_agent].network_model.get_vars()
            var[name_agent] = agent[name_agent].network_model.all_vars
            all_assign[name_agent] = []
        # NOTE: relies on `name_agent` leaking from the loop above
        # (all agents are assumed to have the same number of variables).
        for i in range(len(values[name_agent])):
            val = []
            for name_agent in agent_on_same_network:
                val.append(values[name_agent][i])
            # Take mean here
            mean_val = np.average(val, axis=0)
            for name_agent in agent_on_same_network:
                # all_assign[name_agent].append(tf.assign(var[name_agent][i], mean_val))
                var[name_agent][i].load(mean_val, agent[name_agent].network_model.sess)
    def Q_val(self, xs):
        """Return Q-values for a batch of states `xs` (no training)."""
        target = np.zeros(shape=[xs.shape[0]], dtype=np.float32)
        actions = np.zeros(dtype=int, shape=[xs.shape[0]])
        return self.sess.run(self.predict,
                             feed_dict={self.batch_size: xs.shape[0], self.learning_rate: 0, self.X1: xs,
                                        self.target: target, self.actions: actions})
    def train_n(self, xs, ys, actions, batch_size, dropout_rate, lr, epsilon, iter):
        """One optimizer step on batch (xs, ys, actions); logs loss/Q stats
        to TensorBoard. `dropout_rate` is currently unused."""
        _, loss, Q = self.sess.run([self.train, self.loss, self.predict],
                                   feed_dict={self.batch_size: batch_size, self.learning_rate: lr, self.X1: xs,
                                              self.target: ys, self.actions: actions})
        meanQ = np.mean(Q)
        maxQ = np.max(Q)
        # Log to tensorboard
        self.log_to_tensorboard(tag='Loss', group=self.vehicle_name, value=LA.norm(loss) / batch_size, index=iter)
        self.log_to_tensorboard(tag='Epsilon', group=self.vehicle_name, value=epsilon, index=iter)
        self.log_to_tensorboard(tag='Learning Rate', group=self.vehicle_name, value=lr, index=iter)
        self.log_to_tensorboard(tag='MeanQ', group=self.vehicle_name, value=meanQ, index=iter)
        self.log_to_tensorboard(tag='MaxQ', group=self.vehicle_name, value=maxQ, index=iter)
    def action_selection(self, state):
        """Greedy action(s) for `state`: argmax over predicted Q-values.
        Returns an int array (length = batch size)."""
        target = np.zeros(shape=[state.shape[0]], dtype=np.float32)
        actions = np.zeros(dtype=int, shape=[state.shape[0]])
        qvals = self.sess.run(self.predict,
                              feed_dict={self.batch_size: state.shape[0], self.learning_rate: 0.0001,
                                         self.X1: state,
                                         self.target: target, self.actions: actions})
        if qvals.shape[0] > 1:
            # Evaluating batch
            action = np.argmax(qvals, axis=1)
        else:
            # Evaluating one sample
            action = np.zeros(1)
            action[0] = np.argmax(qvals)
        return action.astype(int)
    def log_to_tensorboard(self, tag, group, value, index):
        """Write a single scalar `value` under `group/tag` at step `index`."""
        summary = tf.Summary()
        tag = group + '/' + tag
        summary.value.add(tag=tag, simple_value=value)
        self.stat_writer.add_summary(summary, index)
    def save_network(self, save_path, episode=''):
        """Checkpoint the session under save_path/<vehicle>/<vehicle>_<episode>."""
        save_path = save_path + self.vehicle_name + '/' + self.vehicle_name + '_' + str(episode)
        self.saver.save(self.sess, save_path)
        print('Model Saved: ', save_path)
    def load_network(self, load_path):
        """Restore session weights from a checkpoint at `load_path`."""
        self.saver.restore(self.sess, load_path)
    def get_weights(self):
        # NOTE(review): reads self.weights, which is never assigned in this
        # class -- this method would raise AttributeError as written; confirm
        # whether it is dead code or self.weights is set elsewhere.
        xs = np.zeros(shape=(32, 227, 227, 3))
        actions = np.zeros(dtype=int, shape=[xs.shape[0]])
        ys = np.zeros(shape=[xs.shape[0]], dtype=np.float32)
        return self.sess.run(self.weights,
                             feed_dict={self.batch_size: xs.shape[0], self.learning_rate: 0,
                                        self.X1: xs,
                                        self.target: ys, self.actions: actions})
###########################################################################
# DeepREINFORCE: Class
###########################################################################
class initialize_network_DeepREINFORCE():
    """TF1 graph wrapper for REINFORCE with a learned baseline (one agent)."""
    def __init__(self, cfg, name, vehicle_name):
        """Build the policy + baseline graph for `vehicle_name`; `name` is
        only used to suffix the loss-writer directory."""
        self.g = tf.Graph()
        self.vehicle_name = vehicle_name
        self.iter_baseline = 0
        self.iter_policy = 0
        self.first_frame = True
        self.last_frame = []
        self.iter_combined = 0
        with self.g.as_default():
            stat_writer_path = cfg.network_path + self.vehicle_name + '/return_plot/'
            loss_writer_path = cfg.network_path + self.vehicle_name + '/loss' + name + '/'
            self.stat_writer = tf.summary.FileWriter(stat_writer_path)
            # name_array = 'D:/train/loss'+'/'+name
            self.loss_writer = tf.summary.FileWriter(loss_writer_path)
            self.env_type = cfg.env_type
            self.input_size = cfg.input_size
            self.num_actions = cfg.num_actions
            # Placeholders
            self.batch_size = tf.placeholder(tf.int32, shape=())
            self.learning_rate = tf.placeholder(tf.float32, shape=())
            self.X1 = tf.placeholder(tf.float32, [None, cfg.input_size, cfg.input_size, 3], name='States')
            # self.X = tf.image.resize_images(self.X1, (227, 227))
            self.X = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), self.X1)
            # self.target = tf.placeholder(tf.float32, shape=[None], name='action_probs')
            # self.target_baseline = tf.placeholder(tf.float32, shape=[None], name='baseline')
            self.actions = tf.placeholder(tf.int32, shape=[None, 1], name='Actions')
            # G: discounted return; B: baseline value fed from the branch head.
            self.G = tf.placeholder(tf.float32, shape=[None, 1], name='G')
            self.B = tf.placeholder(tf.float32, shape=[None, 1], name='B')
            # Select the deep network
            self.model = C3F2_REINFORCE_with_baseline(self.X, cfg.num_actions, cfg.train_fc)
            self.predict = self.model.output
            self.baseline = self.model.baseline
            self.ind = tf.one_hot(tf.squeeze(self.actions), cfg.num_actions)
            self.prob_action = tf.reduce_sum(tf.multiply(self.predict, self.ind), axis=1)
            # REINFORCE objective: log pi(a|s) * (G - B), averaged over batch.
            loss_policy = tf.reduce_mean(tf.log(tf.transpose([self.prob_action])) * (self.G - self.B))
            # NOTE(review): the 1e-8 epsilon is added OUTSIDE tf.log here;
            # the usual numerically-safe form is tf.log(p + 1e-8) -- confirm.
            loss_entropy = -tf.reduce_mean(tf.multiply((tf.log(self.predict) + 1e-8), self.predict))
            self.loss_main = -loss_policy - .2 * loss_entropy
            self.loss_branch = mse_loss(self.baseline, self.G)
            self.train_main = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9, beta2=0.99).minimize(
                self.loss_main, name="train_main")
            self.train_branch = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9,
                                                          beta2=0.99).minimize(
                self.loss_branch, name="train_branch")
            # self.train_combined = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9,
            #                                              beta2=0.99).minimize(
            #     self.loss_combined, name="train_combined")
            self.sess = tf.InteractiveSession()
            tf.global_variables_initializer().run()
            tf.local_variables_initializer().run()
            self.saver = tf.train.Saver()
            self.all_vars = tf.trainable_variables()
            # Freeze the graph: any later op creation would raise.
            self.sess.graph.finalize()
        # Load custom weights from custom_load_path if required
        if cfg.custom_load:
            print('Loading weights from: ', cfg.custom_load_path)
            self.load_network(cfg.custom_load_path)
def get_vars(self):
return self.sess.run(self.all_vars)
def initialize_graphs_with_average(self, agent, agent_on_same_network):
values = {}
var = {}
all_assign = {}
for name_agent in agent_on_same_network:
values[name_agent] = agent[name_agent].network_model.get_vars()
var[name_agent] = agent[name_agent].network_model.all_vars
all_assign[name_agent] = []
for i in range(len(values[name_agent])):
val = []
for name_agent in agent_on_same_network:
val.append(values[name_agent][i])
# Take mean here
mean_val = np.average(val, axis=0)
for name_agent in agent_on_same_network:
# all_assign[name_agent].append(tf.assign(var[name_agent][i], mean_val))
var[name_agent][i].load(mean_val, agent[name_agent].network_model.sess)
def prob_actions(self, xs):
G = np.zeros(shape=[1], dtype=np.float32)
B = np.zeros(shape=[1], dtype=np.float32)
actions = np.zeros(dtype=int, shape=[xs.shape[0]])
return self.sess.run(self.predict,
feed_dict={self.batch_size: xs.shape[0], self.learning_rate: 0, self.X1: xs,
self.actions: actions,
self.B: B,
self.G: G})
def train_baseline(self, xs, G, actions, lr, iter):
self.iter_baseline += 1
batch_size = xs.shape[0]
B = np.zeros(shape=[xs.shape[0], 1], dtype=np.float32)
_, loss, baseline_val = self.sess.run([self.train_branch, self.loss_branch, self.baseline],
feed_dict={self.batch_size: xs.shape[0], self.learning_rate: lr,
self.X1: xs,
self.actions: actions,
self.B: B,
self.G: G})
max_baseline = np.max(baseline_val)
# Log to tensorboard
self.log_to_tensorboard(tag='Loss_Baseline', group=self.vehicle_name, value=loss / batch_size,
index=self.iter_baseline)
# self.log_to_tensorboard(tag='Epsilon', group=self.vehicle_name, value=epsilon, index=iter)
self.log_to_tensorboard(tag='Learning Rate', group=self.vehicle_name, value=lr, index=self.iter_baseline)
# self.log_to_tensorboard(tag='MeanQ', group=self.vehicle_name, value=meanQ, index=iter)
self.log_to_tensorboard(tag='Max_baseline', group=self.vehicle_name, value=max_baseline,
index=self.iter_baseline)
return baseline_val
def get_baseline(self, xs):
lr = 0
actions = np.zeros(dtype=int, shape=[xs.shape[0], 1])
B = np.zeros(shape=[xs.shape[0], 1], dtype=np.float32)
G = np.zeros(shape=[xs.shape[0], 1], dtype=np.float32)
baseline = self.sess.run(self.baseline,
feed_dict={self.batch_size: xs.shape[0], self.learning_rate: lr,
self.X1: xs,
self.actions: actions,
self.B: B,
self.G: G})
return baseline
def train_policy(self, xs, actions, B, G, lr, iter):
self.iter_policy += 1
batch_size = xs.shape[0]
train_eval = self.train_main
loss_eval = self.loss_main
predict_eval = self.predict
_, loss, ProbActions = self.sess.run([train_eval, loss_eval, predict_eval],
feed_dict={self.batch_size: xs.shape[0], self.learning_rate: lr,
self.X1: xs,
self.actions: actions,
self.B: B,
self.G: G})
MaxProbActions = np.max(ProbActions)
# Log to tensorboard
self.log_to_tensorboard(tag='Loss_Policy', group=self.vehicle_name, value=LA.norm(loss) / batch_size,
index=self.iter_policy)
self.log_to_tensorboard(tag='Learning Rate', group=self.vehicle_name, value=lr, index=self.iter_policy)
self.log_to_tensorboard(tag='MaxProb', group=self.vehicle_name, value=MaxProbActions, index=self.iter_policy)
def action_selection(self, state):
action = np.zeros(dtype=int, shape=[state.shape[0], 1])
probs = self.sess.run(self.predict,
feed_dict={self.batch_size: state.shape[0], self.learning_rate: 0.0001,
self.X1: state,
self.actions: action})
for j in range(probs.shape[0]):
action[j] = np.random.choice(self.num_actions, 1, p=probs[j])[0]
return action.astype(int)
def log_to_tensorboard(self, tag, group, value, index):
summary = tf.Summary()
tag = group + '/' + tag
summary.value.add(tag=tag, simple_value=value)
self.stat_writer.add_summary(summary, index)
def save_network(self, save_path, episode=''):
save_path = save_path + self.vehicle_name + '/' + self.vehicle_name + '_' + str(episode)
self.saver.save(self.sess, save_path)
print('Model Saved: ', save_path)
def load_network(self, load_path):
self.saver.restore(self.sess, load_path)
| [
"tensorflow.local_variables_initializer",
"tensorflow.transpose",
"tensorflow.multiply",
"numpy.linalg.norm",
"network.loss_functions.mse_loss",
"tensorflow.log",
"tensorflow.Graph",
"numpy.mean",
"tensorflow.placeholder",
"numpy.max",
"tensorflow.trainable_variables",
"tensorflow.train.AdamOp... | [((341, 351), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (349, 351), True, 'import tensorflow as tf\n'), ((3605, 3652), 'numpy.zeros', 'np.zeros', ([], {'shape': '[xs.shape[0]]', 'dtype': 'np.float32'}), '(shape=[xs.shape[0]], dtype=np.float32)\n', (3613, 3652), True, 'import numpy as np\n'), ((3671, 3711), 'numpy.zeros', 'np.zeros', ([], {'dtype': 'int', 'shape': '[xs.shape[0]]'}), '(dtype=int, shape=[xs.shape[0]])\n', (3679, 3711), True, 'import numpy as np\n'), ((4322, 4332), 'numpy.mean', 'np.mean', (['Q'], {}), '(Q)\n', (4329, 4332), True, 'import numpy as np\n'), ((4348, 4357), 'numpy.max', 'np.max', (['Q'], {}), '(Q)\n', (4354, 4357), True, 'import numpy as np\n'), ((4947, 4997), 'numpy.zeros', 'np.zeros', ([], {'shape': '[state.shape[0]]', 'dtype': 'np.float32'}), '(shape=[state.shape[0]], dtype=np.float32)\n', (4955, 4997), True, 'import numpy as np\n'), ((5016, 5059), 'numpy.zeros', 'np.zeros', ([], {'dtype': 'int', 'shape': '[state.shape[0]]'}), '(dtype=int, shape=[state.shape[0]])\n', (5024, 5059), True, 'import numpy as np\n'), ((5697, 5709), 'tensorflow.Summary', 'tf.Summary', ([], {}), '()\n', (5707, 5709), True, 'import tensorflow as tf\n'), ((6220, 6253), 'numpy.zeros', 'np.zeros', ([], {'shape': '(32, 227, 227, 3)'}), '(shape=(32, 227, 227, 3))\n', (6228, 6253), True, 'import numpy as np\n'), ((6272, 6312), 'numpy.zeros', 'np.zeros', ([], {'dtype': 'int', 'shape': '[xs.shape[0]]'}), '(dtype=int, shape=[xs.shape[0]])\n', (6280, 6312), True, 'import numpy as np\n'), ((6326, 6373), 'numpy.zeros', 'np.zeros', ([], {'shape': '[xs.shape[0]]', 'dtype': 'np.float32'}), '(shape=[xs.shape[0]], dtype=np.float32)\n', (6334, 6373), True, 'import numpy as np\n'), ((6932, 6942), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (6940, 6942), True, 'import tensorflow as tf\n'), ((11370, 11407), 'numpy.zeros', 'np.zeros', ([], {'shape': '[1]', 'dtype': 'np.float32'}), '(shape=[1], dtype=np.float32)\n', (11378, 11407), True, 
'import numpy as np\n'), ((11420, 11457), 'numpy.zeros', 'np.zeros', ([], {'shape': '[1]', 'dtype': 'np.float32'}), '(shape=[1], dtype=np.float32)\n', (11428, 11457), True, 'import numpy as np\n'), ((11476, 11516), 'numpy.zeros', 'np.zeros', ([], {'dtype': 'int', 'shape': '[xs.shape[0]]'}), '(dtype=int, shape=[xs.shape[0]])\n', (11484, 11516), True, 'import numpy as np\n'), ((11966, 12016), 'numpy.zeros', 'np.zeros', ([], {'shape': '[xs.shape[0], 1]', 'dtype': 'np.float32'}), '(shape=[xs.shape[0], 1], dtype=np.float32)\n', (11974, 12016), True, 'import numpy as np\n'), ((12540, 12560), 'numpy.max', 'np.max', (['baseline_val'], {}), '(baseline_val)\n', (12546, 12560), True, 'import numpy as np\n'), ((13313, 13356), 'numpy.zeros', 'np.zeros', ([], {'dtype': 'int', 'shape': '[xs.shape[0], 1]'}), '(dtype=int, shape=[xs.shape[0], 1])\n', (13321, 13356), True, 'import numpy as np\n'), ((13369, 13419), 'numpy.zeros', 'np.zeros', ([], {'shape': '[xs.shape[0], 1]', 'dtype': 'np.float32'}), '(shape=[xs.shape[0], 1], dtype=np.float32)\n', (13377, 13419), True, 'import numpy as np\n'), ((13432, 13482), 'numpy.zeros', 'np.zeros', ([], {'shape': '[xs.shape[0], 1]', 'dtype': 'np.float32'}), '(shape=[xs.shape[0], 1], dtype=np.float32)\n', (13440, 13482), True, 'import numpy as np\n'), ((14622, 14641), 'numpy.max', 'np.max', (['ProbActions'], {}), '(ProbActions)\n', (14628, 14641), True, 'import numpy as np\n'), ((15124, 15170), 'numpy.zeros', 'np.zeros', ([], {'dtype': 'int', 'shape': '[state.shape[0], 1]'}), '(dtype=int, shape=[state.shape[0], 1])\n', (15132, 15170), True, 'import numpy as np\n'), ((15670, 15682), 'tensorflow.Summary', 'tf.Summary', ([], {}), '()\n', (15680, 15682), True, 'import tensorflow as tf\n'), ((697, 736), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['stat_writer_path'], {}), '(stat_writer_path)\n', (718, 736), True, 'import tensorflow as tf\n'), ((820, 859), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['loss_writer_path'], 
{}), '(loss_writer_path)\n', (841, 859), True, 'import tensorflow as tf\n'), ((1051, 1085), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '()'}), '(tf.int32, shape=())\n', (1065, 1085), True, 'import tensorflow as tf\n'), ((1119, 1155), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '()'}), '(tf.float32, shape=())\n', (1133, 1155), True, 'import tensorflow as tf\n'), ((1178, 1267), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, cfg.input_size, cfg.input_size, 3]'], {'name': '"""States"""'}), "(tf.float32, [None, cfg.input_size, cfg.input_size, 3], name=\n 'States')\n", (1192, 1267), True, 'import tensorflow as tf\n'), ((1455, 1509), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None]', 'name': '"""Qvals"""'}), "(tf.float32, shape=[None], name='Qvals')\n", (1469, 1509), True, 'import tensorflow as tf\n'), ((1537, 1591), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None]', 'name': '"""Actions"""'}), "(tf.int32, shape=[None], name='Actions')\n", (1551, 1591), True, 'import tensorflow as tf\n'), ((1804, 1845), 'tensorflow.one_hot', 'tf.one_hot', (['self.actions', 'cfg.num_actions'], {}), '(self.actions, cfg.num_actions)\n', (1814, 1845), True, 'import tensorflow as tf\n'), ((1950, 1981), 'network.loss_functions.huber_loss', 'huber_loss', (['pred_Q', 'self.target'], {}), '(pred_Q, self.target)\n', (1960, 1981), False, 'from network.loss_functions import huber_loss, mse_loss\n'), ((2163, 2186), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (2184, 2186), True, 'import tensorflow as tf\n'), ((2315, 2331), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2329, 2331), True, 'import tensorflow as tf\n'), ((2360, 2384), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (2382, 2384), True, 'import tensorflow as tf\n'), ((3307, 3330), 'numpy.average', 'np.average', (['val'], {'axis': 
'(0)'}), '(val, axis=0)\n', (3317, 3330), True, 'import numpy as np\n'), ((5433, 5457), 'numpy.argmax', 'np.argmax', (['qvals'], {'axis': '(1)'}), '(qvals, axis=1)\n', (5442, 5457), True, 'import numpy as np\n'), ((5529, 5540), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (5537, 5540), True, 'import numpy as np\n'), ((5565, 5581), 'numpy.argmax', 'np.argmax', (['qvals'], {}), '(qvals)\n', (5574, 5581), True, 'import numpy as np\n'), ((7378, 7417), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['stat_writer_path'], {}), '(stat_writer_path)\n', (7399, 7417), True, 'import tensorflow as tf\n'), ((7501, 7540), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['loss_writer_path'], {}), '(loss_writer_path)\n', (7522, 7540), True, 'import tensorflow as tf\n'), ((7732, 7766), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '()'}), '(tf.int32, shape=())\n', (7746, 7766), True, 'import tensorflow as tf\n'), ((7800, 7836), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '()'}), '(tf.float32, shape=())\n', (7814, 7836), True, 'import tensorflow as tf\n'), ((7859, 7948), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, cfg.input_size, cfg.input_size, 3]'], {'name': '"""States"""'}), "(tf.float32, [None, cfg.input_size, cfg.input_size, 3], name=\n 'States')\n", (7873, 7948), True, 'import tensorflow as tf\n'), ((8322, 8379), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, 1]', 'name': '"""Actions"""'}), "(tf.int32, shape=[None, 1], name='Actions')\n", (8336, 8379), True, 'import tensorflow as tf\n'), ((8401, 8454), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 1]', 'name': '"""G"""'}), "(tf.float32, shape=[None, 1], name='G')\n", (8415, 8454), True, 'import tensorflow as tf\n'), ((8476, 8529), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 1]', 'name': '"""B"""'}), "(tf.float32, shape=[None, 1], 
name='B')\n", (8490, 8529), True, 'import tensorflow as tf\n'), ((9222, 9253), 'network.loss_functions.mse_loss', 'mse_loss', (['self.baseline', 'self.G'], {}), '(self.baseline, self.G)\n', (9230, 9253), False, 'from network.loss_functions import huber_loss, mse_loss\n'), ((9929, 9952), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (9950, 9952), True, 'import tensorflow as tf\n'), ((10081, 10097), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (10095, 10097), True, 'import tensorflow as tf\n'), ((10126, 10150), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (10148, 10150), True, 'import tensorflow as tf\n'), ((11071, 11094), 'numpy.average', 'np.average', (['val'], {'axis': '(0)'}), '(val, axis=0)\n', (11081, 11094), True, 'import numpy as np\n'), ((1881, 1916), 'tensorflow.multiply', 'tf.multiply', (['self.model.output', 'ind'], {}), '(self.model.output, ind)\n', (1892, 1916), True, 'import tensorflow as tf\n'), ((8790, 8814), 'tensorflow.squeeze', 'tf.squeeze', (['self.actions'], {}), '(self.actions)\n', (8800, 8814), True, 'import tensorflow as tf\n'), ((8878, 8913), 'tensorflow.multiply', 'tf.multiply', (['self.predict', 'self.ind'], {}), '(self.predict, self.ind)\n', (8889, 8913), True, 'import tensorflow as tf\n'), ((15503, 15552), 'numpy.random.choice', 'np.random.choice', (['self.num_actions', '(1)'], {'p': 'probs[j]'}), '(self.num_actions, 1, p=probs[j])\n', (15519, 15552), True, 'import numpy as np\n'), ((1377, 1418), 'tensorflow.image.per_image_standardization', 'tf.image.per_image_standardization', (['frame'], {}), '(frame)\n', (1411, 1418), True, 'import tensorflow as tf\n'), ((2007, 2086), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate', 'beta1': '(0.9)', 'beta2': '(0.99)'}), '(learning_rate=self.learning_rate, beta1=0.9, beta2=0.99)\n', (2029, 2086), True, 'import tensorflow as tf\n'), ((2199, 2232), 
'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2230, 2232), True, 'import tensorflow as tf\n'), ((2251, 2283), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (2281, 2283), True, 'import tensorflow as tf\n'), ((4462, 4475), 'numpy.linalg.norm', 'LA.norm', (['loss'], {}), '(loss)\n', (4469, 4475), True, 'from numpy import linalg as LA\n'), ((8058, 8099), 'tensorflow.image.per_image_standardization', 'tf.image.per_image_standardization', (['frame'], {}), '(frame)\n', (8092, 8099), True, 'import tensorflow as tf\n'), ((9285, 9364), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate', 'beta1': '(0.9)', 'beta2': '(0.99)'}), '(learning_rate=self.learning_rate, beta1=0.9, beta2=0.99)\n', (9307, 9364), True, 'import tensorflow as tf\n'), ((9459, 9538), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate', 'beta1': '(0.9)', 'beta2': '(0.99)'}), '(learning_rate=self.learning_rate, beta1=0.9, beta2=0.99)\n', (9481, 9538), True, 'import tensorflow as tf\n'), ((9965, 9998), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (9996, 9998), True, 'import tensorflow as tf\n'), ((10017, 10049), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (10047, 10049), True, 'import tensorflow as tf\n'), ((14753, 14766), 'numpy.linalg.norm', 'LA.norm', (['loss'], {}), '(loss)\n', (14760, 14766), True, 'from numpy import linalg as LA\n'), ((8972, 9004), 'tensorflow.transpose', 'tf.transpose', (['[self.prob_action]'], {}), '([self.prob_action])\n', (8984, 9004), True, 'import tensorflow as tf\n'), ((9083, 9103), 'tensorflow.log', 'tf.log', (['self.predict'], {}), '(self.predict)\n', (9089, 9103), True, 'import tensorflow as tf\n')] |
#
# From https://github.com/rguthrie3/BiLSTM-CRF/blob/master/model.py
#
import dynet
import numpy as np
class CRF():
def __init__(self, model, id_to_tag):
self.id_to_tag = id_to_tag
self.tag_to_id = {tag: id for id, tag in list(id_to_tag.items())}
self.n_tags = len(self.id_to_tag)
self.b_id = len(self.tag_to_id)
self.e_id = len(self.tag_to_id) + 1
self.transitions = model.add_lookup_parameters((self.n_tags+2,
self.n_tags+2),
name="transitions")
def score_sentence(self, observations, tags):
assert len(observations) == len(tags)
score_seq = [0]
score = dynet.scalarInput(0)
tags = [self.b_id] + tags
for i, obs in enumerate(observations):
# print self.b_id
# print self.e_id
# print obs.value()
# print tags
# print self.transitions
# print self.transitions[tags[i+1]].value()
score = score \
+ dynet.pick(self.transitions[tags[i + 1]], tags[i])\
+ dynet.pick(obs, tags[i + 1])
score_seq.append(score.value())
score = score + dynet.pick(self.transitions[self.e_id], tags[-1])
return score
def viterbi_loss(self, observations, tags):
observations = [dynet.concatenate([obs, dynet.inputVector([-1e10, -1e10])], d=0) for obs in
observations]
viterbi_tags, viterbi_score = self.viterbi_decoding(observations)
if viterbi_tags != tags:
gold_score = self.score_sentence(observations, tags)
return (viterbi_score - gold_score), viterbi_tags
else:
return dynet.scalarInput(0), viterbi_tags
def neg_log_loss(self, observations, tags):
observations = [dynet.concatenate([obs, dynet.inputVector([-1e10, -1e10])], d=0) for obs in observations]
gold_score = self.score_sentence(observations, tags)
forward_score = self.forward(observations)
return forward_score - gold_score
def forward(self, observations):
def log_sum_exp(scores):
npval = scores.npvalue()
argmax_score = np.argmax(npval)
max_score_expr = dynet.pick(scores, argmax_score)
max_score_expr_broadcast = dynet.concatenate([max_score_expr] * (self.n_tags+2))
return max_score_expr + dynet.log(
dynet.sum_dim(dynet.transpose(dynet.exp(scores - max_score_expr_broadcast)), [1]))
init_alphas = [-1e10] * (self.n_tags + 2)
init_alphas[self.b_id] = 0
for_expr = dynet.inputVector(init_alphas)
for idx, obs in enumerate(observations):
# print "obs: ", obs.value()
alphas_t = []
for next_tag in range(self.n_tags+2):
obs_broadcast = dynet.concatenate([dynet.pick(obs, next_tag)] * (self.n_tags + 2))
# print "for_expr: ", for_expr.value()
# print "transitions next_tag: ", self.transitions[next_tag].value()
# print "obs_broadcast: ", obs_broadcast.value()
next_tag_expr = for_expr + self.transitions[next_tag] + obs_broadcast
alphas_t.append(log_sum_exp(next_tag_expr))
for_expr = dynet.concatenate(alphas_t)
terminal_expr = for_expr + self.transitions[self.e_id]
alpha = log_sum_exp(terminal_expr)
return alpha
def viterbi_decoding(self, observations):
backpointers = []
init_vvars = [-1e10] * (self.n_tags + 2)
init_vvars[self.b_id] = 0 # <Start> has all the probability
for_expr = dynet.inputVector(init_vvars)
trans_exprs = [self.transitions[idx] for idx in range(self.n_tags + 2)]
for obs in observations:
bptrs_t = []
vvars_t = []
for next_tag in range(self.n_tags + 2):
next_tag_expr = for_expr + trans_exprs[next_tag]
next_tag_arr = next_tag_expr.npvalue()
best_tag_id = np.argmax(next_tag_arr)
bptrs_t.append(best_tag_id)
vvars_t.append(dynet.pick(next_tag_expr, best_tag_id))
for_expr = dynet.concatenate(vvars_t) + obs
backpointers.append(bptrs_t)
# Perform final transition to terminal
terminal_expr = for_expr + trans_exprs[self.e_id]
terminal_arr = terminal_expr.npvalue()
best_tag_id = np.argmax(terminal_arr)
path_score = dynet.pick(terminal_expr, best_tag_id)
# Reverse over the backpointers to get the best path
best_path = [best_tag_id] # Start with the tag that was best for terminal
for bptrs_t in reversed(backpointers):
best_tag_id = bptrs_t[best_tag_id]
best_path.append(best_tag_id)
start = best_path.pop() # Remove the start symbol
best_path.reverse()
assert start == self.b_id
# Return best path and best path's score
return best_path, path_score | [
"dynet.scalarInput",
"dynet.exp",
"numpy.argmax",
"dynet.pick",
"dynet.inputVector",
"dynet.concatenate"
] | [((741, 761), 'dynet.scalarInput', 'dynet.scalarInput', (['(0)'], {}), '(0)\n', (758, 761), False, 'import dynet\n'), ((2709, 2739), 'dynet.inputVector', 'dynet.inputVector', (['init_alphas'], {}), '(init_alphas)\n', (2726, 2739), False, 'import dynet\n'), ((3746, 3775), 'dynet.inputVector', 'dynet.inputVector', (['init_vvars'], {}), '(init_vvars)\n', (3763, 3775), False, 'import dynet\n'), ((4551, 4574), 'numpy.argmax', 'np.argmax', (['terminal_arr'], {}), '(terminal_arr)\n', (4560, 4574), True, 'import numpy as np\n'), ((4596, 4634), 'dynet.pick', 'dynet.pick', (['terminal_expr', 'best_tag_id'], {}), '(terminal_expr, best_tag_id)\n', (4606, 4634), False, 'import dynet\n'), ((1274, 1323), 'dynet.pick', 'dynet.pick', (['self.transitions[self.e_id]', 'tags[-1]'], {}), '(self.transitions[self.e_id], tags[-1])\n', (1284, 1323), False, 'import dynet\n'), ((2286, 2302), 'numpy.argmax', 'np.argmax', (['npval'], {}), '(npval)\n', (2295, 2302), True, 'import numpy as np\n'), ((2332, 2364), 'dynet.pick', 'dynet.pick', (['scores', 'argmax_score'], {}), '(scores, argmax_score)\n', (2342, 2364), False, 'import dynet\n'), ((2404, 2459), 'dynet.concatenate', 'dynet.concatenate', (['([max_score_expr] * (self.n_tags + 2))'], {}), '([max_score_expr] * (self.n_tags + 2))\n', (2421, 2459), False, 'import dynet\n'), ((3380, 3407), 'dynet.concatenate', 'dynet.concatenate', (['alphas_t'], {}), '(alphas_t)\n', (3397, 3407), False, 'import dynet\n'), ((1177, 1205), 'dynet.pick', 'dynet.pick', (['obs', 'tags[i + 1]'], {}), '(obs, tags[i + 1])\n', (1187, 1205), False, 'import dynet\n'), ((1799, 1819), 'dynet.scalarInput', 'dynet.scalarInput', (['(0)'], {}), '(0)\n', (1816, 1819), False, 'import dynet\n'), ((4141, 4164), 'numpy.argmax', 'np.argmax', (['next_tag_arr'], {}), '(next_tag_arr)\n', (4150, 4164), True, 'import numpy as np\n'), ((4303, 4329), 'dynet.concatenate', 'dynet.concatenate', (['vvars_t'], {}), '(vvars_t)\n', (4320, 4329), False, 'import dynet\n'), ((1103, 1153), 
'dynet.pick', 'dynet.pick', (['self.transitions[tags[i + 1]]', 'tags[i]'], {}), '(self.transitions[tags[i + 1]], tags[i])\n', (1113, 1153), False, 'import dynet\n'), ((1442, 1493), 'dynet.inputVector', 'dynet.inputVector', (['[-10000000000.0, -10000000000.0]'], {}), '([-10000000000.0, -10000000000.0])\n', (1459, 1493), False, 'import dynet\n'), ((1931, 1982), 'dynet.inputVector', 'dynet.inputVector', (['[-10000000000.0, -10000000000.0]'], {}), '([-10000000000.0, -10000000000.0])\n', (1948, 1982), False, 'import dynet\n'), ((4240, 4278), 'dynet.pick', 'dynet.pick', (['next_tag_expr', 'best_tag_id'], {}), '(next_tag_expr, best_tag_id)\n', (4250, 4278), False, 'import dynet\n'), ((2551, 2595), 'dynet.exp', 'dynet.exp', (['(scores - max_score_expr_broadcast)'], {}), '(scores - max_score_expr_broadcast)\n', (2560, 2595), False, 'import dynet\n'), ((2957, 2982), 'dynet.pick', 'dynet.pick', (['obs', 'next_tag'], {}), '(obs, next_tag)\n', (2967, 2982), False, 'import dynet\n')] |
import numpy as np
shape = tuple(map(int,input().strip().split()))
zeros = np.zeros(shape,dtype=np.int32)
ones = np.ones(shape,dtype=np.int32)
print(zeros)
print(ones)
| [
"numpy.zeros",
"numpy.ones"
] | [((75, 106), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.int32'}), '(shape, dtype=np.int32)\n', (83, 106), True, 'import numpy as np\n'), ((113, 143), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'np.int32'}), '(shape, dtype=np.int32)\n', (120, 143), True, 'import numpy as np\n')] |
import pyconll
import urllib
import numpy as np
from pathlib import Path
from keras.preprocessing.sequence import pad_sequences
'''
Universal Dependencies Treebank Dataset
https://universaldependencies.org
'''
def read_conllu(path):
data = pyconll.load_from_file(path)
tagged_sentences = list()
t = 0
for sentence in data:
tagged_sentence = list()
for token in sentence:
if token.upos and token.form:
t += 1
tagged_sentence.append((token.form.lower(), token.upos))
tagged_sentences.append(tagged_sentence)
return tagged_sentences
def tag_sequence(sentences):
return [[t for w, t in sentence] for sentence in sentences]
def text_sequence(sentences):
return [[w for w, t in sentence] for sentence in sentences]
def sentence_split(sentences, max_len):
new_sentence = list()
for data in sentences:
new_sentence.append(([data[x:x + max_len] for x in range(0, len(data), max_len)]))
new_sentence = [val for sublist in new_sentence for val in sublist]
return new_sentence
def convert_ner_format(text, label, file):
with open(file, 'w') as f:
words = 0
i = 0
for zip_i in zip(text, label):
a, b = tuple(zip_i)
for r in range(len(a)):
item = a[r]+' '+b[r]
f.write("%s\n" % item)
words += 1
f.write("\n")
i += 1
#if i==3: break
print('Sentences:', i, 'Words:', words)
def to_categorical(sequences, categories):
cat_sequences = []
for s in sequences:
cats = []
for item in s:
cats.append(np.zeros(categories))
cats[-1][item] = 1.0
cat_sequences.append(cats)
return np.array(cat_sequences)
def load_udtb_dataset():
# Download and load the dataset
UDTB_FOLDER = '/tank/local/ruiliu/dataset/ud_treebank'
udtb_folder = Path(UDTB_FOLDER)
if not udtb_folder.exists():
udtb_folder.mkdir(parents=True, exist_ok=True)
UD_ENGLISH_TRAIN = 'en_partut-ud-train.conllu'
UD_ENGLISH_DEV = 'en_partut-ud-dev.conllu'
UD_ENGLISH_TEST = 'en_partut-ud-test.conllu'
ud_train_file = Path(UDTB_FOLDER + '/' + UD_ENGLISH_TRAIN)
if not ud_train_file.exists():
urllib.request.urlretrieve('http://archive.aueb.gr:8085/files/en_partut-ud-train.conllu',
UDTB_FOLDER + '/' + UD_ENGLISH_TRAIN)
ud_dev_file = Path(UDTB_FOLDER + '/' + UD_ENGLISH_DEV)
if not ud_dev_file.exists():
urllib.request.urlretrieve('http://archive.aueb.gr:8085/files/en_partut-ud-dev.conllu',
UDTB_FOLDER + '/' + UD_ENGLISH_DEV)
ud_test_file = Path(UDTB_FOLDER + '/' + UD_ENGLISH_TEST)
if not ud_test_file.exists():
urllib.request.urlretrieve('http://archive.aueb.gr:8085/files/en_partut-ud-test.conllu',
UDTB_FOLDER + '/' + UD_ENGLISH_TEST)
train_sentences = read_conllu(UDTB_FOLDER + '/' + UD_ENGLISH_TRAIN)
val_sentences = read_conllu(UDTB_FOLDER + '/' + UD_ENGLISH_DEV)
# read the train and eval text
train_text = text_sequence(train_sentences)
val_text = text_sequence(val_sentences)
train_label = tag_sequence(train_sentences)
val_label = tag_sequence(val_sentences)
# build dictionary with tag vocabulary
words, tags = set([]), set([])
for s in train_text:
for w in s:
words.add(w.lower())
for ts in train_label:
for t in ts:
tags.add(t)
word2index = {w: i + 2 for i, w in enumerate(list(words))}
word2index['-PAD-'] = 0
word2index['-OOV-'] = 1
tag2index = {t: i + 1 for i, t in enumerate(list(tags))}
tag2index['-PAD-'] = 0
# prepare the training data
train_sentences_x, val_sentences_x, train_tags_y, val_tags_y = [], [], [], []
for s in train_text:
s_int = []
for w in s:
try:
s_int.append(word2index[w.lower()])
except KeyError:
s_int.append(word2index['-OOV-'])
train_sentences_x.append(s_int)
for s in val_text:
s_int = []
for w in s:
try:
s_int.append(word2index[w.lower()])
except KeyError:
s_int.append(word2index['-OOV-'])
val_sentences_x.append(s_int)
for s in train_label:
train_tags_y.append([tag2index[t] for t in s])
for s in val_label:
val_tags_y.append([tag2index[t] for t in s])
MAX_LENGTH = len(max(train_sentences_x, key=len))
train_sentences_x = pad_sequences(train_sentences_x, maxlen=MAX_LENGTH, padding='post')
val_sentences_x = pad_sequences(val_sentences_x, maxlen=MAX_LENGTH, padding='post')
train_tags_y = pad_sequences(train_tags_y, maxlen=MAX_LENGTH, padding='post')
val_tags_y = pad_sequences(val_tags_y, maxlen=MAX_LENGTH, padding='post')
return train_sentences_x, val_sentences_x, train_tags_y, val_tags_y, MAX_LENGTH, word2index, tag2index
| [
"pyconll.load_from_file",
"urllib.request.urlretrieve",
"pathlib.Path",
"numpy.array",
"numpy.zeros",
"keras.preprocessing.sequence.pad_sequences"
] | [((247, 275), 'pyconll.load_from_file', 'pyconll.load_from_file', (['path'], {}), '(path)\n', (269, 275), False, 'import pyconll\n'), ((1787, 1810), 'numpy.array', 'np.array', (['cat_sequences'], {}), '(cat_sequences)\n', (1795, 1810), True, 'import numpy as np\n'), ((1952, 1969), 'pathlib.Path', 'Path', (['UDTB_FOLDER'], {}), '(UDTB_FOLDER)\n', (1956, 1969), False, 'from pathlib import Path\n'), ((2227, 2269), 'pathlib.Path', 'Path', (["(UDTB_FOLDER + '/' + UD_ENGLISH_TRAIN)"], {}), "(UDTB_FOLDER + '/' + UD_ENGLISH_TRAIN)\n", (2231, 2269), False, 'from pathlib import Path\n'), ((2495, 2535), 'pathlib.Path', 'Path', (["(UDTB_FOLDER + '/' + UD_ENGLISH_DEV)"], {}), "(UDTB_FOLDER + '/' + UD_ENGLISH_DEV)\n", (2499, 2535), False, 'from pathlib import Path\n'), ((2756, 2797), 'pathlib.Path', 'Path', (["(UDTB_FOLDER + '/' + UD_ENGLISH_TEST)"], {}), "(UDTB_FOLDER + '/' + UD_ENGLISH_TEST)\n", (2760, 2797), False, 'from pathlib import Path\n'), ((4654, 4721), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['train_sentences_x'], {'maxlen': 'MAX_LENGTH', 'padding': '"""post"""'}), "(train_sentences_x, maxlen=MAX_LENGTH, padding='post')\n", (4667, 4721), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((4744, 4809), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['val_sentences_x'], {'maxlen': 'MAX_LENGTH', 'padding': '"""post"""'}), "(val_sentences_x, maxlen=MAX_LENGTH, padding='post')\n", (4757, 4809), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((4829, 4891), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['train_tags_y'], {'maxlen': 'MAX_LENGTH', 'padding': '"""post"""'}), "(train_tags_y, maxlen=MAX_LENGTH, padding='post')\n", (4842, 4891), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((4909, 4969), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['val_tags_y'], {'maxlen': 'MAX_LENGTH', 'padding': '"""post"""'}), "(val_tags_y, 
maxlen=MAX_LENGTH, padding='post')\n", (4922, 4969), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((2313, 2450), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['"""http://archive.aueb.gr:8085/files/en_partut-ud-train.conllu"""', "(UDTB_FOLDER + '/' + UD_ENGLISH_TRAIN)"], {}), "(\n 'http://archive.aueb.gr:8085/files/en_partut-ud-train.conllu', \n UDTB_FOLDER + '/' + UD_ENGLISH_TRAIN)\n", (2339, 2450), False, 'import urllib\n'), ((2577, 2710), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['"""http://archive.aueb.gr:8085/files/en_partut-ud-dev.conllu"""', "(UDTB_FOLDER + '/' + UD_ENGLISH_DEV)"], {}), "(\n 'http://archive.aueb.gr:8085/files/en_partut-ud-dev.conllu', \n UDTB_FOLDER + '/' + UD_ENGLISH_DEV)\n", (2603, 2710), False, 'import urllib\n'), ((2840, 2975), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['"""http://archive.aueb.gr:8085/files/en_partut-ud-test.conllu"""', "(UDTB_FOLDER + '/' + UD_ENGLISH_TEST)"], {}), "(\n 'http://archive.aueb.gr:8085/files/en_partut-ud-test.conllu', \n UDTB_FOLDER + '/' + UD_ENGLISH_TEST)\n", (2866, 2975), False, 'import urllib\n'), ((1686, 1706), 'numpy.zeros', 'np.zeros', (['categories'], {}), '(categories)\n', (1694, 1706), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import rospy, sys
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import cv2, os, struct
from sensor_msgs.msg import CompressedImage
import numpy as np
class PngWriter:
    """ROS node that writes incoming depth images to disk as PNG files.

    Subscribes to the ``/image`` topic -- either as ``sensor_msgs/Image`` or,
    when the ``~compressed`` parameter is set, as a ``compressedDepth``
    ``sensor_msgs/CompressedImage`` -- and saves one PNG per message, named
    after the message timestamp.
    """

    def __init__(self):
        # Output directory ("" means the current working directory).
        self.save_root = ""
        if rospy.has_param("save_root"):
            self.save_root = rospy.get_param("save_root")
        rospy.loginfo("Save to directory: {}".format(self.save_root))
        # File name pattern; %s is replaced by the "<secs>.<microsecs>" stamp.
        self.format_string = "frame%s.png"
        if rospy.has_param("~filename_format"):
            self.format_string = rospy.get_param("~filename_format")
        rospy.loginfo("Using format: {}".format(self.format_string))
        # Whether the topic carries compressedDepth messages.
        self.compressed = False
        if rospy.has_param("~compressed"):
            self.compressed = rospy.get_param("~compressed")
        rospy.loginfo("Using compressed image messages: {}".format(self.compressed))
        if self.compressed:
            self.image_sub = rospy.Subscriber("/image", CompressedImage, self.callback)
        else:
            # cv_bridge is only needed for raw Image messages.
            self.bridge = CvBridge()
            self.image_sub = rospy.Subscriber("/image", Image, self.callback)
        rospy.loginfo("Writer initialized")

    def callback(self, img_msg):
        """Convert one image message to an OpenCV matrix and save it as PNG."""
        stamp = img_msg.header.stamp
        time_str = '%d.%06d' % (stamp.secs, stamp.nsecs)
        filepath = os.path.join(self.save_root, (self.format_string % time_str))
        # Convert ros image to cv matrix using same encoding
        if self.compressed:
            cv_image = self.compressedDepthDecode(img_msg)
        else:
            try:
                # ~format is re-read per message so it can change at runtime.
                if rospy.has_param("~format"):
                    format = rospy.get_param("~format")
                else:
                    format = 'mono16'
                cv_image = self.bridge.imgmsg_to_cv2(img_msg, desired_encoding=format)
            except CvBridgeError as e:
                print(e)
                return
        # Save as png image
        cv2.imwrite(filepath, cv_image)
        rospy.logdebug("Saved {}".format(filepath))

    def compressedDepthDecode(self, img_msg):
        """Decode a 'compressedDepth' CompressedImage into a 16UC1 depth matrix.

        Based on https://answers.ros.org/question/249775/display-compresseddepth-image-python-cv2/
        Raises Exception for unexpected compression types, undecodable
        payloads, and unimplemented depth formats.
        """
        depth_fmt, compr_type = img_msg.format.split(';')
        # remove white space
        depth_fmt = depth_fmt.strip()
        compr_type = compr_type.strip()
        if compr_type != "compressedDepth":
            raise Exception("Compression type is not 'compressedDepth'."
                            "You probably subscribed to the wrong topic.")
        # remove header from raw data
        depth_header_size = 12
        raw_data = img_msg.data[depth_header_size:]
        # FIX: np.frombuffer replaces np.fromstring, which is deprecated for
        # binary input and removed in recent NumPy releases.
        depth_img_raw = cv2.imdecode(np.frombuffer(raw_data, np.uint8), cv2.IMREAD_UNCHANGED)
        if depth_img_raw is None:
            # probably wrong header size
            raise Exception("Could not decode compressed depth image."
                            "You may need to change 'depth_header_size'!")
        if depth_fmt == "16UC1":
            # write raw image data
            return depth_img_raw
        elif depth_fmt == "32FC1":
            raw_header = img_msg.data[:depth_header_size]
            # header: int, float, float
            [compfmt, depthQuantA, depthQuantB] = struct.unpack('iff', raw_header)
            depth_img_scaled = depthQuantA / (depth_img_raw.astype(np.float32) - depthQuantB)
            # filter max values
            depth_img_scaled[depth_img_raw == 0] = 0
            # depth_img_scaled provides distance in meters as f32
            # for storing it as png, we need to convert it to 16UC1 again (depth in mm)
            depth_img_mm = (depth_img_scaled * 1000).astype(np.uint16)
            return depth_img_mm
        else:
            raise Exception("Decoding of '" + depth_fmt + "' is not implemented!")
def main(args):
    """Start the PNG-writer node and block until ROS shuts down."""
    rospy.init_node('writer', anonymous=True)
    # Keep a reference to the writer so its subscriber stays alive.
    writer = PngWriter()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        rospy.loginfo("Shutting down")


if __name__ == '__main__':
    main(sys.argv)
"cv2.imwrite",
"rospy.init_node",
"rospy.get_param",
"rospy.has_param",
"os.path.join",
"numpy.fromstring",
"cv_bridge.CvBridge",
"struct.unpack",
"rospy.spin",
"rospy.Subscriber",
"rospy.loginfo"
] | [((3950, 3991), 'rospy.init_node', 'rospy.init_node', (['"""writer"""'], {'anonymous': '(True)'}), "('writer', anonymous=True)\n", (3965, 3991), False, 'import rospy, sys\n'), ((292, 320), 'rospy.has_param', 'rospy.has_param', (['"""save_root"""'], {}), "('save_root')\n", (307, 320), False, 'import rospy, sys\n'), ((531, 566), 'rospy.has_param', 'rospy.has_param', (['"""~filename_format"""'], {}), "('~filename_format')\n", (546, 566), False, 'import rospy, sys\n'), ((753, 783), 'rospy.has_param', 'rospy.has_param', (['"""~compressed"""'], {}), "('~compressed')\n", (768, 783), False, 'import rospy, sys\n'), ((1209, 1244), 'rospy.loginfo', 'rospy.loginfo', (['"""Writer initialized"""'], {}), "('Writer initialized')\n", (1222, 1244), False, 'import rospy, sys\n'), ((1392, 1451), 'os.path.join', 'os.path.join', (['self.save_root', '(self.format_string % time_str)'], {}), '(self.save_root, self.format_string % time_str)\n', (1404, 1451), False, 'import cv2, os, struct\n'), ((2008, 2039), 'cv2.imwrite', 'cv2.imwrite', (['filepath', 'cv_image'], {}), '(filepath, cv_image)\n', (2019, 2039), False, 'import cv2, os, struct\n'), ((4032, 4044), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (4042, 4044), False, 'import rospy, sys\n'), ((351, 379), 'rospy.get_param', 'rospy.get_param', (['"""save_root"""'], {}), "('save_root')\n", (366, 379), False, 'import rospy, sys\n'), ((601, 636), 'rospy.get_param', 'rospy.get_param', (['"""~filename_format"""'], {}), "('~filename_format')\n", (616, 636), False, 'import rospy, sys\n'), ((815, 845), 'rospy.get_param', 'rospy.get_param', (['"""~compressed"""'], {}), "('~compressed')\n", (830, 845), False, 'import rospy, sys\n'), ((993, 1051), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/image"""', 'CompressedImage', 'self.callback'], {}), "('/image', CompressedImage, self.callback)\n", (1009, 1051), False, 'import rospy, sys\n'), ((1112, 1122), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (1120, 1122), False, 'from cv_bridge 
import CvBridge, CvBridgeError\n'), ((1152, 1200), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/image"""', 'Image', 'self.callback'], {}), "('/image', Image, self.callback)\n", (1168, 1200), False, 'import rospy, sys\n'), ((2799, 2832), 'numpy.fromstring', 'np.fromstring', (['raw_data', 'np.uint8'], {}), '(raw_data, np.uint8)\n', (2812, 2832), True, 'import numpy as np\n'), ((4083, 4113), 'rospy.loginfo', 'rospy.loginfo', (['"""Shutting down"""'], {}), "('Shutting down')\n", (4096, 4113), False, 'import rospy, sys\n'), ((1653, 1679), 'rospy.has_param', 'rospy.has_param', (['"""~format"""'], {}), "('~format')\n", (1668, 1679), False, 'import rospy, sys\n'), ((3362, 3394), 'struct.unpack', 'struct.unpack', (['"""iff"""', 'raw_header'], {}), "('iff', raw_header)\n", (3375, 3394), False, 'import cv2, os, struct\n'), ((1710, 1736), 'rospy.get_param', 'rospy.get_param', (['"""~format"""'], {}), "('~format')\n", (1725, 1736), False, 'import rospy, sys\n')] |
"""CLI for optimal subgroups based on individual preferences"""
from argparse import ArgumentParser
import pandas as pd
import numpy as np
from pulp import LpProblem, LpMinimize, LpVariable, lpSum
NAN_VALUE = 10000 # Arbitrarily high value to force optimization away from empty choices
def optimize():
    """Run the optimizer.

    Reads an Excel sheet whose first column ("Persons") names the people and
    whose remaining columns hold each person's cost for every alternative;
    empty cells are treated as "not acceptable" and replaced by NAN_VALUE.
    Solves the resulting 0/1 ILP and prints the grouping.
    """
    args = get_args()
    rawdata = pd.read_excel(args.file_path, 'Sheet1', index_col=None, na_values=['NA'])
    # FIX: PuLP forbids spaces in problem names (it warns and substitutes '_'
    # itself); use the underscored name directly.
    prob = LpProblem("Optimal_10x_Grouping", LpMinimize)
    alternatives = list(rawdata.columns[1:])  # Column labels
    persons = list(rawdata['Persons'])
    # FIX: drop(columns=...) replaces the deprecated positional-axis call
    # drop('Persons', 'columns'), which newer pandas versions reject.
    cost_matrix = rawdata.drop(columns='Persons').to_numpy()  # Numpy array: [person][class]
    (n_persons, n_alternatives) = cost_matrix.shape
    costs = [NAN_VALUE if np.isnan(x) else x for x in cost_matrix.flatten()]
    # Create binary LpVariables for every person/alternative combination
    choices = []
    make_name = make_var_name_factory(persons, alternatives)
    for i in range(n_persons):
        choices.append([LpVariable(make_name(i, j), cat='Binary') for j in range(n_alternatives)])
    # Create binary LpVariables tracking if a group has any members
    has_membership = [LpVariable(f"{alternatives[j]}", cat='Binary') for j in range(n_alternatives)]
    # Add constraints
    # See https://cs.stackexchange.com/questions/12102/express-boolean-logic-operations-in-zero-one-
    # integer-linear-programming-ilp
    # for a good explanation of logical operations (AND, OR, etc.) via linear constraints
    for i in range(n_persons):
        prob += sum(choices[i]) == 1  # Only one result per person
    for j in range(n_alternatives):
        # If the sum of every choice for the alternative is 0, has_membership must be 0
        prob += has_membership[j] <= sum([choices[i][j] for i in range(n_persons)])
        for i in range(n_persons):
            prob += has_membership[j] >= choices[i][j]  # has_membership is 1 if any choice is 1
        # If a group has any members, enforce a minimum number
        prob += sum([choices[i][j] for i in range(n_persons)]) >= args.min * has_membership[j]
    # Define and calculate the optimization
    choices_full = np.array(choices).flatten()
    prob += lpSum([choices_full[i] * costs[i] for i in range(len(choices_full))])
    prob.solve()
    display_results(
        optimized_value=prob.objective.value(),
        persons=persons,
        alternatives=alternatives,
        choices=choices,
        cost_matrix=cost_matrix,
    )
def get_args():
    """Read and return the parsed command-line arguments."""
    cli = ArgumentParser(
        description='Sort a group into optimal preference-based subgroups.')
    cli.add_argument(
        'min', type=int, help='Min group size if group has > 0')
    cli.add_argument(
        'file_path', help='Path to input file with "Persons" in first column')
    return cli.parse_args()
def make_var_name_factory(person_list, alternative_list):
    """Return a function mapping (person index, alternative index) to an
    LpVariable name of the form ``Choice:<person>,<alternative>``."""
    def _name(i, j):
        return f"Choice:{person_list[i]},{alternative_list[j]}"
    return _name
def display_results(optimized_value, persons, alternatives, choices, cost_matrix):
    """Summarize the optimization outcome on stdout.

    Prints a 0-100 score (100 = everyone got their top choice), the achieved
    objective versus the best/worst possible values, and the membership of
    every non-empty group.
    """
    n_persons, n_alternatives = cost_matrix.shape
    perfect_value = n_persons
    worst_value = sum(np.nanmax(row) for row in cost_matrix)
    ratio = (optimized_value - perfect_value) / (worst_value - perfect_value)
    score = 100 - round(100 * ratio, 0)
    print(f"Optimization score (0-100): {score}")
    print(f"Achieved: {optimized_value}")
    print(f"Perfect: {perfect_value}")
    print(f"Worst: {worst_value}")
    for j in range(n_alternatives):
        members = [
            persons[i]
            for i in range(n_persons)
            if choices[i][j].varValue == 1.0
        ]
        if not members:
            continue
        print(f"\n{alternatives[j]}:")
        for person in members:
            print(f"  {person}")
# Allow running this module directly as a script.
if __name__ == "__main__":
    optimize()
| [
"pulp.LpProblem",
"argparse.ArgumentParser",
"numpy.array",
"numpy.isnan",
"numpy.nanmax",
"pandas.read_excel",
"pulp.LpVariable"
] | [((369, 442), 'pandas.read_excel', 'pd.read_excel', (['args.file_path', '"""Sheet1"""'], {'index_col': 'None', 'na_values': "['NA']"}), "(args.file_path, 'Sheet1', index_col=None, na_values=['NA'])\n", (382, 442), True, 'import pandas as pd\n'), ((454, 499), 'pulp.LpProblem', 'LpProblem', (['"""Optimal 10x Grouping"""', 'LpMinimize'], {}), "('Optimal 10x Grouping', LpMinimize)\n", (463, 499), False, 'from pulp import LpProblem, LpMinimize, LpVariable, lpSum\n'), ((2580, 2668), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Sort a group into optimal preference-based subgroups."""'}), "(description=\n 'Sort a group into optimal preference-based subgroups.')\n", (2594, 2668), False, 'from argparse import ArgumentParser\n'), ((1201, 1247), 'pulp.LpVariable', 'LpVariable', (['f"""{alternatives[j]}"""'], {'cat': '"""Binary"""'}), "(f'{alternatives[j]}', cat='Binary')\n", (1211, 1247), False, 'from pulp import LpProblem, LpMinimize, LpVariable, lpSum\n'), ((775, 786), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (783, 786), True, 'import numpy as np\n'), ((2198, 2215), 'numpy.array', 'np.array', (['choices'], {}), '(choices)\n', (2206, 2215), True, 'import numpy as np\n'), ((3400, 3412), 'numpy.nanmax', 'np.nanmax', (['x'], {}), '(x)\n', (3409, 3412), True, 'import numpy as np\n')] |
import os
import numpy as np
import xarray as xr
import pytest
import intake_io
from .fixtures import *
def test_round_trip_uncompressed(tmp_path):
    """Save random images uncompressed as KLB and check that axes, shape,
    spacing and pixel data survive the round trip."""
    fpath = os.path.join(tmp_path, "uncompressed.klb")
    if os.path.exists(fpath):
        os.remove(fpath)
    for img0, shape, axes, spacing, units in random_images():
        # Images with an "i" (image-number) axis cannot be stored as KLB.
        if "i" in axes:
            continue
        try:
            assert not os.path.exists(fpath)
            intake_io.imsave(img0, fpath, compress=False)
            assert os.path.exists(fpath)
            with intake_io.source.KlbSource(fpath) as src:
                img1 = intake_io.imload(src)["image"]
            assert intake_io.get_axes(img1) == axes
            assert img1.shape == shape
            # Axes without an explicit spacing default to 1.0 on load.
            expected_spacing = [s if s is not None else 1.0 for s in spacing]
            np.testing.assert_array_almost_equal(expected_spacing, intake_io.get_spacing(img1))
            # assert units == intake_io.get_spacing_units(img1)
            assert np.mean(img0.data) not in (0, 1)
            assert np.mean(img1.data) == np.mean(img0.data)
        finally:
            if os.path.exists(fpath):
                os.remove(fpath)
def test_round_trip_compressed(tmp_path):
    """Save ramp images both uncompressed and compressed, check that both
    load back identically, and that compression actually shrinks the file."""
    fpaths = {
        False: os.path.join(tmp_path, "uncompressed.klb"),
        True: os.path.join(tmp_path, "compressed.klb")
    }
    for fpath in fpaths.values():
        if os.path.exists(fpath):
            os.remove(fpath)
    for img0 in ramp_images():
        # Images with an "i" (image-number) axis cannot be stored as KLB.
        if "i" in intake_io.get_axes(img0):
            continue
        try:
            for compress, fpath in fpaths.items():
                if os.path.exists(fpath):
                    os.remove(fpath)
                assert not os.path.exists(fpath)
                intake_io.imsave(img0, fpath, compress=compress)
                assert os.path.exists(fpath)
                # Load through an explicit KlbSource and through the path helper.
                with intake_io.source.KlbSource(fpath) as src:
                    img1 = intake_io.imload(src)["image"]
                img2 = intake_io.imload(fpath)["image"]
                assert img0.shape == img1.shape == img2.shape
                assert img0.dims == img1.dims == img2.dims
                assert np.mean(img0.data) not in (0, 1)
                assert np.mean(img0.data) == np.mean(img1.data) == np.mean(img2.data)
            # Ramp data is highly compressible: expect at least a 10% saving.
            assert os.path.getsize(fpaths[True]) <= 0.9 * os.path.getsize(fpaths[False])
        finally:
            for fpath in fpaths.values():
                if os.path.exists(fpath):
                    os.remove(fpath)
| [
"os.path.exists",
"numpy.mean",
"os.path.getsize",
"intake_io.source.KlbSource",
"os.path.join",
"intake_io.get_spacing",
"intake_io.get_axes",
"intake_io.imload",
"intake_io.imsave",
"os.remove"
] | [((162, 204), 'os.path.join', 'os.path.join', (['tmp_path', '"""uncompressed.klb"""'], {}), "(tmp_path, 'uncompressed.klb')\n", (174, 204), False, 'import os\n'), ((212, 233), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (226, 233), False, 'import os\n'), ((243, 259), 'os.remove', 'os.remove', (['fpath'], {}), '(fpath)\n', (252, 259), False, 'import os\n'), ((1235, 1277), 'os.path.join', 'os.path.join', (['tmp_path', '"""uncompressed.klb"""'], {}), "(tmp_path, 'uncompressed.klb')\n", (1247, 1277), False, 'import os\n'), ((1293, 1333), 'os.path.join', 'os.path.join', (['tmp_path', '"""compressed.klb"""'], {}), "(tmp_path, 'compressed.klb')\n", (1305, 1333), False, 'import os\n'), ((1385, 1406), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (1399, 1406), False, 'import os\n'), ((438, 483), 'intake_io.imsave', 'intake_io.imsave', (['img0', 'fpath'], {'compress': '(False)'}), '(img0, fpath, compress=False)\n', (454, 483), False, 'import intake_io\n'), ((503, 524), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (517, 524), False, 'import os\n'), ((1105, 1126), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (1119, 1126), False, 'import os\n'), ((1420, 1436), 'os.remove', 'os.remove', (['fpath'], {}), '(fpath)\n', (1429, 1436), False, 'import os\n'), ((1487, 1511), 'intake_io.get_axes', 'intake_io.get_axes', (['img0'], {}), '(img0)\n', (1505, 1511), False, 'import intake_io\n'), ((404, 425), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (418, 425), False, 'import os\n'), ((543, 576), 'intake_io.source.KlbSource', 'intake_io.source.KlbSource', (['fpath'], {}), '(fpath)\n', (569, 576), False, 'import intake_io\n'), ((679, 703), 'intake_io.get_axes', 'intake_io.get_axes', (['img1'], {}), '(img1)\n', (697, 703), False, 'import intake_io\n'), ((868, 895), 'intake_io.get_spacing', 'intake_io.get_spacing', (['img1'], {}), '(img1)\n', (889, 895), False, 'import intake_io\n'), 
((980, 998), 'numpy.mean', 'np.mean', (['img0.data'], {}), '(img0.data)\n', (987, 998), True, 'import numpy as np\n'), ((1032, 1050), 'numpy.mean', 'np.mean', (['img0.data'], {}), '(img0.data)\n', (1039, 1050), True, 'import numpy as np\n'), ((1054, 1072), 'numpy.mean', 'np.mean', (['img1.data'], {}), '(img1.data)\n', (1061, 1072), True, 'import numpy as np\n'), ((1144, 1160), 'os.remove', 'os.remove', (['fpath'], {}), '(fpath)\n', (1153, 1160), False, 'import os\n'), ((1617, 1638), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (1631, 1638), False, 'import os\n'), ((1743, 1791), 'intake_io.imsave', 'intake_io.imsave', (['img0', 'fpath'], {'compress': 'compress'}), '(img0, fpath, compress=compress)\n', (1759, 1791), False, 'import intake_io\n'), ((1815, 1836), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (1829, 1836), False, 'import os\n'), ((2315, 2344), 'os.path.getsize', 'os.path.getsize', (['fpaths[True]'], {}), '(fpaths[True])\n', (2330, 2344), False, 'import os\n'), ((2463, 2484), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (2477, 2484), False, 'import os\n'), ((608, 629), 'intake_io.imload', 'intake_io.imload', (['src'], {}), '(src)\n', (624, 629), False, 'import intake_io\n'), ((1660, 1676), 'os.remove', 'os.remove', (['fpath'], {}), '(fpath)\n', (1669, 1676), False, 'import os\n'), ((1705, 1726), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (1719, 1726), False, 'import os\n'), ((1859, 1892), 'intake_io.source.KlbSource', 'intake_io.source.KlbSource', (['fpath'], {}), '(fpath)\n', (1885, 1892), False, 'import intake_io\n'), ((1982, 2005), 'intake_io.imload', 'intake_io.imload', (['fpath'], {}), '(fpath)\n', (1998, 2005), False, 'import intake_io\n'), ((2176, 2194), 'numpy.mean', 'np.mean', (['img0.data'], {}), '(img0.data)\n', (2183, 2194), True, 'import numpy as np\n'), ((2232, 2250), 'numpy.mean', 'np.mean', (['img0.data'], {}), '(img0.data)\n', (2239, 2250), True, 'import 
numpy as np\n'), ((2254, 2272), 'numpy.mean', 'np.mean', (['img1.data'], {}), '(img1.data)\n', (2261, 2272), True, 'import numpy as np\n'), ((2276, 2294), 'numpy.mean', 'np.mean', (['img2.data'], {}), '(img2.data)\n', (2283, 2294), True, 'import numpy as np\n'), ((2354, 2384), 'os.path.getsize', 'os.path.getsize', (['fpaths[False]'], {}), '(fpaths[False])\n', (2369, 2384), False, 'import os\n'), ((2506, 2522), 'os.remove', 'os.remove', (['fpath'], {}), '(fpath)\n', (2515, 2522), False, 'import os\n'), ((1928, 1949), 'intake_io.imload', 'intake_io.imload', (['src'], {}), '(src)\n', (1944, 1949), False, 'import intake_io\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : memory.py
# @Author: zixiao
# @Date : 2019-04-01
# @Desc :
import numpy as np
class SumTree(object):
    """Binary sum tree over transition priorities plus ring-buffer storage.

    The first ``data_size - 1`` entries of ``tree`` are internal nodes, each
    holding the sum of its two children; the last ``data_size`` entries are
    leaves holding per-transition priorities.  Observations, actions and
    rewards live in parallel ring buffers indexed by ``data_index``.
    """
    data_index = 0  # next write position in the ring buffers

    def __init__(self, size, frame_len, w, h):
        # size: capacity in transitions; frame_len: frames stacked per state;
        # w, h: single-frame dimensions.
        self.data_size = size
        self.tree_size = 2 * size - 1
        self.tree = np.zeros(self.tree_size,dtype=np.float32)
        self.data_obs = np.zeros((size, w, h), dtype=np.uint8)
        self.data_reward = np.zeros(size, dtype=np.float32)
        self.data_action = np.zeros(size, dtype=np.uint8)
        self.frame_len = frame_len
        self.num_data = 0    # number of valid entries (saturates at data_size)
        self.data_count = 0  # total adds ever performed (monotonic)

    def add(self, tree_point, action, reward, obs_):
        """Store one transition and set its leaf priority to ``tree_point``.

        Note: the *next* observation is written at the advanced data_index,
        so consecutive ring-buffer slots chain obs -> obs_.
        """
        tree_index = self.data_index + self.data_size - 1  # leaf for this slot
        self.data_action[self.data_index] = action
        self.data_reward[self.data_index] = reward
        self.data_index = int((self.data_index + 1) % self.data_size)
        self.data_obs[self.data_index] = obs_
        self.update(tree_index, tree_point)
        self.data_count += 1
        self.num_data = min(self.data_size, self.data_count)

    def update(self, tree_index, pointer):
        """Set leaf ``tree_index`` to ``pointer`` and propagate the delta up."""
        change = pointer - self.tree[tree_index]
        self.tree[tree_index] = pointer
        while tree_index != 0:
            tree_index = (tree_index - 1) // 2  # move to parent node
            self.tree[tree_index] += change

    @property
    def total_weight(self):
        # The root holds the sum of all leaf priorities.
        return self.tree[0]

    def get_leaf(self, value):
        """Walk down to the leaf whose cumulative priority interval covers
        ``value``.

        Returns (leaf index, leaf priority, stacked obs, action, reward,
        stacked next obs).
        """
        parent_index = 0
        while True:
            left_index = 2 * parent_index + 1
            right_index = left_index + 1
            if left_index >= self.tree_size:
                # parent_index is a leaf; stop descending.
                break
            else:
                if value <= self.tree[left_index]:
                    parent_index = left_index
                else:
                    value -= self.tree[left_index]
                    parent_index = right_index
        leaf_index = parent_index
        # if leaf_index == 0:
        #     leaf_index = 1
        data_index = leaf_index - (self.data_size - 1)
        # data_index_ = (data_index + 1) % self.data_size
        obs_frame, obs_frame_ = self.get_frame(data_index)
        return leaf_index, self.tree[leaf_index], obs_frame, self.data_action[data_index], \
               self.data_reward[data_index], obs_frame_

    def store_obs(self, obs):
        # Overwrite the observation at the current write position.
        self.data_obs[self.data_index] = obs

    def get_last_frame(self):
        """Return the latest ``frame_len`` observations, oldest first,
        wrapping around the ring buffer if necessary."""
        start = self.data_index - self.frame_len + 1
        end = self.data_index
        if start < 0:
            # Window wraps past the start of the ring buffer.
            start += self.num_data
            obs_frame = np.concatenate((self.data_obs[start:self.num_data],
                                        self.data_obs[0:end + 1]))
        else:
            obs_frame = self.data_obs[start:end + 1]
        return obs_frame

    def get_frame(self, data_index):
        """Return the frame stack ending at ``data_index`` and the stack
        ending at the following slot (i.e. the next observation)."""
        obs_start = data_index - self.frame_len + 1
        obs_end = data_index
        obs_start_ = int((data_index + 1) % self.num_data)
        obs_end_ = obs_start_ + self.frame_len - 1
        if obs_start < 0:
            # Current-state window wraps around the ring buffer.
            obs_start += self.num_data
            obs_frame = np.concatenate((self.data_obs[obs_start:self.num_data], self.data_obs[0:obs_end + 1]))
        else:
            obs_frame = self.data_obs[obs_start:obs_end + 1]
        if obs_end_ >= self.num_data:
            # Next-state window wraps around the ring buffer.
            obs_end_ -= self.num_data
            obs_frame_ = np.concatenate((self.data_obs[obs_start_:self.num_data], self.data_obs[0:obs_end_ + 1]))
        else:
            obs_frame_ = self.data_obs[obs_start_:obs_end_ + 1]
        # if obs_frame.shape[0] != self.frame_len or obs_frame_.shape[0] != self.frame_len:
        #     print('\r --------', obs_start, obs_end, obs_start_, obs_end_)
        return obs_frame, obs_frame_
class Memory(object):
    """Prioritized experience replay memory backed by a SumTree.

    Transitions are sampled with probability proportional to their priority;
    importance-sampling (IS) weights compensate for the resulting bias.
    """

    epsilon = 0.01   # small constant so no transition ever has zero priority
    alpha = 0.6      # how strongly priority affects sampling (0 = uniform)
    beta = 0.4       # IS-weight exponent, annealed towards 1 per sampling
    beta_increment_per_sampling = 0.001
    abs_err_upper = 1  # priorities are clipped to this maximum

    def __init__(self, size, frame_len, w, h):
        """Create a memory of ``size`` transitions of ``frame_len`` stacked
        ``w`` x ``h`` frames."""
        self.size = size
        self.frame_len = frame_len
        self.w = w
        self.h = h
        self.tree = SumTree(size=self.size, frame_len=self.frame_len, w=self.w, h=self.h)

    def store_transition(self, action, reward, obs_):
        """Add a transition with the current maximum leaf priority so it is
        sampled at least once before its priority is updated."""
        max_leaf_weight = np.max(self.tree.tree[-self.tree.data_size:])
        if max_leaf_weight == 0:
            # Empty tree: fall back to the priority ceiling.
            max_leaf_weight = self.abs_err_upper
        self.tree.add(max_leaf_weight, action, reward, obs_)

    def get_memory(self, batch_size):
        """Sample ``batch_size`` transitions by stratified priority sampling.

        Returns (leaf indices, IS weights, observations, actions, rewards,
        next observations).  Leaf indices are needed by ``batch_update``.
        """
        batch_leaf_index = np.zeros(batch_size, dtype=np.int32)
        batch_action = np.zeros(batch_size, dtype=np.uint8)
        batch_reward = np.zeros(batch_size, dtype=np.float32)
        batch_obs = np.zeros((batch_size, self.frame_len, self.w, self.h), dtype=np.uint8)
        batch_obs_ = np.zeros((batch_size, self.frame_len, self.w, self.h), dtype=np.uint8)
        IS_weights = np.zeros((batch_size, 1))
        # Stratified sampling: one value drawn per equal slice of the total.
        priority_segment = self.tree.total_weight / batch_size
        self.beta = np.min([1, self.beta + self.beta_increment_per_sampling])
        end = self.tree.data_size + self.tree.num_data - 1
        min_probability = np.min(self.tree.tree[-self.tree.data_size:end]) / self.tree.total_weight
        for i in range(batch_size):
            low = priority_segment * i
            high = priority_segment * (i + 1)
            value = np.random.uniform(low, high)
            leaf_index, leaf_value, obs, action, reward, obs_ = self.tree.get_leaf(value)
            probability = leaf_value / self.tree.total_weight
            # w_i = (P(i) / min_j P(j)) ** -beta
            IS_weights[i, 0] = np.power(probability / min_probability, -self.beta)
            batch_leaf_index[i] = leaf_index
            batch_obs[i] = obs
            batch_obs_[i] = obs_
            batch_action[i] = action
            batch_reward[i] = reward
        return batch_leaf_index, IS_weights, batch_obs, batch_action, batch_reward, batch_obs_

    def batch_update(self, tree_index, abs_errors):
        """Write updated priorities (|error| + eps, clipped, ** alpha) back
        into the tree leaves given by ``tree_index``."""
        abs_errors += self.epsilon
        clipped_errors = np.minimum(abs_errors, self.abs_err_upper)
        ps = np.power(clipped_errors, self.alpha)
        for t_index, p in zip(tree_index, ps):
            self.tree.update(t_index, p)

    def store_frame(self, obs):
        """Store a raw frame at the current write position."""
        self.tree.store_obs(obs)

    def get_last_frame(self):
        """Return the most recent ``frame_len`` stacked frames."""
        return self.tree.get_last_frame()
| [
"numpy.minimum",
"numpy.power",
"numpy.max",
"numpy.zeros",
"numpy.random.uniform",
"numpy.concatenate",
"numpy.min"
] | [((317, 359), 'numpy.zeros', 'np.zeros', (['self.tree_size'], {'dtype': 'np.float32'}), '(self.tree_size, dtype=np.float32)\n', (325, 359), True, 'import numpy as np\n'), ((383, 421), 'numpy.zeros', 'np.zeros', (['(size, w, h)'], {'dtype': 'np.uint8'}), '((size, w, h), dtype=np.uint8)\n', (391, 421), True, 'import numpy as np\n'), ((449, 481), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.float32'}), '(size, dtype=np.float32)\n', (457, 481), True, 'import numpy as np\n'), ((509, 539), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.uint8'}), '(size, dtype=np.uint8)\n', (517, 539), True, 'import numpy as np\n'), ((4189, 4234), 'numpy.max', 'np.max', (['self.tree.tree[-self.tree.data_size:]'], {}), '(self.tree.tree[-self.tree.data_size:])\n', (4195, 4234), True, 'import numpy as np\n'), ((4444, 4480), 'numpy.zeros', 'np.zeros', (['batch_size'], {'dtype': 'np.int32'}), '(batch_size, dtype=np.int32)\n', (4452, 4480), True, 'import numpy as np\n'), ((4504, 4540), 'numpy.zeros', 'np.zeros', (['batch_size'], {'dtype': 'np.uint8'}), '(batch_size, dtype=np.uint8)\n', (4512, 4540), True, 'import numpy as np\n'), ((4564, 4602), 'numpy.zeros', 'np.zeros', (['batch_size'], {'dtype': 'np.float32'}), '(batch_size, dtype=np.float32)\n', (4572, 4602), True, 'import numpy as np\n'), ((4623, 4693), 'numpy.zeros', 'np.zeros', (['(batch_size, self.frame_len, self.w, self.h)'], {'dtype': 'np.uint8'}), '((batch_size, self.frame_len, self.w, self.h), dtype=np.uint8)\n', (4631, 4693), True, 'import numpy as np\n'), ((4715, 4785), 'numpy.zeros', 'np.zeros', (['(batch_size, self.frame_len, self.w, self.h)'], {'dtype': 'np.uint8'}), '((batch_size, self.frame_len, self.w, self.h), dtype=np.uint8)\n', (4723, 4785), True, 'import numpy as np\n'), ((4807, 4832), 'numpy.zeros', 'np.zeros', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (4815, 4832), True, 'import numpy as np\n'), ((4973, 5030), 'numpy.min', 'np.min', (['[1, self.beta + self.beta_increment_per_sampling]'], {}), 
'([1, self.beta + self.beta_increment_per_sampling])\n', (4979, 5030), True, 'import numpy as np\n'), ((6553, 6595), 'numpy.minimum', 'np.minimum', (['abs_errors', 'self.abs_err_upper'], {}), '(abs_errors, self.abs_err_upper)\n', (6563, 6595), True, 'import numpy as np\n'), ((6609, 6645), 'numpy.power', 'np.power', (['clipped_errors', 'self.alpha'], {}), '(clipped_errors, self.alpha)\n', (6617, 6645), True, 'import numpy as np\n'), ((2571, 2649), 'numpy.concatenate', 'np.concatenate', (['(self.data_obs[start:self.num_data], self.data_obs[0:end + 1])'], {}), '((self.data_obs[start:self.num_data], self.data_obs[0:end + 1]))\n', (2585, 2649), True, 'import numpy as np\n'), ((3100, 3191), 'numpy.concatenate', 'np.concatenate', (['(self.data_obs[obs_start:self.num_data], self.data_obs[0:obs_end + 1])'], {}), '((self.data_obs[obs_start:self.num_data], self.data_obs[0:\n obs_end + 1]))\n', (3114, 3191), True, 'import numpy as np\n'), ((3363, 3456), 'numpy.concatenate', 'np.concatenate', (['(self.data_obs[obs_start_:self.num_data], self.data_obs[0:obs_end_ + 1])'], {}), '((self.data_obs[obs_start_:self.num_data], self.data_obs[0:\n obs_end_ + 1]))\n', (3377, 3456), True, 'import numpy as np\n'), ((5116, 5164), 'numpy.min', 'np.min', (['self.tree.tree[-self.tree.data_size:end]'], {}), '(self.tree.tree[-self.tree.data_size:end])\n', (5122, 5164), True, 'import numpy as np\n'), ((5708, 5736), 'numpy.random.uniform', 'np.random.uniform', (['low', 'high'], {}), '(low, high)\n', (5725, 5736), True, 'import numpy as np\n'), ((5921, 5972), 'numpy.power', 'np.power', (['(probability / min_probability)', '(-self.beta)'], {}), '(probability / min_probability, -self.beta)\n', (5929, 5972), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Score the predictions with gold labels, using precision, recall and F1 metrics.
"""
import argparse
import sys, os
from collections import Counter
import numpy as np
from pathlib import Path
NO_RELATION = "no_relation"
def parse_arguments():
    """Build the CLI parser and return the parsed arguments."""
    cli = argparse.ArgumentParser(description='Score a prediction file using the gold labels.')
    cli.add_argument('gold_file', help='The gold relation file; one relation per line')
    cli.add_argument(
        'pred_file',
        help='A prediction file; one relation per line, in the same order as the gold file.')
    return cli.parse_args()
def score(key, prediction, verbose=False, NO_RELATION=NO_RELATION):
    """Micro-averaged precision/recall/F1 over relation predictions.

    ``key`` and ``prediction`` are parallel sequences of relation labels
    (gold vs. guessed).  Pairs where both sides equal ``NO_RELATION`` are
    ignored; the default is bound from the module-level constant at import
    time.  When ``verbose`` is true, a per-relation table is printed first.
    The aggregate scores are always printed; returns the tuple
    (prec_micro, recall_micro, f1_micro).
    """
    correct_by_relation = Counter()
    guessed_by_relation = Counter()
    gold_by_relation = Counter()
    # Loop over the data to compute a score
    for row in range(len(key)):
        gold = key[row]
        guess = prediction[row]
        if gold == NO_RELATION and guess == NO_RELATION:
            pass
        elif gold == NO_RELATION and guess != NO_RELATION:
            guessed_by_relation[guess] += 1
        elif gold != NO_RELATION and guess == NO_RELATION:
            gold_by_relation[gold] += 1
        elif gold != NO_RELATION and guess != NO_RELATION:
            guessed_by_relation[guess] += 1
            gold_by_relation[gold] += 1
            if gold == guess:
                correct_by_relation[guess] += 1
    # Print verbose information
    if verbose:
        print("Per-relation statistics:")
        relations = gold_by_relation.keys()
        longest_relation = 0
        for relation in sorted(relations):
            longest_relation = max(len(relation), longest_relation)
        for relation in sorted(relations):
            # (compute the score)
            correct = correct_by_relation[relation]
            guessed = guessed_by_relation[relation]
            gold = gold_by_relation[relation]
            # Convention: undefined precision (nothing guessed) counts as 1.0,
            # undefined recall (no gold instances) as 0.0.
            prec = 1.0
            if guessed > 0:
                prec = float(correct) / float(guessed)
            recall = 0.0
            if gold > 0:
                recall = float(correct) / float(gold)
            f1 = 0.0
            if prec + recall > 0:
                f1 = 2.0 * prec * recall / (prec + recall)
            # (print the score)
            # The extra single-space writes right-align the percentage columns.
            sys.stdout.write(("{:<" + str(longest_relation) + "}").format(relation))
            sys.stdout.write("  P: ")
            if prec < 0.1:
                sys.stdout.write(' ')
            if prec < 1.0:
                sys.stdout.write(' ')
            sys.stdout.write("{:.2%}".format(prec))
            sys.stdout.write("  R: ")
            if recall < 0.1:
                sys.stdout.write(' ')
            if recall < 1.0:
                sys.stdout.write(' ')
            sys.stdout.write("{:.2%}".format(recall))
            sys.stdout.write("  F1: ")
            if f1 < 0.1:
                sys.stdout.write(' ')
            if f1 < 1.0:
                sys.stdout.write(' ')
            sys.stdout.write("{:.2%}".format(f1))
            sys.stdout.write("  #: %d" % gold)
            sys.stdout.write("\n")
        print("")
    # Print the aggregate score
    if verbose:
        print("Final Score:")
    prec_micro = 1.0
    if sum(guessed_by_relation.values()) > 0:
        prec_micro = float(sum(correct_by_relation.values())) / float(
            sum(guessed_by_relation.values()))
    recall_micro = 0.0
    if sum(gold_by_relation.values()) > 0:
        recall_micro = float(sum(correct_by_relation.values())) / float(
            sum(gold_by_relation.values()))
    f1_micro = 0.0
    if prec_micro + recall_micro > 0.0:
        f1_micro = 2.0 * prec_micro * recall_micro / (prec_micro + recall_micro)
    print("SET NO_RELATION ID: ", NO_RELATION)
    print("Precision (micro): {:.3%}".format(prec_micro))
    print("   Recall (micro): {:.3%}".format(recall_micro))
    print("       F1 (micro): {:.3%}".format(f1_micro))
    return prec_micro, recall_micro, f1_micro
def AUC(logits, labels):
    """Rank-based AUC over binary ``labels``, assumed already ordered by
    descending score: the fraction of positive/negative pairs ranked
    correctly.  Returns 0.5 when only one class is present.

    Note: ``logits`` is accepted for API compatibility but not used.
    """
    positives = sum(labels)
    pair_count = (len(labels) - positives) * positives
    if pair_count == 0:
        return 0.5
    concordant = 0
    positives_seen = 0
    for label in labels:
        if label == 0:
            # Every positive seen so far is ranked above this negative.
            concordant += positives_seen
        else:
            positives_seen += 1
    return float(concordant) / pair_count
def print_table(*args, header='', logger=None):
    """Print the columns in ``args`` as tab-separated rows of 4-decimal
    floats, preceded by ``header``; mirror the same lines to ``logger``
    (via its ``log`` method) when one is given."""
    def _rows():
        for tup in zip(*args):
            yield '\t'.join('%.4f' % cell for cell in tup)
    if logger is not None:
        logger.log(header)
        for line in _rows():
            logger.log(line)
    print(header)
    for line in _rows():
        print(line)
def result_summary(result_dir, dr=None, write_to_file=True):
    """Aggregate precision/recall/F1 across random seeds from result logs.

    Scans result_dir for per-seed files named 'dr<dr[0]>_<dr[1]>*.txt',
    locates the round with the best dev F1 in each file, and prints
    mean +/- std of P/R/F1 for the base test run (evaluation #0), the dev
    set at the best round, and the test set at the best round.

    NOTE(review): dr defaults to None but dr[0]/dr[1] are indexed
    unconditionally — callers must always pass a 2-tuple.
    NOTE(review): write_to_file and test_f1_total are currently unused.
    Relies on Path (pathlib) and np (numpy) imported at module level.
    """
    prefix = 'dr' + str(dr[0]) + '_' + str(dr[1])
    base_test_p = []
    base_test_r = []
    base_test_f1 = []
    dev_p = []
    dev_r = []
    dev_f1 = []
    test_p = []
    test_r = []
    test_f1 = []
    for file_name in Path(result_dir).glob(prefix + '*.txt'):  # each seed
        with open(file_name) as f:
            lines = f.readlines()
        # Scan backwards for the per-round "Best dev and test F1" table.
        for j in range(len(lines) - 1, 0, -1):
            if lines[j].startswith('Best dev and test F1'):
                break
        if j == 1:
            # Loop exhausted without a match: this seed's log is incomplete.
            print('Miss result in: ' + str(file_name))
            continue
        dev_f1_total = []
        test_f1_total = []
        for k in range(j + 1, len(lines)):
            line = lines[k]
            dev_f1_total.append(float(line.split('\t')[0].strip()))
            test_f1_total.append(float(line.split('\t')[1].strip()))
        i = dev_f1_total.index(max(dev_f1_total))
        # get i: the best round (by dev F1)
        # Base run metrics: "Final evaluation #0 on test set".
        for j in range(len(lines) - 1, 0, -1):
            if lines[j].startswith('Final evaluation #0 on test set'):
                break
        for k in range(j, len(lines)):
            if lines[k].startswith('Precision'):
                break
        # [:-1] strips the last character of the value — presumably a '%'.
        p = float(lines[k].split()[-1][:-1])
        r = float(lines[k + 1].split()[-1][:-1])
        f1 = float(lines[k + 2].split()[-1][:-1])
        base_test_p += [p]
        base_test_r += [r]
        base_test_f1 += [f1]
        # Dev metrics at the best round.
        for j in range(len(lines) - 1, 0, -1):
            if lines[j].startswith('Final evaluation #' + str(i) + ' on dev set'):
                break
        for k in range(j, len(lines)):
            if lines[k].startswith('Precision'):
                break
        p = float(lines[k].split()[-1][:-1])
        r = float(lines[k + 1].split()[-1][:-1])
        f1 = float(lines[k + 2].split()[-1][:-1])
        dev_p += [p]
        dev_r += [r]
        dev_f1 += [f1]
        # Test metrics at the best round.
        for j in range(len(lines) - 1, 0, -1):
            if lines[j].startswith('Final evaluation #' + str(i) + ' on test set'):
                break
        for k in range(j, len(lines)):
            if lines[k].startswith('Precision'):
                break
        p = float(lines[k].split()[-1][:-1])
        r = float(lines[k + 1].split()[-1][:-1])
        f1 = float(lines[k + 2].split()[-1][:-1])
        test_p += [p]
        test_r += [r]
        test_f1 += [f1]
    if len(base_test_p) == 0:
        return
    base_mean_p, base_std_p = float(np.mean(base_test_p)), float(np.std(base_test_p))
    base_mean_r, base_std_r = float(np.mean(base_test_r)), float(np.std(base_test_r))
    base_mean_f1, base_std_f1 = float(np.mean(base_test_f1)), float(np.std(base_test_f1))
    dev_mean_p, dev_std_p = float(np.mean(dev_p)), float(np.std(dev_p))
    dev_mean_r, dev_std_r = float(np.mean(dev_r)), float(np.std(dev_r))
    dev_mean_f1, dev_std_f1 = float(np.mean(dev_f1)), float(np.std(dev_f1))
    test_mean_p, test_std_p = float(np.mean(test_p)), float(np.std(test_p))
    test_mean_r, test_std_r = float(np.mean(test_r)), float(np.std(test_r))
    test_mean_f1, test_std_f1 = float(np.mean(test_f1)), float(np.std(test_f1))
    print('\n\n#####\t%s\t#####' % prefix)
    print(len(base_test_p), 'seeds')
    print('base: %.2f $\pm$ %.2f\t%.2f $\pm$ %.2f\t%.2f $\pm$ %.2f' %
          (base_mean_p, base_std_p, base_mean_r, base_std_r, base_mean_f1, base_std_f1))
    print('dev:  %.2f $\pm$ %.2f\t%.2f $\pm$ %.2f\t%.2f $\pm$ %.2f' %
          (dev_mean_p, dev_std_p, dev_mean_r, dev_std_r, dev_mean_f1, dev_std_f1))
    print('test: %.2f $\pm$ %.2f\t%.2f $\pm$ %.2f\t%.2f $\pm$ %.2f' %
          (test_mean_p, test_std_p, test_mean_r, test_std_r, test_mean_f1, test_std_f1))
if __name__ == "__main__":
    # CLI: <data_name> <model_name> <dr0> <dr1>
    data_name, model_name = sys.argv[1], sys.argv[2]
    data_ratio = (float(sys.argv[3]), float(sys.argv[4]))
    result_dir = './results/' + data_name + '/' + model_name + '/'
    result_summary(result_dir, dr=data_ratio, write_to_file=False)
| [
"numpy.mean",
"argparse.ArgumentParser",
"pathlib.Path",
"collections.Counter",
"numpy.std",
"sys.stdout.write"
] | [((302, 392), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Score a prediction file using the gold labels."""'}), "(description=\n 'Score a prediction file using the gold labels.')\n", (325, 392), False, 'import argparse\n'), ((772, 781), 'collections.Counter', 'Counter', ([], {}), '()\n', (779, 781), False, 'from collections import Counter\n'), ((809, 818), 'collections.Counter', 'Counter', ([], {}), '()\n', (816, 818), False, 'from collections import Counter\n'), ((843, 852), 'collections.Counter', 'Counter', ([], {}), '()\n', (850, 852), False, 'from collections import Counter\n'), ((2481, 2506), 'sys.stdout.write', 'sys.stdout.write', (['""" P: """'], {}), "(' P: ')\n", (2497, 2506), False, 'import sys, os\n'), ((2707, 2732), 'sys.stdout.write', 'sys.stdout.write', (['""" R: """'], {}), "(' R: ')\n", (2723, 2732), False, 'import sys, os\n'), ((2939, 2965), 'sys.stdout.write', 'sys.stdout.write', (['""" F1: """'], {}), "(' F1: ')\n", (2955, 2965), False, 'import sys, os\n'), ((3160, 3194), 'sys.stdout.write', 'sys.stdout.write', (["(' #: %d' % gold)"], {}), "(' #: %d' % gold)\n", (3176, 3194), False, 'import sys, os\n'), ((3208, 3230), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (3224, 3230), False, 'import sys, os\n'), ((5170, 5186), 'pathlib.Path', 'Path', (['result_dir'], {}), '(result_dir)\n', (5174, 5186), False, 'from pathlib import Path\n'), ((7625, 7645), 'numpy.mean', 'np.mean', (['base_test_p'], {}), '(base_test_p)\n', (7632, 7645), True, 'import numpy as np\n'), ((7654, 7673), 'numpy.std', 'np.std', (['base_test_p'], {}), '(base_test_p)\n', (7660, 7673), True, 'import numpy as np\n'), ((7712, 7732), 'numpy.mean', 'np.mean', (['base_test_r'], {}), '(base_test_r)\n', (7719, 7732), True, 'import numpy as np\n'), ((7741, 7760), 'numpy.std', 'np.std', (['base_test_r'], {}), '(base_test_r)\n', (7747, 7760), True, 'import numpy as np\n'), ((7801, 7822), 'numpy.mean', 'np.mean', 
(['base_test_f1'], {}), '(base_test_f1)\n', (7808, 7822), True, 'import numpy as np\n'), ((7831, 7851), 'numpy.std', 'np.std', (['base_test_f1'], {}), '(base_test_f1)\n', (7837, 7851), True, 'import numpy as np\n'), ((7890, 7904), 'numpy.mean', 'np.mean', (['dev_p'], {}), '(dev_p)\n', (7897, 7904), True, 'import numpy as np\n'), ((7913, 7926), 'numpy.std', 'np.std', (['dev_p'], {}), '(dev_p)\n', (7919, 7926), True, 'import numpy as np\n'), ((7963, 7977), 'numpy.mean', 'np.mean', (['dev_r'], {}), '(dev_r)\n', (7970, 7977), True, 'import numpy as np\n'), ((7986, 7999), 'numpy.std', 'np.std', (['dev_r'], {}), '(dev_r)\n', (7992, 7999), True, 'import numpy as np\n'), ((8038, 8053), 'numpy.mean', 'np.mean', (['dev_f1'], {}), '(dev_f1)\n', (8045, 8053), True, 'import numpy as np\n'), ((8062, 8076), 'numpy.std', 'np.std', (['dev_f1'], {}), '(dev_f1)\n', (8068, 8076), True, 'import numpy as np\n'), ((8117, 8132), 'numpy.mean', 'np.mean', (['test_p'], {}), '(test_p)\n', (8124, 8132), True, 'import numpy as np\n'), ((8141, 8155), 'numpy.std', 'np.std', (['test_p'], {}), '(test_p)\n', (8147, 8155), True, 'import numpy as np\n'), ((8194, 8209), 'numpy.mean', 'np.mean', (['test_r'], {}), '(test_r)\n', (8201, 8209), True, 'import numpy as np\n'), ((8218, 8232), 'numpy.std', 'np.std', (['test_r'], {}), '(test_r)\n', (8224, 8232), True, 'import numpy as np\n'), ((8273, 8289), 'numpy.mean', 'np.mean', (['test_f1'], {}), '(test_f1)\n', (8280, 8289), True, 'import numpy as np\n'), ((8298, 8313), 'numpy.std', 'np.std', (['test_f1'], {}), '(test_f1)\n', (8304, 8313), True, 'import numpy as np\n'), ((2552, 2573), 'sys.stdout.write', 'sys.stdout.write', (['""" """'], {}), "(' ')\n", (2568, 2573), False, 'import sys, os\n'), ((2619, 2640), 'sys.stdout.write', 'sys.stdout.write', (['""" """'], {}), "(' ')\n", (2635, 2640), False, 'import sys, os\n'), ((2780, 2801), 'sys.stdout.write', 'sys.stdout.write', (['""" """'], {}), "(' ')\n", (2796, 2801), False, 'import sys, os\n'), ((2849, 2870), 
'sys.stdout.write', 'sys.stdout.write', (['""" """'], {}), "(' ')\n", (2865, 2870), False, 'import sys, os\n'), ((3009, 3030), 'sys.stdout.write', 'sys.stdout.write', (['""" """'], {}), "(' ')\n", (3025, 3030), False, 'import sys, os\n'), ((3074, 3095), 'sys.stdout.write', 'sys.stdout.write', (['""" """'], {}), "(' ')\n", (3090, 3095), False, 'import sys, os\n')] |
import numpy as np
from skimage.morphology import white_tophat as skimage_white_tophat
from skimage.exposure import rescale_intensity
from skimage.restoration import richardson_lucy
def white_tophat(image, radius):
    """White top-hat filter of *image*.

    The structuring element is a hyper-cube of ones with side *radius*,
    matching the image's dimensionality.
    """
    footprint = np.ones((radius,) * image.ndim)
    return skimage_white_tophat(image, selem=footprint)
def rl_decon(image, psf, **kwargs):
    """Richardson-Lucy deconvolution of *image* with the given *psf*.

    The image is first rescaled to the [0, 1] range; extra keyword
    arguments are forwarded to skimage's richardson_lucy.
    """
    normalized = rescale_intensity(
        image,
        in_range=(0, image.max()),
        out_range=(0, 1),
    )
    return richardson_lucy(normalized, psf, **kwargs)
def wiener_decon(image, psf, nsr=1e-3):
    """Wiener deconvolution of *image* with *psf* in the Fourier domain.

    The image is first rescaled to [0, 1]; *nsr* is the noise-to-signal
    ratio used to regularize the inverse filter. Returns the magnitude of
    the inverse-transformed result.
    """
    normalized = rescale_intensity(
        image,
        in_range=(0, image.max()),
        out_range=(0, 1),
    )
    image_fft = np.fft.fftn(normalized)
    psf_fft = np.fft.fftn(psf, s=normalized.shape)
    # Regularized inverse filter: conj(H) / (|H|^2 + nsr).
    wiener_filter = np.conj(psf_fft) / (np.abs(psf_fft) ** 2 + nsr)
    return np.abs(np.fft.ifftn(image_fft * wiener_filter))
| [
"numpy.abs",
"skimage.morphology.white_tophat",
"numpy.ones",
"numpy.conj",
"numpy.fft.fftn",
"skimage.restoration.richardson_lucy",
"numpy.fft.ifftn"
] | [((246, 277), 'numpy.ones', 'np.ones', (['((radius,) * image.ndim)'], {}), '((radius,) * image.ndim)\n', (253, 277), True, 'import numpy as np\n'), ((287, 327), 'skimage.morphology.white_tophat', 'skimage_white_tophat', (['image'], {'selem': 'selem'}), '(image, selem=selem)\n', (307, 327), True, 'from skimage.morphology import white_tophat as skimage_white_tophat\n'), ((551, 593), 'skimage.restoration.richardson_lucy', 'richardson_lucy', (['norm_image', 'psf'], {}), '(norm_image, psf, **kwargs)\n', (566, 593), False, 'from skimage.restoration import richardson_lucy\n'), ((818, 841), 'numpy.fft.fftn', 'np.fft.fftn', (['norm_image'], {}), '(norm_image)\n', (829, 841), True, 'import numpy as np\n'), ((850, 886), 'numpy.fft.fftn', 'np.fft.fftn', (['psf'], {'s': 'norm_image.shape'}), '(psf, s=norm_image.shape)\n', (861, 886), True, 'import numpy as np\n'), ((895, 905), 'numpy.conj', 'np.conj', (['H'], {}), '(H)\n', (902, 905), True, 'import numpy as np\n'), ((948, 967), 'numpy.fft.ifftn', 'np.fft.ifftn', (['(G * H)'], {}), '(G * H)\n', (960, 967), True, 'import numpy as np\n'), ((909, 918), 'numpy.abs', 'np.abs', (['H'], {}), '(H)\n', (915, 918), True, 'import numpy as np\n')] |
import pandas as pd
import math
from pandas import read_csv, to_numeric
import numpy
from numpy import nan
import statistics as sta
#df = pd.read_csv('/Users/king/Documents/python/control2/athlete_events.csv')
# Only these three columns are needed for the height-comparison analysis.
fields = ['Height', 'Year', "Sport"]
# NOTE(review): hard-coded absolute path — this script only runs as-is on
# the original author's machine.
df = pd.read_csv('/Users/king/Documents/python/control2/athlete_events.csv', skipinitialspace=True, usecols=fields)
def preprocesamientoDeDatos(df):
    """
    Clean a raw data table: drop records with missing data and convert
    the convertible columns to numeric dtype.

    INPUT:
        @param df: data table
        @type df: pandas.DataFrame
    OUTPUT:
        @param tabla: cleaned data table
        @type tabla: pandas.DataFrame
    """
    tabla = df.copy()
    ## Treat the sentinel strings 'NA' and '?' as missing, then drop those rows.
    tabla.replace(to_replace='NA', value=nan, inplace=True)
    tabla.replace(to_replace='?', value=nan, inplace=True)
    tabla.dropna(inplace=True)
    ## Convert every fully numeric-convertible column to numeric dtype.
    for col in tabla.columns:
        try:
            to_numeric(tabla[col])
        except (ValueError, TypeError):
            # Column holds non-numeric data (e.g. sport names); keep it as-is.
            # (Was a bare `except:` assigning an unused `error` string.)
            pass
        else:
            tabla[col] = to_numeric(tabla[col])
    ## Return the processed table
    return tabla
# Clean the raw dataset once at import time; all analysis below uses tabla_limpia.
tabla_limpia=preprocesamientoDeDatos(df)
# print('Total de Filas original:',df.shape[0])
# print('Total Filas sin NaN:',tabla_limpia.shape[0])
print("")
# Report (in Spanish) how many rows were dropped during cleaning.
print("NOTA: Se han limpiado los campos con valores igual a NaN ","quedando de: ",df.shape[0], " a: ",tabla_limpia.shape[0])
print("")
def comparaVectores(a, b):
    """Return True when both vectors have the same length and no element
    of *a* is greater than its counterpart in *b*; False otherwise."""
    if len(a) != len(b):
        return False
    return all(x <= y for x, y in zip(a, b))
def annosDeporte(disciplina1,disciplina2):
    # Return the years the two sports have in common in tabla_limpia, and
    # print (in Spanish) the year lists of each sport plus the intersection.
    # NOTE(review): when comparaVectores(a, b) returns True the function
    # prints "same years" but still returns an EMPTY list, so the caller's
    # per-year loop does nothing — confirm whether that is intended.
    annosDeporte =tabla_limpia[tabla_limpia["Sport"]==disciplina1]
    a = numpy.asarray(annosDeporte["Year"].unique())
    print("/--------------------------------Años deporte "+disciplina1+" -----------------------------------/")
    print(numpy.sort(a))
    annosDeporte2 =tabla_limpia[tabla_limpia["Sport"]==disciplina2]
    b = numpy.asarray(annosDeporte2["Year"].unique())
    print()
    print("/--------------------------------Años deporte "+disciplina2+" -----------------------------------/")
    print(numpy.sort(b))
    annosiguales =[]
    contador=0
    res = comparaVectores(a,b)
    if (res == True):
        print("Tienen los mismos años para poder comparar")
    else:
        # O(len(a)*len(b)) intersection of the two year arrays.
        for i in a:
            valor1=i
            for x in b:
                valor2=x
                if (valor1==valor2):
                    annosiguales.append(valor1)
                    contador=contador+1
    print()
    print("/--------------------------------Años a comparar son: -----------------------------------/")
    print(numpy.sort(annosiguales))
    print("")
    return annosiguales
def nAynB(disciplina, anno):
    """Return how many athletes are recorded for sport *disciplina* in
    year *anno* (reads the module-level tabla_limpia)."""
    mascara = (tabla_limpia['Sport'] == disciplina) & (tabla_limpia['Year'] == anno)
    return len(tabla_limpia.loc[mascara,])
def alturaPromedio(disciplina, anno):
    """Mean athlete height for the given sport and year (reads the
    module-level tabla_limpia)."""
    mascara = (tabla_limpia['Sport'] == disciplina) & (tabla_limpia['Year'] == anno)
    alturas = tabla_limpia.loc[mascara,]["Height"]
    return sta.mean(alturas)
def varianzaPromedio(disciplina, anno):
    """Sample variance (pandas default, ddof=1) of athlete heights for the
    given sport and year (reads the module-level tabla_limpia)."""
    mascara = (tabla_limpia['Sport'] == disciplina) & (tabla_limpia['Year'] == anno)
    alturas = tabla_limpia.loc[mascara,]["Height"]
    return alturas.var()
def listadoDeportes():
    """Build and print the numbered sport menu.

    Returns a list of (number, sport) tuples built from the unique sports
    in the module-level tabla_limpia, sorted alphabetically, numbered
    from 1.
    NOTE(review): range(1, filas) means the LAST sport of the sorted list
    never appears in the menu — confirm whether that is intended.
    """
    vector_deportes = tabla_limpia["Sport"].unique()
    vector_deportes.sort()
    filas = len(vector_deportes)
    matriz = [(numero, vector_deportes[numero - 1]) for numero in range(1, filas)]
    print("/--------------------------------Menú Control 2-----------------------------------/")
    print("/-------------------------------Escoja un número-----------------------------/")
    print(matriz)
    print("/--------------------------------Menú-----------------------------------/")
    return matriz
def ecuacionD(XA, XB, VA, VB, NA, NB):
    """Two-sample z statistic D = (XA-XB) / sqrt(VA/NA + VB/NB), rounded
    to 4 decimals. The height difference is reported as significant when
    |D| > 1.96 (the two-sided 95% critical value)."""
    estadistico = round((XA - XB) / math.sqrt(VA / NA + VB / NB), 4)
    if abs(estadistico) > 1.96:
        return "D: " + str(abs(estadistico)) + " Hay diferencia significativa"
    return "D: " + str(abs(estadistico)) + " No Hay diferencia significativa"
# Interactive menu: ask the user to pick two sports by number.
matrix=listadoDeportes()
print("")
totalDeportes = len(matrix)
x=True
mensaje="Ingrese el número asociado al primer deporte a evaluar los valores deben estar entre 1 y "+str(totalDeportes)+" donde Taekwondo= 51 y Judo= 28: "
while x==True:
    deporte1=input(mensaje)
    if (deporte1.isdigit()):
        deporte1 = int(deporte1)
        if (deporte1<=totalDeportes and deporte1>0):
            #print(deporte1)
            # Map the 1-based menu number to the sport name.
            deporte1 = deporte1-1
            deporte1=matrix[deporte1][1]
            print("UD. Ha escogido el deporte: ",str(deporte1), " El total de deportes son: ",str(totalDeportes))
            # Second selection; x=False ends both while loops at once.
            while x==True:
                deporte2=input("Escoja el segundo número del deporte a comparar: ")
                if (deporte2.isdigit()):
                    deporte2 = int(deporte2)
                    if (deporte2<=totalDeportes and deporte2>0):
                        deporte2 = deporte2-1
                        deporte2=matrix[deporte2][1]
                        print("UD. Ha escogido el deporte: ",str(deporte2), " UD va a comparar entre: Deporte: ",str(deporte1)," y Deporte : ",str(deporte2))
                        x=False
                    else:
                        print("Debe escoger un número entre 1 y ",str(totalDeportes))
                else:
                    print("Debe escoger un número entre 1 y ",str(totalDeportes))
    else:
        print("Debe escoger un número entre 1 y PRIMER IF",str(totalDeportes))
# Now compute the mean height XA of the first sport.
year=annosDeporte(deporte1,deporte2)
# Run the comparison once for every year both sports have in common.
year.sort()
for i in year:
    xA=alturaPromedio(deporte1,i)
    xB=alturaPromedio(deporte2,i)
    # print("/--------------------------------Año: ",str(i)," -----------------------------------/")
    # print("")
    # print("                             Promedio estatura                                        ")
    # print("La Altura promedio del deporte  ",deporte1," xA es: ",str(xA), "y La Altura promedio de",deporte2," xB es: ",str(xB))
    # print("")
    # print("                             Varianza                                         ")
    vA=varianzaPromedio(deporte1,i)
    vB=varianzaPromedio(deporte2,i)
    #print("La Varianza promedio del deporte  ",deporte1," vA es: ",str(vA)," y Varianza promedio de",deporte2," vB es: ",str(vB))
    # #print("La Varianza promedio del deporte  ",deporte2," vB es: ",str(vB))
    # print("")
    nA=nAynB(deporte1,i)
    nB=nAynB(deporte2,i)
    # print("                             Total deportistas                                          ")
    # print("Para el deporte: ",deporte1," nA es: ",str(nA)," y para ",deporte2," nA es: ",str(nB))
    # print("")
    # print("                             Existen diferencias significativas?                       ")
    # z-test verdict for this year (string message, Spanish).
    mensaje=ecuacionD(xA,xB,vA,vB,nA,nB)
    print("Año: ",str(i),mensaje)
    print("")
print("NOTA: Se llevo el valor de D al valor absoluto, solamente para evitar incoherencias al desplegar el valor final. Debido a que si uno invierte el orden de las disciplinas (Al momento de ingresar) no debería variar su resultado")
| [
"statistics.mean",
"pandas.read_csv",
"numpy.sort",
"math.sqrt",
"pandas.to_numeric"
] | [((257, 371), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/king/Documents/python/control2/athlete_events.csv"""'], {'skipinitialspace': '(True)', 'usecols': 'fields'}), "('/Users/king/Documents/python/control2/athlete_events.csv',\n skipinitialspace=True, usecols=fields)\n", (268, 371), True, 'import pandas as pd\n'), ((3623, 3641), 'statistics.mean', 'sta.mean', (['promedio'], {}), '(promedio)\n', (3631, 3641), True, 'import statistics as sta\n'), ((5016, 5038), 'math.sqrt', 'math.sqrt', (['denominador'], {}), '(denominador)\n', (5025, 5038), False, 'import math\n'), ((2157, 2170), 'numpy.sort', 'numpy.sort', (['a'], {}), '(a)\n', (2167, 2170), False, 'import numpy\n'), ((2428, 2441), 'numpy.sort', 'numpy.sort', (['b'], {}), '(b)\n', (2438, 2441), False, 'import numpy\n'), ((2948, 2972), 'numpy.sort', 'numpy.sort', (['annosiguales'], {}), '(annosiguales)\n', (2958, 2972), False, 'import numpy\n'), ((1100, 1122), 'pandas.to_numeric', 'to_numeric', (['tabla[col]'], {}), '(tabla[col])\n', (1110, 1122), False, 'from pandas import read_csv, to_numeric\n'), ((1252, 1274), 'pandas.to_numeric', 'to_numeric', (['tabla[col]'], {}), '(tabla[col])\n', (1262, 1274), False, 'from pandas import read_csv, to_numeric\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import glob
import re
import sys
import numpy as np
if 'linux' in sys.platform:
    import matplotlib
    # Non-interactive Agg backend on Linux (typically a headless server).
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
else:
    import matplotlib.pyplot as plt
def get_args():
    """Parse command-line options.

    --model_type: one of 'cis' or 'multi'.
    --epoch: integer epoch to evaluate (default 400).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_type', type=str, choices=['cis', 'multi'])
    parser.add_argument('--epoch', type=int, default=400)
    return parser.parse_args()
def get_breakeven(pre_rec):
    """Index of the row where precision (col 0) and recall (col 1) are
    closest — the breakeven point of a precision/recall array."""
    gaps = (pre_rec[:, 0] - pre_rec[:, 1]) ** 2
    return np.argmin(gaps)
if __name__ == '__main__':
    args = get_args()
    # Plot recall-at-breakeven vs. fraction of training data, for both
    # model types on the same axes.
    # NOTE(review): args.model_type is parsed but never used here — the
    # loop below always plots both 'cis' and 'multi'.
    plt.ylim([0.87, 1.0])
    for model_type in ['cis', 'multi']:
        bldg_recs = []
        road_recs = []
        for dname in glob.glob('results/{}_*'.format(model_type)):
            d = '{0}/integrated_{epoch}/evaluation_{epoch}'.format(
                dname, epoch=args.epoch)
            # Dataset ratio is encoded in the directory name, e.g. 'cis_0.25'.
            ratio = float(re.search('{}_([0-9\.]+)'.format(model_type),
                                    dname).groups()[0])
            if ratio > 0.5:
                continue
            bldg = np.load('{}/pre_rec_1.npy'.format(d))
            road = np.load('{}/pre_rec_2.npy'.format(d))
            # Recall at the precision/recall breakeven point.
            bldg_rec = bldg[get_breakeven(bldg)][1]
            road_rec = road[get_breakeven(road)][1]
            print('[{}, {}, {}]'.format(ratio, bldg_rec, road_rec))
            bldg_recs.append((ratio, bldg_rec))
            road_recs.append((ratio, road_rec))
        bldg_recs = np.array(sorted(bldg_recs))
        road_recs = np.array(sorted(road_recs))
        plt.plot(bldg_recs[:, 0], bldg_recs[:, 1],
                 label='Building prediction ({})'.format(model_type))
        plt.plot(road_recs[:, 0], road_recs[:, 1],
                 label='Road prediction ({})'.format(model_type))
    plt.legend()
    plt.xlabel('Percentage of data used for training')
    plt.ylabel('Recall at breakeven point')
    plt.savefig('dataset_ratio.png')
| [
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"matplotlib.pyplot.xlabel",
"numpy.argmin",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend"
] | [((171, 192), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (185, 192), False, 'import matplotlib\n'), ((302, 327), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (325, 327), False, 'import argparse\n'), ((558, 605), 'numpy.argmin', 'np.argmin', (['((pre_rec[:, 0] - pre_rec[:, 1]) ** 2)'], {}), '((pre_rec[:, 0] - pre_rec[:, 1]) ** 2)\n', (567, 605), True, 'import numpy as np\n'), ((660, 681), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.87, 1.0]'], {}), '([0.87, 1.0])\n', (668, 681), True, 'import matplotlib.pyplot as plt\n'), ((1849, 1861), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1859, 1861), True, 'import matplotlib.pyplot as plt\n'), ((1866, 1916), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Percentage of data used for training"""'], {}), "('Percentage of data used for training')\n", (1876, 1916), True, 'import matplotlib.pyplot as plt\n'), ((1921, 1960), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Recall at breakeven point"""'], {}), "('Recall at breakeven point')\n", (1931, 1960), True, 'import matplotlib.pyplot as plt\n'), ((1965, 1997), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""dataset_ratio.png"""'], {}), "('dataset_ratio.png')\n", (1976, 1997), True, 'import matplotlib.pyplot as plt\n')] |
import tensorflow.keras
from tensorflow.keras import layers
import os
import matplotlib.pyplot as plt
from PIL import Image
#from numpy import asarray
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.models import load_model
from tensorflow.keras import preprocessing
from tensorflow.keras import backend as K
from tensorflow.keras import models
import tensorflow as tf
from config import *
from model import ProposedModel, getAssembledModel
# Run tf.functions eagerly (easier debugging, slower execution).
tf.config.run_functions_eagerly(True)
# TF1-style session config: cap GPU memory at 80% of the device and let
# allocations grow on demand instead of grabbing it all up front.
config = tf.compat.v1.ConfigProto(gpu_options =
                         tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.8)
# device_count = {'GPU': 1}
)
config.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config)
tf.compat.v1.keras.backend.set_session(session)
def getHeatMap(image, encoded):
    """Grad-CAM style heat map for *image* using the *encoded* sub-model.

    NOTE(review): this function is broken as written — `predictions` is
    never defined, so the `loss =` line raises NameError; `model`,
    `conv_layer` and `image_size` are computed but never used. Presumably
    `heatmap_model` was meant to return both the conv feature map and the
    class predictions — confirm the intended wiring before relying on it.
    (Its only call site is commented out in the stitching loop below.)
    """
    image_size = 224
    # Load pre-trained Keras model and the image to classify
    model = tf.keras.applications.vgg16.VGG16(include_top=False, weights="imagenet")
    img_tensor = preprocessing.image.img_to_array(image)
    img_tensor = np.expand_dims(img_tensor, axis=0)
    img_tensor = preprocess_input(img_tensor)
    conv_layer = model.get_layer("block5_conv3")
    heatmap_model = encoded
    with tf.GradientTape() as gtape:
        conv_output = heatmap_model(tf.convert_to_tensor(img_tensor, dtype=tf.float32))
        print(conv_output)
        loss = predictions[:, np.argmax(predictions[0])]
    grads = gtape.gradient(loss, conv_output)
    # Global-average-pool the gradients to get one weight per channel.
    pooled_grads = K.mean(grads, axis=(0, 1, 2))
    heatmap = tf.reduce_mean(tf.multiply(pooled_grads, conv_output), axis=-1)
    heatmap = np.maximum(heatmap, 0)
    # Normalize to [0, 1], guarding against an all-zero map.
    max_heat = np.max(heatmap)
    if max_heat == 0:
        max_heat = 1e-10
    heatmap /= max_heat
    print(heatmap.shape)
    return heatmap
def getPatches(folder, isTraining, p):
    """Cut every image in *folder* into non-overlapping p x p patches.

    When isTraining is True, Gaussian noise (mean 0, variance 10) is added
    to each image before chunking. Pixel values are scaled to [0, 1].
    Returns (row_indices, col_indices, patches) where patches is a numpy
    array of shape (N, p, p, channels).

    NOTE(review): images are assumed to be act_size x act_size (2240) RGB;
    a 2-D grayscale image would fail the `row, col,ch = data.shape` unpack.
    NOTE(review): the nested loops iterate over every PIXEL index, but the
    `(i+1)*p <= row` guard means only the first row//p x col//p iterations
    produce a patch — the remaining iterations are wasted work.
    """
    patches = []
    i_i = []
    i_j = []
    mean = 0
    var = 10
    sigma = var ** 0.5
    act_size = 2240
    # One noise field, reused for every image and every channel.
    gaussian = np.random.normal(mean, sigma, (act_size, act_size))
    doChunking = False  # debug switch: when True, stop after ~10 patches
    index = 0
    i2 = 1
    for filename in os.listdir(folder):
        if isTraining == True:
            print(str(i2) + ", chunking training image '" + filename + "'")
        else:
            print(str(i2) + ", chunking test image '" + filename + "'")
        i2 = i2 + 1
        image = Image.open(folder + filename)
        data = np.array(image)
        if isTraining == True:
            # adding Gaussian noise
            if len(data.shape) == 2:
                data = data + gaussian
            else:
                data[:, :, 0] = data[:, :, 0] + gaussian
                data[:, :, 1] = data[:, :, 1] + gaussian
                data[:, :, 2] = data[:, :, 2] + gaussian
        data = data.astype('float32') / 255.
        row, col,ch = data.shape
        for i in range(row):
            for j in range(col):
                if (i+1)*p <= row and (j+1)*p <= col:
                    patch = data[(i)*p:(i+1)*p,(j)*p:(j+1)*p,:]
                    patches.append(patch)
                    i_i.append(i)
                    i_j.append(j)
                    if doChunking == True:
                        if index >= 10:
                            break
                        else:
                            index = index + 1
    patches = np.array(patches)
    return i_i, i_j, patches
def transferWeights(model1, model2):
    """Copy layer weights from *model1* into *model2*, layer by layer,
    skipping layer 0 (the input layer). Returns *model2*.

    Both models must have the same layer structure.
    """
    for idx in range(1, len(model1.layers)):
        weights = model1.layers[idx].get_weights()
        model2.layers[idx].set_weights(weights)
    return model2
# Build the autoencoder; p, doTraining, tr_folder, te_folders come from
# `config` via the star import above — confirm their values there.
autoencoder = getAssembledModel(p)
# ONE-TIME TRAINING: fit on noisy patches and save, or load saved weights.
if doTraining == True:
    _, _,x_train = getPatches(tr_folder, True, p)
    _, _,x_valid = getPatches(te_folders[0], False, p) # using test dataset-1 for validation as well, it is user configurable
    print(x_train.shape)
    print(x_valid.shape)
    autoencoder.fit(x_train, x_train,
                    epochs=200,
                    batch_size=16,
                    shuffle=True,
                    validation_data=(x_valid, x_valid))
    autoencoder.save("model.tf")
else:
    # Load the trained model and copy its weights into the fresh assembly.
    model1 = tf.keras.models.load_model("model.tf", compile=False)
    autoencoder = transferWeights(model1, autoencoder)
# TESTING: for each dataset, reconstruct all patches, stitch them back into
# full images, and save real / reconstructed / disparity images to disk.
# (numDatasets, te_folders, act_size, res_fake, res_real, res_disp come from
# the `config` star import — confirm their values there.)
for d in range(numDatasets):
    i_i, i_j, x_test = getPatches(te_folders[d], False, p)
    print(x_test.shape)
    print("**********************Reconstructing Patches*******************")
    decoded_imgs = []
    l1, r1,c1,ch1 = x_test.shape
    for i in range(l1):
        decoded_imgs.append(autoencoder.predict(x_test[i].reshape(1, p, p, 3)))
    decoded_imgs = np.array(decoded_imgs)
    t, r, c, ch = x_test.shape
    d_imgs = []
    d_test = []
    heatmaps = []
    i = 0
    j = 0
    img = np.zeros((act_size, act_size, 3), dtype='float32')
    img2 = np.zeros((act_size, act_size, 3), dtype='float32')
    img3 = np.zeros((act_size, act_size, 3), dtype='float32')
    print(decoded_imgs.shape)
    row, col,ch = img.shape
    print("**********************Stitching Images*******************")
    # Place each patch back at its (i, j) grid position; an image is
    # complete when the bottom-right patch (i == 9, j == 9) arrives —
    # i.e. this assumes a 10x10 patch grid (act_size == 10*p), TODO confirm.
    for k in range(len(i_i)):
        patch = decoded_imgs[k].reshape(p, p, 3);
        i = i_i[k]
        j = i_j[k]
        img[(i)*p:(i+1)*p,(j)*p:(j+1)*p,:] = patch
        # heatmap = getHeatMap(patch, keras.Model(input_img, encoded))
        # img3 accumulates the per-pixel disparity (real minus reconstructed).
        img3[i*p:(i+1)*p,j*p:(j+1)*p,:] = x_test[k].reshape(p, p, 3)-patch
        patch = x_test[k].reshape(p, p, 3);
        img2[i*p:(i+1)*p,j*p:(j+1)*p,:] = patch
        if i == 9 and j == 9:
            d_imgs.append(img)
            img = np.zeros((act_size, act_size, 3), dtype='float32')
            d_test.append(img2)
            img2 = np.zeros((act_size, act_size, 3), dtype='float32')
            heatmaps.append(img3)
            img3 = np.zeros((act_size, act_size, 3), dtype='float32')
    d_test = np.array(d_test)
    d_imgs = np.array(d_imgs)
    heatmaps = np.array(heatmaps)
    print(d_imgs.shape)
    print(d_test.shape)
    t, r, c, ch = d_imgs.shape
    m = d
    folder = res_fake[m]
    print("**********************Saving reconstructed images at " + folder + "*******************")
    for i in range(t):
        A = (255 * d_imgs[i].reshape(act_size, act_size, 3)).astype(np.uint8)
        im = Image.fromarray(A)
        newsize = (224, 224)  # NOTE(review): unused — resize below is commented out
        #im = im.resize(newsize)
        #print(im.size)
        # im.show()
        im.save(folder + "Image" + str(i) + ".jpg")
    t, r, c, ch = d_test.shape
    folder = res_real[m]
    print("**********************Saving real images at " + folder + "*******************")
    for i in range(t):
        A = (255 * d_test[i].reshape(act_size, act_size, 3)).astype(np.uint8)
        im = Image.fromarray(A)
        newsize = (224, 224)
        # im = im.resize(newsize)
        im.save(folder + "Image" + str(i) + ".jpg")
    folder = res_disp[m]
    print("**********************Saving disparity maps at " + folder + "*******************")
    for i in range(t):
        A = (255 * heatmaps[i].reshape(act_size, act_size, 3)).astype(np.uint8)
        print("MSE: " + str(255*np.mean(heatmaps[i] * heatmaps[i])))
        im = Image.fromarray(A)
        newsize = (224, 224)
        # im = im.resize(newsize)
        im.save(folder + "Image" + str(i) + ".jpg")
# [Optional] Uncomment these line to plot results
# n = 9
# plt.figure(figsize=(20, 4))
# for i in range(1, n + 1):
#     # Display original
#     ax = plt.subplot(2, n, i)
#     plt.gray()
#     plt.imshow(d_test[i].reshape(act_size, act_size, 3))
#     ax.get_xaxis().set_visible(False)
#     ax.get_yaxis().set_visible(False)
#    Display reconstruction
#     ax = plt.subplot(2, n, i + n)
#     plt.gray()
#     plt.imshow(d_imgs[i].reshape(act_size, act_size, 3))
#     ax.get_xaxis().set_visible(False)
#     ax.get_yaxis().set_visible(False)
# plt.show()
| [
"tensorflow.config.run_functions_eagerly",
"tensorflow.multiply",
"tensorflow.GradientTape",
"numpy.array",
"tensorflow.keras.models.load_model",
"tensorflow.compat.v1.Session",
"numpy.mean",
"os.listdir",
"tensorflow.keras.backend.mean",
"numpy.max",
"tensorflow.convert_to_tensor",
"numpy.max... | [((625, 662), 'tensorflow.config.run_functions_eagerly', 'tf.config.run_functions_eagerly', (['(True)'], {}), '(True)\n', (656, 662), True, 'import tensorflow as tf\n'), ((883, 918), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'config': 'config'}), '(config=config)\n', (903, 918), True, 'import tensorflow as tf\n'), ((920, 967), 'tensorflow.compat.v1.keras.backend.set_session', 'tf.compat.v1.keras.backend.set_session', (['session'], {}), '(session)\n', (958, 967), True, 'import tensorflow as tf\n'), ((3863, 3883), 'model.getAssembledModel', 'getAssembledModel', (['p'], {}), '(p)\n', (3880, 3883), False, 'from model import ProposedModel, getAssembledModel\n'), ((1118, 1190), 'tensorflow.keras.applications.vgg16.VGG16', 'tf.keras.applications.vgg16.VGG16', ([], {'include_top': '(False)', 'weights': '"""imagenet"""'}), "(include_top=False, weights='imagenet')\n", (1151, 1190), True, 'import tensorflow as tf\n'), ((1209, 1248), 'tensorflow.keras.preprocessing.image.img_to_array', 'preprocessing.image.img_to_array', (['image'], {}), '(image)\n', (1241, 1248), False, 'from tensorflow.keras import preprocessing\n'), ((1267, 1301), 'numpy.expand_dims', 'np.expand_dims', (['img_tensor'], {'axis': '(0)'}), '(img_tensor, axis=0)\n', (1281, 1301), True, 'import numpy as np\n'), ((1320, 1348), 'tensorflow.keras.applications.vgg16.preprocess_input', 'preprocess_input', (['img_tensor'], {}), '(img_tensor)\n', (1336, 1348), False, 'from tensorflow.keras.applications.vgg16 import preprocess_input\n'), ((1850, 1872), 'numpy.maximum', 'np.maximum', (['heatmap', '(0)'], {}), '(heatmap, 0)\n', (1860, 1872), True, 'import numpy as np\n'), ((1889, 1904), 'numpy.max', 'np.max', (['heatmap'], {}), '(heatmap)\n', (1895, 1904), True, 'import numpy as np\n'), ((2213, 2264), 'numpy.random.normal', 'np.random.normal', (['mean', 'sigma', '(act_size, act_size)'], {}), '(mean, sigma, (act_size, act_size))\n', (2229, 2264), True, 'import numpy as np\n'), ((2350, 
2368), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (2360, 2368), False, 'import os\n'), ((3600, 3617), 'numpy.array', 'np.array', (['patches'], {}), '(patches)\n', (3608, 3617), True, 'import numpy as np\n'), ((4432, 4485), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""model.tf"""'], {'compile': '(False)'}), "('model.tf', compile=False)\n", (4458, 4485), True, 'import tensorflow as tf\n'), ((4952, 4974), 'numpy.array', 'np.array', (['decoded_imgs'], {}), '(decoded_imgs)\n', (4960, 4974), True, 'import numpy as np\n'), ((5130, 5180), 'numpy.zeros', 'np.zeros', (['(act_size, act_size, 3)'], {'dtype': '"""float32"""'}), "((act_size, act_size, 3), dtype='float32')\n", (5138, 5180), True, 'import numpy as np\n'), ((5193, 5243), 'numpy.zeros', 'np.zeros', (['(act_size, act_size, 3)'], {'dtype': '"""float32"""'}), "((act_size, act_size, 3), dtype='float32')\n", (5201, 5243), True, 'import numpy as np\n'), ((5256, 5306), 'numpy.zeros', 'np.zeros', (['(act_size, act_size, 3)'], {'dtype': '"""float32"""'}), "((act_size, act_size, 3), dtype='float32')\n", (5264, 5306), True, 'import numpy as np\n'), ((6290, 6306), 'numpy.array', 'np.array', (['d_test'], {}), '(d_test)\n', (6298, 6306), True, 'import numpy as np\n'), ((6321, 6337), 'numpy.array', 'np.array', (['d_imgs'], {}), '(d_imgs)\n', (6329, 6337), True, 'import numpy as np\n'), ((6354, 6372), 'numpy.array', 'np.array', (['heatmaps'], {}), '(heatmaps)\n', (6362, 6372), True, 'import numpy as np\n'), ((739, 799), 'tensorflow.compat.v1.GPUOptions', 'tf.compat.v1.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(0.8)'}), '(per_process_gpu_memory_fraction=0.8)\n', (762, 799), True, 'import tensorflow as tf\n'), ((1446, 1463), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (1461, 1463), True, 'import tensorflow as tf\n'), ((1724, 1753), 'tensorflow.keras.backend.mean', 'K.mean', (['grads'], {'axis': '(0, 1, 2)'}), '(grads, axis=(0, 1, 2))\n', (1730, 1753), 
True, 'from tensorflow.keras import backend as K\n'), ((1786, 1824), 'tensorflow.multiply', 'tf.multiply', (['pooled_grads', 'conv_output'], {}), '(pooled_grads, conv_output)\n', (1797, 1824), True, 'import tensorflow as tf\n'), ((2615, 2644), 'PIL.Image.open', 'Image.open', (['(folder + filename)'], {}), '(folder + filename)\n', (2625, 2644), False, 'from PIL import Image\n'), ((2661, 2676), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2669, 2676), True, 'import numpy as np\n'), ((6721, 6739), 'PIL.Image.fromarray', 'Image.fromarray', (['A'], {}), '(A)\n', (6736, 6739), False, 'from PIL import Image\n'), ((7175, 7193), 'PIL.Image.fromarray', 'Image.fromarray', (['A'], {}), '(A)\n', (7190, 7193), False, 'from PIL import Image\n'), ((7627, 7645), 'PIL.Image.fromarray', 'Image.fromarray', (['A'], {}), '(A)\n', (7642, 7645), False, 'from PIL import Image\n'), ((1511, 1561), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['img_tensor'], {'dtype': 'tf.float32'}), '(img_tensor, dtype=tf.float32)\n', (1531, 1561), True, 'import tensorflow as tf\n'), ((5997, 6047), 'numpy.zeros', 'np.zeros', (['(act_size, act_size, 3)'], {'dtype': '"""float32"""'}), "((act_size, act_size, 3), dtype='float32')\n", (6005, 6047), True, 'import numpy as np\n'), ((6101, 6151), 'numpy.zeros', 'np.zeros', (['(act_size, act_size, 3)'], {'dtype': '"""float32"""'}), "((act_size, act_size, 3), dtype='float32')\n", (6109, 6151), True, 'import numpy as np\n'), ((6211, 6261), 'numpy.zeros', 'np.zeros', (['(act_size, act_size, 3)'], {'dtype': '"""float32"""'}), "((act_size, act_size, 3), dtype='float32')\n", (6219, 6261), True, 'import numpy as np\n'), ((1622, 1647), 'numpy.argmax', 'np.argmax', (['predictions[0]'], {}), '(predictions[0])\n', (1631, 1647), True, 'import numpy as np\n'), ((7576, 7610), 'numpy.mean', 'np.mean', (['(heatmaps[i] * heatmaps[i])'], {}), '(heatmaps[i] * heatmaps[i])\n', (7583, 7610), True, 'import numpy as np\n')] |
from utils_pos import get_word_tag, preprocess
import pandas as pd
from collections import defaultdict
import math
import numpy as np
import pickle
def test_create_dictionaries(target, training_corpus, vocab):
successful_cases = 0
failed_cases = []
test_cases = [
{
"name": "default_case",
"input": {
"training_corpus": training_corpus,
"vocab": vocab,
"verbose": False,
},
"expected": {
"len_emission_counts": 31140,
"len_transition_counts": 1421,
"len_tag_counts": 46,
"emission_counts": {
("DT", "the"): 41098,
("NNP", "--unk_upper--"): 4635,
("NNS", "Arts"): 2,
},
"transition_counts": {
("VBN", "TO"): 2142,
("CC", "IN"): 1227,
("VBN", "JJR"): 66,
},
"tag_counts": {"PRP": 17436, "UH": 97, ")": 1376,},
},
},
{
"name": "small_case",
"input": {
"training_corpus": training_corpus[:1000],
"vocab": vocab,
"verbose": False,
},
"expected": {
"len_emission_counts": 442,
"len_transition_counts": 272,
"len_tag_counts": 38,
"emission_counts": {
("DT", "the"): 48,
("NNP", "--unk_upper--"): 9,
("NNS", "Arts"): 1,
},
"transition_counts": {
("VBN", "TO"): 3,
("CC", "IN"): 2,
("VBN", "JJR"): 1,
},
"tag_counts": {"PRP": 11, "UH": 0, ")": 2,},
},
},
]
for test_case in test_cases:
result_emission, result_transition, result_tag = target(**test_case["input"])
# emission dictionary
try:
assert isinstance(result_emission, defaultdict)
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": defaultdict,
"got": type(result_emission),
}
)
print(
f"Wrong output type for emission_counts dictionary.\n\t Expected: {failed_cases[-1].get('expected')} \n\tGot: {failed_cases[-1].get('got')}."
)
try:
assert len(result_emission) == test_case["expected"]["len_emission_counts"]
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"]["len_emission_counts"],
"got": len(result_emission),
}
)
print(
f"Wrong output length for emission_counts dictionary.\n\t Expected: {failed_cases[-1].get('expected')} \n\tGot: {failed_cases[-1].get('got')}."
)
try:
for k, v in test_case["expected"]["emission_counts"].items():
assert np.isclose(result_emission[k], v)
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"]["emission_counts"],
"got": result_emission,
}
)
print(
f"Wrong output values for emission_counts dictionary.\n\t Expected: {failed_cases[-1].get('expected')}."
)
# transition dictionary
try:
assert isinstance(result_transition, defaultdict)
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": defaultdict,
"got": type(result_transition),
}
)
print(
f"Wrong output type for transition_counts dictionary.\n\t Expected: {failed_cases[-1].get('expected')} \n\tGot: {failed_cases[-1].get('got')}."
)
try:
assert (
len(result_transition) == test_case["expected"]["len_transition_counts"]
)
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"]["len_transition_counts"],
"got": len(result_transition),
}
)
print(
f"Wrong output length for transition_counts dictionary.\n\t Expected: {failed_cases[-1].get('expected')} \n\tGot: {failed_cases[-1].get('got')}."
)
try:
for k, v in test_case["expected"]["transition_counts"].items():
assert np.isclose(result_transition[k], v)
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"]["transition_counts"],
"got": result_transition,
}
)
print(
f"Wrong output values for transition_counts dictionary.\n\t Expected: {failed_cases[-1].get('expected')}."
)
# tags count
try:
assert isinstance(result_tag, defaultdict)
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": defaultdict,
"got": type(result_transition),
}
)
print(
f"Wrong output type for tag_counts dictionary.\n\t Expected: {failed_cases[-1].get('expected')} \n\tGot: {failed_cases[-1].get('got')}."
)
try:
assert len(result_tag) == test_case["expected"]["len_tag_counts"]
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"]["len_tag_counts"],
"got": len(result_tag),
}
)
print(
f"Wrong output length for tag_counts dictionary.\n\t Expected: {failed_cases[-1].get('expected')} \n\tGot: {failed_cases[-1].get('got')}."
)
try:
for k, v in test_case["expected"]["tag_counts"].items():
assert np.isclose(result_tag[k], v)
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"]["tag_counts"],
"got": result_tag,
}
)
print(
f"Wrong output values for tag_counts dictionary.\n\t Expected: {failed_cases[-1].get('expected')}."
)
if len(failed_cases) == 0:
print("\033[92m All tests passed")
else:
print("\033[92m", successful_cases, " Tests passed")
print("\033[91m", len(failed_cases), " Tests failed")
# return failed_cases, len(failed_cases) + successful_cases
def test_predict_pos(target, prep, y, emission_counts, vocab, states):
successful_cases = 0
failed_cases = []
test_cases = [
{
"name": "default_check",
"input": {
"prep": prep,
"y": y,
"emission_counts": emission_counts,
"vocab": vocab,
"states": states,
},
"expected": 0.8888563993099213,
},
{
"name": "small_check",
"input": {
"prep": prep[:1000],
"y": y[:1000],
"emission_counts": emission_counts,
"vocab": vocab,
"states": states,
},
"expected": 0.876,
},
]
for test_case in test_cases:
result = target(**test_case["input"])
try:
assert np.isclose(result, test_case["expected"])
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"],
"got": result,
}
)
print(
f"Wrong output values for tag_counts dictionary.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
if len(failed_cases) == 0:
print("\033[92m All tests passed")
else:
print("\033[92m", successful_cases, " Tests passed")
print("\033[91m", len(failed_cases), " Tests failed")
# return failed_cases, len(failed_cases) + successful_cases
def test_create_transition_matrix(target, tag_counts, transition_counts):
successful_cases = 0
failed_cases = []
test_cases = [
{
"name": "default_check",
"input": {
"alpha": 0.001,
"tag_counts": tag_counts,
"transition_counts": transition_counts,
},
"expected": {
"0:5": np.array(
[
[
7.03997297e-06,
7.03997297e-06,
7.03997297e-06,
7.03997297e-06,
7.03997297e-06,
],
[
1.35647553e-07,
1.35647553e-07,
1.35647553e-07,
1.35647553e-07,
1.35647553e-07,
],
[
1.44528595e-07,
1.44673124e-04,
6.93751711e-03,
6.79298851e-03,
5.05864537e-03,
],
[
7.32039770e-07,
1.69101919e-01,
7.32039770e-07,
7.32039770e-07,
7.32039770e-07,
],
[
7.26719892e-07,
7.27446612e-04,
7.26719892e-07,
7.27446612e-04,
7.26719892e-07,
],
]
),
"30:35": np.array(
[
[
2.21706877e-06,
2.21706877e-06,
2.21706877e-06,
8.87049214e-03,
2.21706877e-06,
],
[
3.75650909e-07,
7.51677469e-04,
3.75650909e-07,
5.10888993e-02,
3.75650909e-07,
],
[
1.72277159e-05,
1.72277159e-05,
1.72277159e-05,
1.72277159e-05,
1.72277159e-05,
],
[
4.47733569e-05,
4.47286283e-08,
4.47286283e-08,
8.95019852e-05,
4.47733569e-05,
],
[
1.03043917e-05,
1.03043917e-05,
1.03043917e-05,
6.18366548e-02,
3.09234796e-02,
],
]
),
},
},
{
"name": "alpha_check",
"input": {
"alpha": 0.05,
"tag_counts": tag_counts,
"transition_counts": transition_counts,
},
"expected": {
"0:5": np.array(
[
[
3.46500347e-04,
3.46500347e-04,
3.46500347e-04,
3.46500347e-04,
3.46500347e-04,
],
[
6.78030457e-06,
6.78030457e-06,
6.78030457e-06,
6.78030457e-06,
6.78030457e-06,
],
[
7.22407640e-06,
1.51705604e-04,
6.94233742e-03,
6.79785589e-03,
5.06407756e-03,
],
[
3.65416941e-05,
1.68859168e-01,
3.65416941e-05,
3.65416941e-05,
3.65416941e-05,
],
[
3.62765726e-05,
7.61808024e-04,
3.62765726e-05,
7.61808024e-04,
3.62765726e-05,
],
]
),
"30:35": np.array(
[
[
1.10302228e-04,
1.10302228e-04,
1.10302228e-04,
8.93448048e-03,
1.10302228e-04,
],
[
1.87666554e-05,
7.69432872e-04,
1.87666554e-05,
5.10640694e-02,
1.87666554e-05,
],
[
8.29187396e-04,
8.29187396e-04,
8.29187396e-04,
8.29187396e-04,
8.29187396e-04,
],
[
4.69603252e-05,
2.23620596e-06,
2.23620596e-06,
9.16844445e-05,
4.69603252e-05,
],
[
5.03524673e-04,
5.03524673e-04,
5.03524673e-04,
6.09264854e-02,
3.07150050e-02,
],
]
),
},
},
]
for test_case in test_cases:
result = target(**test_case["input"])
try:
assert isinstance(result, np.ndarray)
successful_cases += 1
except:
failed_cases.append(
{"name": test_case["name"], "expected": np.ndarray, "got": type(result),}
)
print(
f"Wrong output type .\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert np.allclose(result[0:5, 0:5], test_case["expected"]["0:5"])
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"]["0:5"],
"got": result[0:5, 0:5],
}
)
print(
f"Wrong output values in rows and columns with indexes between 0 and 5.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert np.allclose(result[30:35, 30:35], test_case["expected"]["30:35"])
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"]["30:35"],
"got": result[30:35, 30:35],
}
)
print(
f"Wrong output values in rows and columns with indexes between 30 and 35.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
if len(failed_cases) == 0:
print("\033[92m All tests passed")
else:
print("\033[92m", successful_cases, " Tests passed")
print("\033[91m", len(failed_cases), " Tests failed")
# return failed_cases, len(failed_cases) + successful_cases
def test_create_emission_matrix(target, tag_counts, emission_counts, vocab):
successful_cases = 0
failed_cases = []
test_cases = [
{
"name": "default_check",
"input": {
"alpha": 0.001,
"tag_counts": tag_counts,
"emission_counts": emission_counts,
"vocab": vocab,
},
"expected": {
"0:5": np.array(
[
[
6.03219988e-06,
6.03219988e-06,
8.56578416e-01,
6.03219988e-06,
6.03219988e-06,
],
[
1.35212298e-07,
1.35212298e-07,
1.35212298e-07,
9.71365280e-01,
1.35212298e-07,
],
[
1.44034584e-07,
1.44034584e-07,
1.44034584e-07,
1.44034584e-07,
1.44034584e-07,
],
[
7.19539897e-07,
7.19539897e-07,
7.19539897e-07,
7.19539897e-07,
7.19539897e-07,
],
[
7.14399508e-07,
7.14399508e-07,
7.14399508e-07,
7.14399508e-07,
7.14399508e-07,
],
]
),
"30:35": np.array(
[
[
2.10625199e-06,
2.10625199e-06,
2.10625199e-06,
2.10625199e-06,
2.10625199e-06,
],
[
3.72331731e-07,
3.72331731e-07,
3.72331731e-07,
3.72331731e-07,
3.72331731e-07,
],
[
1.22283772e-05,
1.22406055e-02,
1.22283772e-05,
1.22283772e-05,
1.22283772e-05,
],
[
4.46812012e-08,
4.46812012e-08,
4.46812012e-08,
4.46812012e-08,
4.46812012e-08,
],
[
8.27972213e-06,
4.96866125e-02,
8.27972213e-06,
8.27972213e-06,
8.27972213e-06,
],
]
),
},
},
{
"name": "alpha_check",
"input": {
"alpha": 0.05,
"tag_counts": tag_counts,
"emission_counts": emission_counts,
"vocab": vocab,
},
"expected": {
"0:5": np.array(
[
[
3.75699741e-05,
3.75699741e-05,
1.06736296e-01,
3.75699741e-05,
3.75699741e-05,
],
[
5.84054154e-06,
5.84054154e-06,
5.84054154e-06,
8.39174848e-01,
5.84054154e-06,
],
[
6.16686298e-06,
6.16686298e-06,
6.16686298e-06,
6.16686298e-06,
6.16686298e-06,
],
[
1.95706206e-05,
1.95706206e-05,
1.95706206e-05,
1.95706206e-05,
1.95706206e-05,
],
[
1.94943174e-05,
1.94943174e-05,
1.94943174e-05,
1.94943174e-05,
1.94943174e-05,
],
]
),
"30:35": np.array(
[
[
3.04905937e-05,
3.04905937e-05,
3.04905937e-05,
3.04905937e-05,
3.04905937e-05,
],
[
1.29841464e-05,
1.29841464e-05,
1.29841464e-05,
1.29841464e-05,
1.29841464e-05,
],
[
4.01010547e-05,
8.42122148e-04,
4.01010547e-05,
4.01010547e-05,
4.01010547e-05,
],
[
2.12351646e-06,
2.12351646e-06,
2.12351646e-06,
2.12351646e-06,
2.12351646e-06,
],
[
3.88847844e-05,
4.70505891e-03,
3.88847844e-05,
3.88847844e-05,
3.88847844e-05,
],
]
),
},
},
]
for test_case in test_cases:
result = target(**test_case["input"])
try:
assert isinstance(result, np.ndarray)
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": np.ndarray,
"got": type(result),
}
)
print(
f"Wrong output type .\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert np.allclose(result[0:5, 0:5], test_case["expected"]["0:5"])
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"]["0:5"],
"got": result[0:5, 0:5],
}
)
print(
f"Wrong output values in rows and columns with indexes between 0 and 5.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert np.allclose(result[30:35, 30:35], test_case["expected"]["30:35"])
successful_cases += 1
except:
failed_cases.append(
{
"name": test_case["name"],
"expected": test_case["expected"]["30:35"],
"got": result[30:35, 30:35],
}
)
print(
f"Wrong output values in rows and columns with indexes between 30 and 35.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
if len(failed_cases) == 0:
print("\033[92m All tests passed")
else:
print("\033[92m", successful_cases, " Tests passed")
print("\033[91m", len(failed_cases), " Tests failed")
# return failed_cases, len(failed_cases) + successful_cases
def test_initialize(target, states, tag_counts, A, B, corpus, vocab):
successful_cases = 0
failed_cases = []
test_cases = [
{
"name": "default_check",
"input": {
"states": states,
"tag_counts": tag_counts,
"A": A,
"B": B,
"corpus": corpus,
"vocab": vocab,
},
"expected": {
"best_probs_shape": (46, 34199),
"best_paths_shape": (46, 34199),
"best_probs_col0": np.array(
[
-22.60982633,
-23.07660654,
-23.57298822,
-19.76726066,
-24.74325104,
-35.20241402,
-35.00096024,
-34.99203854,
-21.35069072,
-19.85767814,
-21.92098414,
-4.01623741,
-19.16380593,
-21.1062242,
-20.47163973,
-21.10157273,
-21.49584851,
-20.4811853,
-18.25856307,
-23.39717471,
-21.92146798,
-9.41377777,
-21.03053445,
-21.08029591,
-20.10863677,
-33.48185979,
-19.47301382,
-20.77150242,
-20.11727696,
-20.56031676,
-20.57193964,
-32.30366295,
-18.07551522,
-22.58887909,
-19.1585905,
-16.02994331,
-24.30968545,
-20.92932218,
-21.96797222,
-24.29571895,
-23.45968569,
-22.43665883,
-20.46568904,
-22.75551606,
-19.6637215,
-18.36288463,
]
),
},
}
]
for test_case in test_cases:
result_best_probs, result_best_paths = target(**test_case["input"])
try:
assert isinstance(result_best_probs, np.ndarray)
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]) + "index 0",
"expected": np.ndarray,
"got": type(result_best_probs),
}
)
print(
f"Wrong output type .\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert isinstance(result_best_paths, np.ndarray)
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]) + "index 1",
"expected": np.ndarray,
"got": type(result_best_paths),
}
)
print(
f"Wrong output type .\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert result_best_probs.shape == test_case["expected"]["best_probs_shape"]
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]),
"expected": test_case["expected"]["best_probs_shape"],
"got": result_best_probs.shape,
}
)
print(
f"Wrong output shape for best_probs.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert result_best_paths.shape == test_case["expected"]["best_paths_shape"]
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]),
"expected": test_case["expected"]["best_paths_shape"],
"got": result_best_paths.shape,
}
)
print(
f"Wrong output shape for best_paths.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert np.allclose(
result_best_probs[:, 0], test_case["expected"]["best_probs_col0"]
)
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]),
"expected": test_case["expected"]["best_probs_col0"],
"got": result_best_probs[:, 0],
}
)
print(
f"Wrong non-zero values for best_probs.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert np.all((result_best_paths == 0))
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]),
"expected": "Array of zeros with shape (46, 34199)",
}
)
print(
f"Wrong values for best_paths.\n\t Expected: {failed_cases[-1].get('expected')}."
)
if len(failed_cases) == 0:
print("\033[92m All tests passed")
else:
print("\033[92m", successful_cases, " Tests passed")
print("\033[91m", len(failed_cases), " Tests failed")
# return failed_cases, len(failed_cases) + successful_cases
def test_viterbi_forward(target, A, B, test_corpus, vocab):
    """Test `target` (the learner's viterbi_forward function).

    Loads pre-computed initial best_probs/best_paths matrices from pickled
    support files, runs the forward pass, and spot-checks the [0:5, 0:5] and
    [30:35, 30:35] sub-blocks of both result matrices.

    Prints a colored per-check report to stdout; returns None.
    """
    successful_cases = 0
    failed_cases = []
    # Load the pickled reference matrices, closing the files afterwards
    # (the original left the file handles open).
    with open("./support_files/best_probs_initilized.pkl", "rb") as probs_file:
        initial_best_probs = pickle.load(probs_file)
    with open("./support_files/best_paths_initilized.pkl", "rb") as paths_file:
        initial_best_paths = pickle.load(paths_file)
    test_cases = [
        {
            "name": "default_check",
            "input": {
                "A": A,
                "B": B,
                "test_corpus": test_corpus,
                "best_probs": initial_best_probs,
                "best_paths": initial_best_paths,
                "vocab": vocab,
                "verbose": False,
            },
            "expected": {
                "best_probs0:5": np.array([
                    [-22.60982633, -24.78215633, -34.08246498, -34.34107105, -49.56012613],
                    [-23.07660654, -24.51583896, -35.04774303, -35.28281026, -50.52540418],
                    [-23.57298822, -29.98305064, -31.98004656, -38.99187549, -47.45770771],
                    [-19.76726066, -25.7122143, -31.54577612, -37.38331695, -47.02343727],
                    [-24.74325104, -28.78696025, -31.458494, -36.00456711, -46.93615515],
                ]),
                "best_probs30:35": np.array([
                    [-202.75618827, -208.38838519, -210.46938402, -210.15943098, -223.79223672],
                    [-202.58297597, -217.72266765, -207.23725672, -215.529735, -224.13957203],
                    [-202.00878092, -214.23093833, -217.41021623, -220.73768708, -222.03338753],
                    [-200.44016117, -209.46937757, -209.06951664, -216.22297765, -221.09669653],
                    [-208.74189499, -214.62088817, -209.79346523, -213.52623459, -228.70417526],
                ]),
                "best_paths0:5": np.array([
                    [0, 11, 20, 25, 20],
                    [0, 11, 20, 25, 20],
                    [0, 11, 20, 25, 20],
                    [0, 11, 20, 25, 20],
                    [0, 11, 20, 25, 20],
                ]),
                "best_paths30:35": np.array([
                    [20, 19, 35, 11, 21],
                    [20, 19, 35, 11, 21],
                    [20, 19, 35, 11, 21],
                    [20, 19, 35, 11, 21],
                    [35, 19, 35, 11, 34],
                ]),
            },
        }
    ]

    for test_case in test_cases:
        result_best_probs, result_best_paths = target(**test_case["input"])
        # narrowed from bare `except:` throughout so Ctrl-C is not swallowed
        try:
            assert isinstance(result_best_probs, np.ndarray)
            successful_cases += 1
        except Exception:
            failed_cases.append({"name": str(test_case["name"]) + "index 0", "expected": np.ndarray, "got": type(result_best_probs)})
            print(f"Wrong output type .\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}.")
        try:
            assert isinstance(result_best_paths, np.ndarray)
            successful_cases += 1
        except Exception:
            failed_cases.append({"name": str(test_case["name"]) + "index 1", "expected": np.ndarray, "got": type(result_best_paths)})
            print(f"Wrong output type .\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}.")
        try:
            assert np.allclose(result_best_probs[0:5, 0:5], test_case["expected"]["best_probs0:5"])
            successful_cases += 1
        except Exception:
            failed_cases.append({"name": str(test_case["name"]), "expected": test_case["expected"]["best_probs0:5"], "got": result_best_probs[0:5, 0:5]})
            print(f"Wrong values for best_probs.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}.")
        try:
            assert np.allclose(result_best_probs[30:35, 30:35], test_case["expected"]["best_probs30:35"])
            successful_cases += 1
        except Exception:
            # BUG FIX: record the sub-block that was actually compared
            # (the original recorded result_best_probs[:, 0]).
            failed_cases.append({"name": str(test_case["name"]), "expected": test_case["expected"]["best_probs30:35"], "got": result_best_probs[30:35, 30:35]})
            print(f"Wrong values for best_probs.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}.")
        try:
            assert np.allclose(result_best_paths[0:5, 0:5], test_case["expected"]["best_paths0:5"])
            successful_cases += 1
        except Exception:
            failed_cases.append({"name": str(test_case["name"]), "expected": test_case["expected"]["best_paths0:5"], "got": result_best_paths[0:5, 0:5]})
            print(f"Wrong values for best_paths.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}.")
        try:
            assert np.allclose(result_best_paths[30:35, 30:35], test_case["expected"]["best_paths30:35"])
            successful_cases += 1
        except Exception:
            failed_cases.append({"name": str(test_case["name"]), "expected": test_case["expected"]["best_paths30:35"], "got": result_best_paths[30:35, 30:35]})
            print(f"Wrong values for best_paths.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}.")

    if len(failed_cases) == 0:
        print("\033[92m All tests passed")
    else:
        print("\033[92m", successful_cases, " Tests passed")
        print("\033[91m", len(failed_cases), " Tests failed")
def test_viterbi_backward(target, corpus, states):
    """Test `target` (the learner's viterbi_backward function).

    Loads pre-computed trained best_probs/best_paths matrices from pickled
    support files, runs the backward pass, and checks the returned prediction
    list's type, length, and its first and last ten tags.

    Prints a colored per-check report to stdout; returns None.
    """
    successful_cases = 0
    failed_cases = []
    # Load the pickled reference matrices, closing the files afterwards
    # (the original left the file handles open).
    with open("./support_files/best_probs_trained.pkl", "rb") as probs_file:
        trained_best_probs = pickle.load(probs_file)
    with open("./support_files/best_paths_trained.pkl", "rb") as paths_file:
        trained_best_paths = pickle.load(paths_file)
    test_cases = [
        {
            "name": "default_check",
            "input": {
                "corpus": corpus,
                "best_probs": trained_best_probs,
                "best_paths": trained_best_paths,
                "states": states,
            },
            "expected": {
                "pred_len": 34199,
                "pred_head": ["DT", "NN", "POS", "NN", "MD", "VB", "VBN", "IN", "JJ", "NN"],
                "pred_tail": ["PRP", "MD", "RB", "VB", "PRP", "RB", "IN", "PRP", ".", "--s--"],
            },
        }
    ]

    for test_case in test_cases:
        result = target(**test_case["input"])
        # narrowed from bare `except:` throughout so Ctrl-C is not swallowed
        try:
            assert isinstance(result, list)
            successful_cases += 1
        except Exception:
            failed_cases.append({"name": str(test_case["name"]), "expected": list, "got": type(result)})
            print(f"Wrong output type .\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}.")
        try:
            assert len(result) == test_case["expected"]["pred_len"]
            successful_cases += 1
        except Exception:
            failed_cases.append({"name": str(test_case["name"]), "expected": test_case["expected"]["pred_len"], "got": len(result)})
            # BUG FIX: typo "lenght" -> "length"
            print(f"Wrong output length.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}.")
        try:
            assert result[:10] == test_case["expected"]["pred_head"]
            successful_cases += 1
        except Exception:
            failed_cases.append({"name": str(test_case["name"]), "expected": test_case["expected"]["pred_head"], "got": result[:10]})
            print(f"Wrong values for pred list.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}.")
        try:
            assert result[-10:] == test_case["expected"]["pred_tail"]
            successful_cases += 1
        except Exception:
            failed_cases.append({"name": str(test_case["name"]), "expected": test_case["expected"]["pred_tail"], "got": result[-10:]})
            print(f"Wrong values for pred list.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}.")

    if len(failed_cases) == 0:
        print("\033[92m All tests passed")
    else:
        print("\033[92m", successful_cases, " Tests passed")
        print("\033[91m", len(failed_cases), " Tests failed")
def test_compute_accuracy(target, pred, y):
successful_cases = 0
failed_cases = []
test_cases = [
{
"name": "default_check",
"input": {"pred": pred, "y": y},
"expected": 0.953063647155511,
},
{
"name": "small_check",
"input": {"pred": pred[:100], "y": y[:100]},
"expected": 0.979381443298969,
},
]
for test_case in test_cases:
result = target(**test_case["input"])
try:
assert isinstance(result, float)
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]),
"expected": float,
"got": type(result),
}
)
print(
f"Wrong output type.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
try:
assert np.isclose(result, test_case["expected"])
successful_cases += 1
except:
failed_cases.append(
{
"name": str(test_case["name"]),
"expected": float,
"got": type(result),
}
)
print(
f"Wrong output type.\n\t Expected: {failed_cases[-1].get('expected')}.\n\t Got: {failed_cases[-1].get('got')}."
)
if len(failed_cases) == 0:
print("\033[92m All tests passed")
else:
print("\033[92m", successful_cases, " Tests passed")
print("\033[91m", len(failed_cases), " Tests failed")
# return failed_cases, len(failed_cases) + successful_cases
| [
"numpy.array",
"numpy.all",
"numpy.allclose",
"numpy.isclose"
] | [((16263, 16322), 'numpy.allclose', 'np.allclose', (['result[0:5, 0:5]', "test_case['expected']['0:5']"], {}), "(result[0:5, 0:5], test_case['expected']['0:5'])\n", (16274, 16322), True, 'import numpy as np\n'), ((16799, 16864), 'numpy.allclose', 'np.allclose', (['result[30:35, 30:35]', "test_case['expected']['30:35']"], {}), "(result[30:35, 30:35], test_case['expected']['30:35'])\n", (16810, 16864), True, 'import numpy as np\n'), ((8484, 8525), 'numpy.isclose', 'np.isclose', (['result', "test_case['expected']"], {}), "(result, test_case['expected'])\n", (8494, 8525), True, 'import numpy as np\n'), ((24787, 24846), 'numpy.allclose', 'np.allclose', (['result[0:5, 0:5]', "test_case['expected']['0:5']"], {}), "(result[0:5, 0:5], test_case['expected']['0:5'])\n", (24798, 24846), True, 'import numpy as np\n'), ((25379, 25444), 'numpy.allclose', 'np.allclose', (['result[30:35, 30:35]', "test_case['expected']['30:35']"], {}), "(result[30:35, 30:35], test_case['expected']['30:35'])\n", (25390, 25444), True, 'import numpy as np\n'), ((31028, 31106), 'numpy.allclose', 'np.allclose', (['result_best_probs[:, 0]', "test_case['expected']['best_probs_col0']"], {}), "(result_best_probs[:, 0], test_case['expected']['best_probs_col0'])\n", (31039, 31106), True, 'import numpy as np\n'), ((31661, 31691), 'numpy.all', 'np.all', (['(result_best_paths == 0)'], {}), '(result_best_paths == 0)\n', (31667, 31691), True, 'import numpy as np\n'), ((37796, 37881), 'numpy.allclose', 'np.allclose', (['result_best_probs[0:5, 0:5]', "test_case['expected']['best_probs0:5']"], {}), "(result_best_probs[0:5, 0:5], test_case['expected']['best_probs0:5']\n )\n", (37807, 37881), True, 'import numpy as np\n'), ((38424, 38515), 'numpy.allclose', 'np.allclose', (['result_best_probs[30:35, 30:35]', "test_case['expected']['best_probs30:35']"], {}), "(result_best_probs[30:35, 30:35], test_case['expected'][\n 'best_probs30:35'])\n", (38435, 38515), True, 'import numpy as np\n'), ((39073, 39158), 
'numpy.allclose', 'np.allclose', (['result_best_paths[0:5, 0:5]', "test_case['expected']['best_paths0:5']"], {}), "(result_best_paths[0:5, 0:5], test_case['expected']['best_paths0:5']\n )\n", (39084, 39158), True, 'import numpy as np\n'), ((39702, 39793), 'numpy.allclose', 'np.allclose', (['result_best_paths[30:35, 30:35]', "test_case['expected']['best_paths30:35']"], {}), "(result_best_paths[30:35, 30:35], test_case['expected'][\n 'best_paths30:35'])\n", (39713, 39793), True, 'import numpy as np\n'), ((45267, 45308), 'numpy.isclose', 'np.isclose', (['result', "test_case['expected']"], {}), "(result, test_case['expected'])\n", (45277, 45308), True, 'import numpy as np\n'), ((3313, 3346), 'numpy.isclose', 'np.isclose', (['result_emission[k]', 'v'], {}), '(result_emission[k], v)\n', (3323, 3346), True, 'import numpy as np\n'), ((5134, 5169), 'numpy.isclose', 'np.isclose', (['result_transition[k]', 'v'], {}), '(result_transition[k], v)\n', (5144, 5169), True, 'import numpy as np\n'), ((6864, 6892), 'numpy.isclose', 'np.isclose', (['result_tag[k]', 'v'], {}), '(result_tag[k], v)\n', (6874, 6892), True, 'import numpy as np\n'), ((9664, 10104), 'numpy.array', 'np.array', (['[[7.03997297e-06, 7.03997297e-06, 7.03997297e-06, 7.03997297e-06, \n 7.03997297e-06], [1.35647553e-07, 1.35647553e-07, 1.35647553e-07, \n 1.35647553e-07, 1.35647553e-07], [1.44528595e-07, 0.000144673124, \n 0.00693751711, 0.00679298851, 0.00505864537], [7.3203977e-07, \n 0.169101919, 7.3203977e-07, 7.3203977e-07, 7.3203977e-07], [\n 7.26719892e-07, 0.000727446612, 7.26719892e-07, 0.000727446612, \n 7.26719892e-07]]'], {}), '([[7.03997297e-06, 7.03997297e-06, 7.03997297e-06, 7.03997297e-06, \n 7.03997297e-06], [1.35647553e-07, 1.35647553e-07, 1.35647553e-07, \n 1.35647553e-07, 1.35647553e-07], [1.44528595e-07, 0.000144673124, \n 0.00693751711, 0.00679298851, 0.00505864537], [7.3203977e-07, \n 0.169101919, 7.3203977e-07, 7.3203977e-07, 7.3203977e-07], [\n 7.26719892e-07, 0.000727446612, 7.26719892e-07, 
0.000727446612, \n 7.26719892e-07]])\n', (9672, 10104), True, 'import numpy as np\n'), ((11127, 11570), 'numpy.array', 'np.array', (['[[2.21706877e-06, 2.21706877e-06, 2.21706877e-06, 0.00887049214, \n 2.21706877e-06], [3.75650909e-07, 0.000751677469, 3.75650909e-07, \n 0.0510888993, 3.75650909e-07], [1.72277159e-05, 1.72277159e-05, \n 1.72277159e-05, 1.72277159e-05, 1.72277159e-05], [4.47733569e-05, \n 4.47286283e-08, 4.47286283e-08, 8.95019852e-05, 4.47733569e-05], [\n 1.03043917e-05, 1.03043917e-05, 1.03043917e-05, 0.0618366548, 0.0309234796]\n ]'], {}), '([[2.21706877e-06, 2.21706877e-06, 2.21706877e-06, 0.00887049214, \n 2.21706877e-06], [3.75650909e-07, 0.000751677469, 3.75650909e-07, \n 0.0510888993, 3.75650909e-07], [1.72277159e-05, 1.72277159e-05, \n 1.72277159e-05, 1.72277159e-05, 1.72277159e-05], [4.47733569e-05, \n 4.47286283e-08, 4.47286283e-08, 8.95019852e-05, 4.47733569e-05], [\n 1.03043917e-05, 1.03043917e-05, 1.03043917e-05, 0.0618366548, \n 0.0309234796]])\n', (11135, 11570), True, 'import numpy as np\n'), ((12852, 13295), 'numpy.array', 'np.array', (['[[0.000346500347, 0.000346500347, 0.000346500347, 0.000346500347, \n 0.000346500347], [6.78030457e-06, 6.78030457e-06, 6.78030457e-06, \n 6.78030457e-06, 6.78030457e-06], [7.2240764e-06, 0.000151705604, \n 0.00694233742, 0.00679785589, 0.00506407756], [3.65416941e-05, \n 0.168859168, 3.65416941e-05, 3.65416941e-05, 3.65416941e-05], [\n 3.62765726e-05, 0.000761808024, 3.62765726e-05, 0.000761808024, \n 3.62765726e-05]]'], {}), '([[0.000346500347, 0.000346500347, 0.000346500347, 0.000346500347, \n 0.000346500347], [6.78030457e-06, 6.78030457e-06, 6.78030457e-06, \n 6.78030457e-06, 6.78030457e-06], [7.2240764e-06, 0.000151705604, \n 0.00694233742, 0.00679785589, 0.00506407756], [3.65416941e-05, \n 0.168859168, 3.65416941e-05, 3.65416941e-05, 3.65416941e-05], [\n 3.62765726e-05, 0.000761808024, 3.62765726e-05, 0.000761808024, \n 3.62765726e-05]])\n', (12860, 13295), True, 'import numpy as np\n'), 
((14315, 14757), 'numpy.array', 'np.array', (['[[0.000110302228, 0.000110302228, 0.000110302228, 0.00893448048, \n 0.000110302228], [1.87666554e-05, 0.000769432872, 1.87666554e-05, \n 0.0510640694, 1.87666554e-05], [0.000829187396, 0.000829187396, \n 0.000829187396, 0.000829187396, 0.000829187396], [4.69603252e-05, \n 2.23620596e-06, 2.23620596e-06, 9.16844445e-05, 4.69603252e-05], [\n 0.000503524673, 0.000503524673, 0.000503524673, 0.0609264854, 0.030715005]]'], {}), '([[0.000110302228, 0.000110302228, 0.000110302228, 0.00893448048, \n 0.000110302228], [1.87666554e-05, 0.000769432872, 1.87666554e-05, \n 0.0510640694, 1.87666554e-05], [0.000829187396, 0.000829187396, \n 0.000829187396, 0.000829187396, 0.000829187396], [4.69603252e-05, \n 2.23620596e-06, 2.23620596e-06, 9.16844445e-05, 4.69603252e-05], [\n 0.000503524673, 0.000503524673, 0.000503524673, 0.0609264854, 0.030715005]]\n )\n', (14323, 14757), True, 'import numpy as np\n'), ((18034, 18477), 'numpy.array', 'np.array', (['[[6.03219988e-06, 6.03219988e-06, 0.856578416, 6.03219988e-06, \n 6.03219988e-06], [1.35212298e-07, 1.35212298e-07, 1.35212298e-07, \n 0.97136528, 1.35212298e-07], [1.44034584e-07, 1.44034584e-07, \n 1.44034584e-07, 1.44034584e-07, 1.44034584e-07], [7.19539897e-07, \n 7.19539897e-07, 7.19539897e-07, 7.19539897e-07, 7.19539897e-07], [\n 7.14399508e-07, 7.14399508e-07, 7.14399508e-07, 7.14399508e-07, \n 7.14399508e-07]]'], {}), '([[6.03219988e-06, 6.03219988e-06, 0.856578416, 6.03219988e-06, \n 6.03219988e-06], [1.35212298e-07, 1.35212298e-07, 1.35212298e-07, \n 0.97136528, 1.35212298e-07], [1.44034584e-07, 1.44034584e-07, \n 1.44034584e-07, 1.44034584e-07, 1.44034584e-07], [7.19539897e-07, \n 7.19539897e-07, 7.19539897e-07, 7.19539897e-07, 7.19539897e-07], [\n 7.14399508e-07, 7.14399508e-07, 7.14399508e-07, 7.14399508e-07, \n 7.14399508e-07]])\n', (18042, 18477), True, 'import numpy as np\n'), ((19497, 19943), 'numpy.array', 'np.array', (['[[2.10625199e-06, 2.10625199e-06, 2.10625199e-06, 
2.10625199e-06, \n 2.10625199e-06], [3.72331731e-07, 3.72331731e-07, 3.72331731e-07, \n 3.72331731e-07, 3.72331731e-07], [1.22283772e-05, 0.0122406055, \n 1.22283772e-05, 1.22283772e-05, 1.22283772e-05], [4.46812012e-08, \n 4.46812012e-08, 4.46812012e-08, 4.46812012e-08, 4.46812012e-08], [\n 8.27972213e-06, 0.0496866125, 8.27972213e-06, 8.27972213e-06, \n 8.27972213e-06]]'], {}), '([[2.10625199e-06, 2.10625199e-06, 2.10625199e-06, 2.10625199e-06, \n 2.10625199e-06], [3.72331731e-07, 3.72331731e-07, 3.72331731e-07, \n 3.72331731e-07, 3.72331731e-07], [1.22283772e-05, 0.0122406055, \n 1.22283772e-05, 1.22283772e-05, 1.22283772e-05], [4.46812012e-08, \n 4.46812012e-08, 4.46812012e-08, 4.46812012e-08, 4.46812012e-08], [\n 8.27972213e-06, 0.0496866125, 8.27972213e-06, 8.27972213e-06, \n 8.27972213e-06]])\n', (19505, 19943), True, 'import numpy as np\n'), ((21250, 21694), 'numpy.array', 'np.array', (['[[3.75699741e-05, 3.75699741e-05, 0.106736296, 3.75699741e-05, \n 3.75699741e-05], [5.84054154e-06, 5.84054154e-06, 5.84054154e-06, \n 0.839174848, 5.84054154e-06], [6.16686298e-06, 6.16686298e-06, \n 6.16686298e-06, 6.16686298e-06, 6.16686298e-06], [1.95706206e-05, \n 1.95706206e-05, 1.95706206e-05, 1.95706206e-05, 1.95706206e-05], [\n 1.94943174e-05, 1.94943174e-05, 1.94943174e-05, 1.94943174e-05, \n 1.94943174e-05]]'], {}), '([[3.75699741e-05, 3.75699741e-05, 0.106736296, 3.75699741e-05, \n 3.75699741e-05], [5.84054154e-06, 5.84054154e-06, 5.84054154e-06, \n 0.839174848, 5.84054154e-06], [6.16686298e-06, 6.16686298e-06, \n 6.16686298e-06, 6.16686298e-06, 6.16686298e-06], [1.95706206e-05, \n 1.95706206e-05, 1.95706206e-05, 1.95706206e-05, 1.95706206e-05], [\n 1.94943174e-05, 1.94943174e-05, 1.94943174e-05, 1.94943174e-05, \n 1.94943174e-05]])\n', (21258, 21694), True, 'import numpy as np\n'), ((22713, 23162), 'numpy.array', 'np.array', (['[[3.04905937e-05, 3.04905937e-05, 3.04905937e-05, 3.04905937e-05, \n 3.04905937e-05], [1.29841464e-05, 1.29841464e-05, 1.29841464e-05, 
\n 1.29841464e-05, 1.29841464e-05], [4.01010547e-05, 0.000842122148, \n 4.01010547e-05, 4.01010547e-05, 4.01010547e-05], [2.12351646e-06, \n 2.12351646e-06, 2.12351646e-06, 2.12351646e-06, 2.12351646e-06], [\n 3.88847844e-05, 0.00470505891, 3.88847844e-05, 3.88847844e-05, \n 3.88847844e-05]]'], {}), '([[3.04905937e-05, 3.04905937e-05, 3.04905937e-05, 3.04905937e-05, \n 3.04905937e-05], [1.29841464e-05, 1.29841464e-05, 1.29841464e-05, \n 1.29841464e-05, 1.29841464e-05], [4.01010547e-05, 0.000842122148, \n 4.01010547e-05, 4.01010547e-05, 4.01010547e-05], [2.12351646e-06, \n 2.12351646e-06, 2.12351646e-06, 2.12351646e-06, 2.12351646e-06], [\n 3.88847844e-05, 0.00470505891, 3.88847844e-05, 3.88847844e-05, \n 3.88847844e-05]])\n', (22721, 23162), True, 'import numpy as np\n'), ((26797, 27490), 'numpy.array', 'np.array', (['[-22.60982633, -23.07660654, -23.57298822, -19.76726066, -24.74325104, -\n 35.20241402, -35.00096024, -34.99203854, -21.35069072, -19.85767814, -\n 21.92098414, -4.01623741, -19.16380593, -21.1062242, -20.47163973, -\n 21.10157273, -21.49584851, -20.4811853, -18.25856307, -23.39717471, -\n 21.92146798, -9.41377777, -21.03053445, -21.08029591, -20.10863677, -\n 33.48185979, -19.47301382, -20.77150242, -20.11727696, -20.56031676, -\n 20.57193964, -32.30366295, -18.07551522, -22.58887909, -19.1585905, -\n 16.02994331, -24.30968545, -20.92932218, -21.96797222, -24.29571895, -\n 23.45968569, -22.43665883, -20.46568904, -22.75551606, -19.6637215, -\n 18.36288463]'], {}), '([-22.60982633, -23.07660654, -23.57298822, -19.76726066, -\n 24.74325104, -35.20241402, -35.00096024, -34.99203854, -21.35069072, -\n 19.85767814, -21.92098414, -4.01623741, -19.16380593, -21.1062242, -\n 20.47163973, -21.10157273, -21.49584851, -20.4811853, -18.25856307, -\n 23.39717471, -21.92146798, -9.41377777, -21.03053445, -21.08029591, -\n 20.10863677, -33.48185979, -19.47301382, -20.77150242, -20.11727696, -\n 20.56031676, -20.57193964, -32.30366295, -18.07551522, -22.58887909, 
-\n 19.1585905, -16.02994331, -24.30968545, -20.92932218, -21.96797222, -\n 24.29571895, -23.45968569, -22.43665883, -20.46568904, -22.75551606, -\n 19.6637215, -18.36288463])\n', (26805, 27490), True, 'import numpy as np\n'), ((33062, 33453), 'numpy.array', 'np.array', (['[[-22.60982633, -24.78215633, -34.08246498, -34.34107105, -49.56012613], [-\n 23.07660654, -24.51583896, -35.04774303, -35.28281026, -50.52540418], [\n -23.57298822, -29.98305064, -31.98004656, -38.99187549, -47.45770771],\n [-19.76726066, -25.7122143, -31.54577612, -37.38331695, -47.02343727],\n [-24.74325104, -28.78696025, -31.458494, -36.00456711, -46.93615515]]'], {}), '([[-22.60982633, -24.78215633, -34.08246498, -34.34107105, -\n 49.56012613], [-23.07660654, -24.51583896, -35.04774303, -35.28281026, \n -50.52540418], [-23.57298822, -29.98305064, -31.98004656, -38.99187549,\n -47.45770771], [-19.76726066, -25.7122143, -31.54577612, -37.38331695, \n -47.02343727], [-24.74325104, -28.78696025, -31.458494, -36.00456711, -\n 46.93615515]])\n', (33070, 33453), True, 'import numpy as np\n'), ((34482, 34900), 'numpy.array', 'np.array', (['[[-202.75618827, -208.38838519, -210.46938402, -210.15943098, -223.79223672\n ], [-202.58297597, -217.72266765, -207.23725672, -215.529735, -\n 224.13957203], [-202.00878092, -214.23093833, -217.41021623, -\n 220.73768708, -222.03338753], [-200.44016117, -209.46937757, -\n 209.06951664, -216.22297765, -221.09669653], [-208.74189499, -\n 214.62088817, -209.79346523, -213.52623459, -228.70417526]]'], {}), '([[-202.75618827, -208.38838519, -210.46938402, -210.15943098, -\n 223.79223672], [-202.58297597, -217.72266765, -207.23725672, -\n 215.529735, -224.13957203], [-202.00878092, -214.23093833, -\n 217.41021623, -220.73768708, -222.03338753], [-200.44016117, -\n 209.46937757, -209.06951664, -216.22297765, -221.09669653], [-\n 208.74189499, -214.62088817, -209.79346523, -213.52623459, -228.70417526]])\n', (34490, 34900), True, 'import numpy as np\n'), ((35926, 36045), 
'numpy.array', 'np.array', (['[[0, 11, 20, 25, 20], [0, 11, 20, 25, 20], [0, 11, 20, 25, 20], [0, 11, 20,\n 25, 20], [0, 11, 20, 25, 20]]'], {}), '([[0, 11, 20, 25, 20], [0, 11, 20, 25, 20], [0, 11, 20, 25, 20], [0,\n 11, 20, 25, 20], [0, 11, 20, 25, 20]])\n', (35934, 36045), True, 'import numpy as np\n'), ((36259, 36383), 'numpy.array', 'np.array', (['[[20, 19, 35, 11, 21], [20, 19, 35, 11, 21], [20, 19, 35, 11, 21], [20, 19,\n 35, 11, 21], [35, 19, 35, 11, 34]]'], {}), '([[20, 19, 35, 11, 21], [20, 19, 35, 11, 21], [20, 19, 35, 11, 21],\n [20, 19, 35, 11, 21], [35, 19, 35, 11, 34]])\n', (36267, 36383), True, 'import numpy as np\n')] |
import cv2 as cv
import numpy as np

# Demo: masking an image with circular, rectangular, and combined masks.
img = cv.imread('Photos/cats.jpg')
cv.imshow('Cats', img)

# Single-channel black canvas matching the image's height/width.
blank = np.zeros(img.shape[:2], dtype='uint8')
cv.imshow("Blank Image", blank)

center = (img.shape[1] // 2, img.shape[0] // 2)

# Filled white circle of radius 100 around the image centre.
mask = cv.circle(blank.copy(), center, 100, 255, -1)
cv.imshow("mask", mask)

# Filled white rectangle whose top-left corner sits at the image centre.
corner = (center[0] + 100, center[1] + 100)
mask2 = cv.rectangle(blank.copy(), center, corner, 255, -1)
cv.imshow("mask2", mask2)

masked = cv.bitwise_and(img, img, mask=mask)
cv.imshow("Masked", masked)

masked2 = cv.bitwise_and(img, img, mask=mask2)
cv.imshow("Masked2", masked2)

# Intersection of a rectangle and a circle gives an irregular mask shape.
rectangle = cv.rectangle(blank.copy(), (30, 30), (370, 370), 255, -1)
circle = cv.circle(blank.copy(), (200, 200), 200, 255, -1)
weird_shape = cv.bitwise_and(rectangle, circle)
cv.imshow("Weird shape", weird_shape)

masked3 = cv.bitwise_and(img, img, mask=weird_shape)
cv.imshow("Masked3", masked3)

cv.waitKey(0)
| [
"cv2.bitwise_and",
"cv2.imshow",
"numpy.zeros",
"cv2.waitKey",
"cv2.imread"
] | [((43, 71), 'cv2.imread', 'cv.imread', (['"""Photos/cats.jpg"""'], {}), "('Photos/cats.jpg')\n", (52, 71), True, 'import cv2 as cv\n'), ((72, 94), 'cv2.imshow', 'cv.imshow', (['"""Cats"""', 'img'], {}), "('Cats', img)\n", (81, 94), True, 'import cv2 as cv\n'), ((104, 142), 'numpy.zeros', 'np.zeros', (['img.shape[:2]'], {'dtype': '"""uint8"""'}), "(img.shape[:2], dtype='uint8')\n", (112, 142), True, 'import numpy as np\n'), ((144, 175), 'cv2.imshow', 'cv.imshow', (['"""Blank Image"""', 'blank'], {}), "('Blank Image', blank)\n", (153, 175), True, 'import cv2 as cv\n'), ((258, 281), 'cv2.imshow', 'cv.imshow', (['"""mask"""', 'mask'], {}), "('mask', mask)\n", (267, 281), True, 'import cv2 as cv\n'), ((407, 432), 'cv2.imshow', 'cv.imshow', (['"""mask2"""', 'mask2'], {}), "('mask2', mask2)\n", (416, 432), True, 'import cv2 as cv\n'), ((443, 478), 'cv2.bitwise_and', 'cv.bitwise_and', (['img', 'img'], {'mask': 'mask'}), '(img, img, mask=mask)\n', (457, 478), True, 'import cv2 as cv\n'), ((481, 508), 'cv2.imshow', 'cv.imshow', (['"""Masked"""', 'masked'], {}), "('Masked', masked)\n", (490, 508), True, 'import cv2 as cv\n'), ((520, 556), 'cv2.bitwise_and', 'cv.bitwise_and', (['img', 'img'], {'mask': 'mask2'}), '(img, img, mask=mask2)\n', (534, 556), True, 'import cv2 as cv\n'), ((559, 588), 'cv2.imshow', 'cv.imshow', (['"""Masked2"""', 'masked2'], {}), "('Masked2', masked2)\n", (568, 588), True, 'import cv2 as cv\n'), ((734, 767), 'cv2.bitwise_and', 'cv.bitwise_and', (['rectangle', 'circle'], {}), '(rectangle, circle)\n', (748, 767), True, 'import cv2 as cv\n'), ((768, 805), 'cv2.imshow', 'cv.imshow', (['"""Weird shape"""', 'weird_shape'], {}), "('Weird shape', weird_shape)\n", (777, 805), True, 'import cv2 as cv\n'), ((817, 859), 'cv2.bitwise_and', 'cv.bitwise_and', (['img', 'img'], {'mask': 'weird_shape'}), '(img, img, mask=weird_shape)\n', (831, 859), True, 'import cv2 as cv\n'), ((862, 891), 'cv2.imshow', 'cv.imshow', (['"""Masked3"""', 'masked3'], {}), "('Masked3', 
masked3)\n", (871, 891), True, 'import cv2 as cv\n'), ((893, 906), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (903, 906), True, 'import cv2 as cv\n')] |
import numpy as np
from randomcsv.CategoryColumn import CategoryColumn
def test_should_pick_class_at_random():
    """A seeded category column must reproduce the expected draw sequence."""
    column = CategoryColumn('Class', ['A', 'B', 'C'], random_state=42)
    series = column.generate_entries(5)
    for idx, label in enumerate(['C', 'A', 'C', 'C', 'A']):
        assert series.at[idx] == label
def test_can_set_type_of_classes():
    """Categories may be of any type, e.g. plain integers."""
    column = CategoryColumn('IntClass', [1, 2, 3], random_state=42)
    series = column.generate_entries(5)
    for idx, label in enumerate([3, 1, 3, 3, 1]):
        assert series.at[idx] == label
def test_default_null_elements_are_chosen_by_pandas():
    """Without an explicit null_element, missing entries default to NaN."""
    column = CategoryColumn("Category", [1, 2, 3], null_ratio=0.5, random_state=42)
    series = column.generate_entries(5)
    for idx, expected in enumerate([None, 1, 3, 3, None]):
        if expected is None:
            assert np.isnan(series[idx])
        else:
            assert series[idx] == expected
def test_null_elements_can_be_set():
    """A custom null_element replaces NaN for the missing entries."""
    column = CategoryColumn("Category", [1, 2, 3], null_ratio=0.5, null_element=-1, random_state=42)
    series = column.generate_entries(5)
    for idx, expected in enumerate([-1, 1, 3, 3, -1]):
        assert series[idx] == expected
| [
"randomcsv.CategoryColumn.CategoryColumn",
"numpy.isnan"
] | [((126, 183), 'randomcsv.CategoryColumn.CategoryColumn', 'CategoryColumn', (['"""Class"""', "['A', 'B', 'C']"], {'random_state': '(42)'}), "('Class', ['A', 'B', 'C'], random_state=42)\n", (140, 183), False, 'from randomcsv.CategoryColumn import CategoryColumn\n'), ((430, 484), 'randomcsv.CategoryColumn.CategoryColumn', 'CategoryColumn', (['"""IntClass"""', '[1, 2, 3]'], {'random_state': '(42)'}), "('IntClass', [1, 2, 3], random_state=42)\n", (444, 484), False, 'from randomcsv.CategoryColumn import CategoryColumn\n'), ((740, 810), 'randomcsv.CategoryColumn.CategoryColumn', 'CategoryColumn', (['"""Category"""', '[1, 2, 3]'], {'null_ratio': '(0.5)', 'random_state': '(42)'}), "('Category', [1, 2, 3], null_ratio=0.5, random_state=42)\n", (754, 810), False, 'from randomcsv.CategoryColumn import CategoryColumn\n'), ((862, 881), 'numpy.isnan', 'np.isnan', (['series[0]'], {}), '(series[0])\n', (870, 881), True, 'import numpy as np\n'), ((971, 990), 'numpy.isnan', 'np.isnan', (['series[4]'], {}), '(series[4])\n', (979, 990), True, 'import numpy as np\n'), ((1043, 1134), 'randomcsv.CategoryColumn.CategoryColumn', 'CategoryColumn', (['"""Category"""', '[1, 2, 3]'], {'null_ratio': '(0.5)', 'null_element': '(-1)', 'random_state': '(42)'}), "('Category', [1, 2, 3], null_ratio=0.5, null_element=-1,\n random_state=42)\n", (1057, 1134), False, 'from randomcsv.CategoryColumn import CategoryColumn\n')] |
import argparse
import logging
import os
import pickle
import shutil
import settings
import torch
import numpy as np
from IPython.core.debugger import Pdb
LOG_FILE = 'log.txt'
_LOG_LEVEL_STRINGS = ['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG']
EPSILON = 0.0000001
def clean_label_list(ylist):
    """Drop zero labels and collapse duplicates, preserving first-seen order.

    Zero acts as the background/empty label: it is stripped out, and the
    sentinel list ``[0]`` is returned only when nothing else remains.
    """
    nonzero = [y for y in ylist if y != 0]
    if not nonzero:
        return [0]
    seen = set()
    deduped = []
    for y in nonzero:
        if y in seen:
            continue
        seen.add(y)
        deduped.append(y)
    return deduped
def read_multilabel(filename, num_labels=0):
    """Parse a multi-label ground-truth file.

    Each line of *filename* holds comma-separated integer label ids; the
    first id on a line doubles as the single-label target.

    Args:
        filename: path to the label file.
        num_labels: width of the multi-hot matrix; 0 means infer it as
            (largest label id seen in the file) + 1.

    Returns:
        (y_single, y_multi): an (N,) array of first labels and an
        (N, num_labels) 0/1 multi-hot matrix.
    """
    with open(filename) as fh:
        records = [[int(tok) for tok in raw.strip().split(',')] for raw in fh]
    if num_labels == 0:
        num_labels = 1 + max(max(rec) for rec in records)
    y_single = np.zeros(len(records))
    y_multi = np.zeros((len(records), num_labels))
    for row, rec in enumerate(records):
        y_single[row] = rec[0]
        # Fancy indexing sets every listed label's slot in one shot.
        y_multi[row, rec] = 1
    return y_single, y_multi
def compute_performance(gt_file, pred_file):
    """Score predicted labels against multi-label ground truth.

    Args:
        gt_file: path to the multi-label ground-truth file (read by
            read_multilabel).
        pred_file: path to a text file of predicted label ids, one per row.

    Returns:
        (acc, multi_acc): exact-match accuracy against the first gt label,
        and accuracy counting a prediction correct if it appears anywhere
        in that row's label set.
    """
    preds = np.loadtxt(pred_file)
    y_single, y_multi = read_multilabel(gt_file)
    num = preds.shape[0]
    acc = (y_single == preds).sum() / num
    # A prediction is "multi-correct" if its multi-hot slot is set for the row.
    multi_acc = y_multi[np.arange(num), preds.astype(int)].sum() / num
    return acc, multi_acc
def get_template_id_maps(num_templates, exclude_t_ids):
    """Build index maps between the full and the filtered template id spaces.

    Ids listed in *exclude_t_ids* are dropped and the remaining ids are
    renumbered consecutively. Excluded ids map to 0 in the old->new map.

    Args:
        num_templates: highest template id (ids run 0..num_templates).
        exclude_t_ids: collection of ids to remove.

    Returns:
        (old_to_new, new_to_old) index lists.
    """
    old_to_new = [0] * (num_templates + 1)
    new_to_old = [0] * (num_templates + 1 - len(exclude_t_ids))
    next_new_id = 0
    for old_id in range(num_templates + 1):
        if old_id in exclude_t_ids:
            # Excluded ids keep the default 0 mapping.
            continue
        new_to_old[next_new_id] = old_id
        old_to_new[old_id] = next_new_id
        next_new_id += 1
    return old_to_new, new_to_old
def _log_level_string_to_int(log_level_string):
    """Translate a log-level name (e.g. 'DEBUG') to its logging constant.

    Raises argparse.ArgumentTypeError for names outside _LOG_LEVEL_STRINGS,
    so the function can be used directly as an argparse ``type=`` callback.
    """
    if log_level_string not in _LOG_LEVEL_STRINGS:
        raise argparse.ArgumentTypeError(
            'invalid choice: {0} (choose from {1})'.format(
                log_level_string, _LOG_LEVEL_STRINGS))
    log_level_int = getattr(logging, log_level_string, logging.INFO)
    # logging levels are plain ints; guard against that ever changing.
    assert isinstance(log_level_int, int)
    return log_level_int
def get_embed_size(base_model_file, use_ids):
    """Compute the total embedding width stored in a pickled base model.

    Args:
        base_model_file: path to a pickle holding 'entity_real',
            'entity_type', 'head_rel_type', 'tail_rel_type' and 'rel_real'
            arrays (embedding width is read from ``.shape[1]``).
        use_ids: when False, id embeddings are unused and the size is 0.

    Returns:
        int: 2 * (entity embedding width) + relation embedding width.

    Raises:
        Exception: if *base_model_file* does not exist (and use_ids is True).
    """
    if not use_ids:
        return 0
    if not os.path.exists(base_model_file):
        logging.error(
            'Base Model file not present at {}'.format(base_model_file))
        raise Exception('Base Model file not present')
    # Fix: close the pickle file deterministically -- the original passed a
    # bare open() to pickle.load and leaked the file handle.
    with open(base_model_file, 'rb') as fh:
        d = pickle.load(fh)
    ent_embed = d['entity_real'].shape[1] + d['entity_type'].shape[1]
    rel_embed = (d['head_rel_type'].shape[1] +
                 d['tail_rel_type'].shape[1] + d['rel_real'].shape[1])
    return int(2 * ent_embed + rel_embed)
def get_learning_rate(optimizer):
    """Return the learning rate of the optimizer's first parameter group.

    Returns None if the optimizer has no parameter groups.
    """
    groups = optimizer.param_groups
    if groups:
        return groups[0]['lr']
def save_checkpoint(state, epoch, isBest, checkpoint_file, best_file):
    """Serialize *state* to *checkpoint_file*; copy to *best_file*+'0' if best.

    Args:
        state: picklable training state (model/optimizer dicts etc.).
        epoch: current epoch (unused here; kept for call-site compatibility).
        isBest: whether this checkpoint is the best so far.
        checkpoint_file: destination path for the checkpoint.
        best_file: prefix for the best-model path ('0' is appended).
    """
    torch.save(state, checkpoint_file)
    if isBest:
        # Best checkpoints are suffixed with an index; only slot 0 is used.
        shutil.copyfile(checkpoint_file, best_file + str(0))
    logging.info('Saving checkpoint to {}'.format(checkpoint_file))
def log_sum_exp(x, dim=-1):
    """Numerically stable log(sum(exp(x))) reduced along *dim*.

    Subtracts the per-slice maximum before exponentiating so that large
    scores do not overflow.
    """
    m, _ = torch.max(x, dim)
    shifted = x - m.unsqueeze(dim).expand_as(x)
    return m + torch.log(torch.sum(torch.exp(shifted), dim))
class Map(dict):
    """A dict whose keys are also readable/writable as attributes.

    Example:
        m = Map({'first_name': 'Eduardo'}, last_name='Pool', age=24, sports=['Soccer'])
    """

    def __init__(self, *args, **kwargs):
        super(Map, self).__init__(*args, **kwargs)
        # Mirror every mapping/keyword entry into the attribute namespace
        # (goes through our __setitem__, which syncs __dict__).
        for arg in args:
            if isinstance(arg, dict):
                for key, value in arg.items():
                    self[key] = value
        for key, value in kwargs.items():
            self[key] = value

    def __getattr__(self, attr):
        # Only reached when normal lookup fails; absent keys yield None,
        # mirroring dict.get semantics.
        return self.get(attr)

    def __setattr__(self, key, value):
        self.__setitem__(key, value)

    def __setitem__(self, key, value):
        super(Map, self).__setitem__(key, value)
        # Keep __dict__ in sync so attribute reads hit the fast path.
        self.__dict__.update({key: value})

    def __delattr__(self, item):
        self.__delitem__(item)

    def __delitem__(self, key):
        super(Map, self).__delitem__(key)
        del self.__dict__[key]
| [
"os.path.exists",
"torch.max",
"argparse.ArgumentTypeError",
"torch.exp",
"shutil.copyfile",
"torch.save",
"numpy.loadtxt",
"numpy.arange"
] | [((1395, 1416), 'numpy.loadtxt', 'np.loadtxt', (['pred_file'], {}), '(pred_file)\n', (1405, 1416), True, 'import numpy as np\n'), ((2644, 2675), 'os.path.exists', 'os.path.exists', (['base_model_file'], {}), '(base_model_file)\n', (2658, 2675), False, 'import os\n'), ((3317, 3351), 'torch.save', 'torch.save', (['state', 'checkpoint_file'], {}), '(state, checkpoint_file)\n', (3327, 3351), False, 'import torch\n'), ((3576, 3593), 'torch.max', 'torch.max', (['x', 'dim'], {}), '(x, dim)\n', (3585, 3593), False, 'import torch\n'), ((2295, 2330), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['message'], {}), '(message)\n', (2321, 2330), False, 'import argparse\n'), ((3414, 3457), 'shutil.copyfile', 'shutil.copyfile', (['checkpoint_file', 'best_file'], {}), '(checkpoint_file, best_file)\n', (3429, 3457), False, 'import shutil\n'), ((3701, 3735), 'torch.exp', 'torch.exp', (['(x - max_score_broadcast)'], {}), '(x - max_score_broadcast)\n', (3710, 3735), False, 'import torch\n'), ((1541, 1566), 'numpy.arange', 'np.arange', (['preds.shape[0]'], {}), '(preds.shape[0])\n', (1550, 1566), True, 'import numpy as np\n')] |
# Copyright 2021 ETH Zurich and the NPBench authors. All rights reserved.
import numpy as np
def initialize(M, N, datatype=np.float64):
    """Build polybench-style inputs for a symm-like kernel.

    Args:
        M, N: matrix dimensions (A is M x M; B and C are M x N).
        datatype: numpy dtype used for all outputs.

    Returns:
        (alpha, beta, C, A, B); A is filled row-by-row on and below the
        diagonal, with -999 above it as a "never read" marker.
    """
    alpha = datatype(1.5)
    beta = datatype(1.2)
    C = np.fromfunction(lambda i, j: ((i + j) % 100) / M, (M, N), dtype=datatype)
    B = np.fromfunction(lambda i, j: ((N + i - j) % 100) / M, (M, N), dtype=datatype)
    A = np.empty((M, M), dtype=datatype)
    for row in range(M):
        lower = np.fromfunction(lambda j: ((row + j) % 100) / M,
                                (row + 1,), dtype=datatype)
        A[row, :row + 1] = lower
        A[row, row + 1:] = -999
    return alpha, beta, C, A, B
| [
"numpy.fromfunction",
"numpy.empty"
] | [((198, 269), 'numpy.fromfunction', 'np.fromfunction', (['(lambda i, j: (i + j) % 100 / M)', '(M, N)'], {'dtype': 'datatype'}), '(lambda i, j: (i + j) % 100 / M, (M, N), dtype=datatype)\n', (213, 269), True, 'import numpy as np\n'), ((304, 379), 'numpy.fromfunction', 'np.fromfunction', (['(lambda i, j: (N + i - j) % 100 / M)', '(M, N)'], {'dtype': 'datatype'}), '(lambda i, j: (N + i - j) % 100 / M, (M, N), dtype=datatype)\n', (319, 379), True, 'import numpy as np\n'), ((414, 446), 'numpy.empty', 'np.empty', (['(M, M)'], {'dtype': 'datatype'}), '((M, M), dtype=datatype)\n', (422, 446), True, 'import numpy as np\n'), ((493, 563), 'numpy.fromfunction', 'np.fromfunction', (['(lambda j: (i + j) % 100 / M)', '(i + 1,)'], {'dtype': 'datatype'}), '(lambda j: (i + j) % 100 / M, (i + 1,), dtype=datatype)\n', (508, 563), True, 'import numpy as np\n')] |
import numpy as np
class BinomialModel(object):
    """One node of a binomial option-pricing tree.

    Positional args: S0, u, d, R, q, euro, call, black.
    When *black* is False the risk-neutral probability q is derived as
    (R - d) / (u - d); otherwise args[4] is used verbatim.
    """

    def __init__(self, *args):
        self.S0 = args[0]
        self.u = args[1]
        self.d = args[2]
        self.R = args[3]
        self.euro = args[5]
        self.call = args[6]
        self.black = args[7]
        self.q = args[4] if self.black else (self.R - self.d) / (self.u - self.d)
        self.up, self.down = self.create_branches()
        # Option value at this node; filled in by backward induction.
        self.c = 0

    def create_branches(self):
        """Return the up-move and down-move stock prices from this node."""
        return self.S0 * self.u, self.S0 * self.d

    def set_c_values(self, bi_1, bi_2, K):
        """Backward-induct this node's option value from its children.

        Args:
            bi_1: up-move child node.
            bi_2: down-move child node.
            K: strike price (only used for American exercise).
        """
        discounted = sum(np.array([self.q, 1 - self.q]) * [bi_1.c, bi_2.c]) / self.R
        if self.euro:
            self.c = discounted
        elif self.call:
            # American call: compare continuation with immediate exercise.
            self.c = max(discounted, self.S0 - K)
        else:
            # American put.
            self.c = max(discounted, K - self.S0)
class MultibinomialModel(object):
    """A multi-period binomial tree built from BinomialModel nodes.

    Prices European/American calls/puts by building the full stock-price
    lattice (self.S) and backward-inducting option values into self.C via
    count_all_values(). create_trading_strategies() prints the replicating
    portfolio (stock units x, bond units y) for every node.
    """
    def __init__(self, *args, euro=True, call=True, black=False):
        # black == False. args: S0, u, d, R, t
        # or
        # black == True.  args: S0, u, d, R, t, q
        self.S0 = args[0]
        self.u = args[1]
        self.d = args[2]
        self.R = args[3]
        self.t = args[4]  # number of periods
        self.euro = euro
        self.call = call
        self.black = black
        if self.black:
            self.q = args[5]
        else:
            # Risk-neutral up-move probability.
            self.q = (self.R - self.d)/(self.u - self.d)
        self.branches = []
        # Init root
        self.branches.append([BinomialModel(self.S0, self.u, self.d, self.R, self.q,
                                            self.euro, self.call, self.black)])
        # Init first values: self.S[k] holds the k+1 distinct prices of layer k.
        self.S = []
        self.S.append([self.S0])
        self.S.append([])
        self.S[-1].append(self.branches[0][0].up)
        self.S[-1].append(self.branches[0][0].down)
        self.init_branches()
        # Values (option prices per layer, filled by count_all_values).
        self.C = []

    def init_branches(self):
        # Grow the recombining tree layer by layer: layer i has i+1 nodes,
        # and layer i+1 gets i+2 prices (first node contributes up AND down,
        # every later node contributes only its down move).
        for i in range(1, self.t):
            self.branches.append([])
            self.S.append([])
            for num in self.S[i]:
                new_model = BinomialModel(num, self.u, self.d, self.R, self.q, self.euro, self.call, self.black)
                self.branches[-1].append(new_model)
                if len(self.S[-1]) > 1:
                    self.S[-1].append(new_model.down)
                else:
                    self.S[-1].append(new_model.up)
                    self.S[-1].append(new_model.down)

    def count_all_values(self, K):
        """Backward-induct option values for strike K; returns self.C,
        a list of per-layer value lists ordered root-first."""
        # All values for every node in every period
        S = np.array(self.S[-1])
        # Terminal payoffs: max(S-K, 0) for calls, max(K-S, 0) for puts.
        if self.call:
            S = S - K
        else:
            S = K - S
        S = list(map(lambda x: 0 if x < 0 else x, S))
        # Init values of last branches and layer
        self.C.append([])
        self.C[-1] = S
        ls = []
        # Last layer of internal nodes: discount the expected child payoff
        # (for American options, floor at the immediate-exercise value).
        for i in range(len(self.branches[-1])):
            if self.euro:
                self.branches[-1][i].c = sum(np.array([self.q, 1-self.q]) *
                                             [self.C[-1][i],
                                              self.C[-1][i+1]])/self.R
            else:
                if self.call:
                    self.branches[-1][i].c = max(sum(np.array([self.q, 1-self.q]) *
                                             [self.C[-1][i],
                                              self.C[-1][i+1]])/self.R, self.branches[-1][i].S0 - K)
                else:
                    self.branches[-1][i].c = max(sum(np.array([self.q, 1-self.q]) *
                                                     [self.C[-1][i],
                                                      self.C[-1][i+1]])/self.R,
                                                 K - self.branches[-1][i].S0)
            ls.append(self.branches[-1][i].c)
        self.C.append(ls)
        del ls
        # Remaining layers, walking toward the root; each node combines its
        # two children (indices j and j+1 in the next layer).
        for i in range(len(self.branches)-2, -1, -1):
            self.C.append([])
            for j in range(len(self.branches[i])):
                self.branches[i][j].set_c_values(self.branches[i+1][j], self.branches[i+1][j+1], K)
                self.C[-1].append(self.branches[i][j].c)
        # C was built leaf-first; flip it so C[0] is the root layer.
        self.C = list(reversed(self.C))
        return self.C

    def create_trading_strategies(self):
        """Print the replicating portfolio (x stock, y bond) per node.

        Solves x*S_up + y*R^i = C_up, x*S_down + y*R^i = C_down for each
        pair of sibling nodes; requires count_all_values() to have run.
        """
        for i in range(1, len(self.S)):
            print("Layer {}".format(i-1))
            for j in range(len(self.S[i])-1):
                a = np.array([[self.S[i][j], self.R**i],
                              [self.S[i][j+1], self.R**i]])
                b = np.array([[self.C[i][j]],
                              [self.C[i][j+1]]])
                solution = np.linalg.solve(a, b)
                x = solution[0]
                y = solution[1]
                print("x: {}"
                      "\ny: {}".format(np.round(x, 2), np.round(y, 2)))
                # Portfolio value at the parent node should equal its C value.
                value = np.round(x * self.S[i-1][j] + y * self.R**(i-1), 2)
                print("Value is {}".format(value[0]))
            print()
| [
"numpy.array",
"numpy.linalg.solve",
"numpy.round"
] | [((2880, 2900), 'numpy.array', 'np.array', (['self.S[-1]'], {}), '(self.S[-1])\n', (2888, 2900), True, 'import numpy as np\n'), ((4704, 4776), 'numpy.array', 'np.array', (['[[self.S[i][j], self.R ** i], [self.S[i][j + 1], self.R ** i]]'], {}), '([[self.S[i][j], self.R ** i], [self.S[i][j + 1], self.R ** i]])\n', (4712, 4776), True, 'import numpy as np\n'), ((4822, 4868), 'numpy.array', 'np.array', (['[[self.C[i][j]], [self.C[i][j + 1]]]'], {}), '([[self.C[i][j]], [self.C[i][j + 1]]])\n', (4830, 4868), True, 'import numpy as np\n'), ((4925, 4946), 'numpy.linalg.solve', 'np.linalg.solve', (['a', 'b'], {}), '(a, b)\n', (4940, 4946), True, 'import numpy as np\n'), ((5140, 5197), 'numpy.round', 'np.round', (['(x * self.S[i - 1][j] + y * self.R ** (i - 1))', '(2)'], {}), '(x * self.S[i - 1][j] + y * self.R ** (i - 1), 2)\n', (5148, 5197), True, 'import numpy as np\n'), ((794, 824), 'numpy.array', 'np.array', (['[self.q, 1 - self.q]'], {}), '([self.q, 1 - self.q])\n', (802, 824), True, 'import numpy as np\n'), ((5082, 5096), 'numpy.round', 'np.round', (['x', '(2)'], {}), '(x, 2)\n', (5090, 5096), True, 'import numpy as np\n'), ((5098, 5112), 'numpy.round', 'np.round', (['y', '(2)'], {}), '(y, 2)\n', (5106, 5112), True, 'import numpy as np\n'), ((3272, 3302), 'numpy.array', 'np.array', (['[self.q, 1 - self.q]'], {}), '([self.q, 1 - self.q])\n', (3280, 3302), True, 'import numpy as np\n'), ((923, 953), 'numpy.array', 'np.array', (['[self.q, 1 - self.q]'], {}), '([self.q, 1 - self.q])\n', (931, 953), True, 'import numpy as np\n'), ((1075, 1105), 'numpy.array', 'np.array', (['[self.q, 1 - self.q]'], {}), '([self.q, 1 - self.q])\n', (1083, 1105), True, 'import numpy as np\n'), ((3536, 3566), 'numpy.array', 'np.array', (['[self.q, 1 - self.q]'], {}), '([self.q, 1 - self.q])\n', (3544, 3566), True, 'import numpy as np\n'), ((3812, 3842), 'numpy.array', 'np.array', (['[self.q, 1 - self.q]'], {}), '([self.q, 1 - self.q])\n', (3820, 3842), True, 'import numpy as np\n')] |
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
"""Construct minibatches for Detectron networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cv2
import logging
import numpy as np
import os
import copy
from detectron.core.config import cfg
import detectron.roi_data.fast_rcnn as fast_rcnn_roi_data
import detectron.roi_data.retinanet as retinanet_roi_data
import detectron.roi_data.rpn as rpn_roi_data
import detectron.utils.blob as blob_utils
logger = logging.getLogger(__name__)
#import sys
#sys.path.append('/home/gaomingda/softer_nms_LIP_JPPNet/detectron/LIP_JPPNet')
#from evaluate_pose_JPPNet import draw_resized_pose
def get_minibatch_blob_names(is_training=True):
    """Return blob names in the order in which they are read by the data loader."""
    # Image data plus the auxiliary pose/segmentation blobs this model consumes.
    blob_names = [
        'data',
        'normalizer',    # focal-loss normalizer used by fast_rcnn_heads
        'pose_pred_4',   # pose heatmaps at strides 4/8/16/32
        'pose_pred_8',
        'pose_pred_16',
        'pose_pred_32',
        'pose_line_8',   # pose line maps at strides 8/16
        'pose_line_16',
        'seg_gt_label',  # segmentation GT added on top of fpn2-5
    ]
    if cfg.RPN.RPN_ON:
        # RPN-only or end-to-end Faster R-CNN
        blob_names += rpn_roi_data.get_rpn_blob_names(is_training=is_training)
    elif cfg.RETINANET.RETINANET_ON:
        blob_names += retinanet_roi_data.get_retinanet_blob_names(
            is_training=is_training
        )
    else:
        # Fast R-CNN like models trained on precomputed proposals
        blob_names += fast_rcnn_roi_data.get_fast_rcnn_blob_names(
            is_training=is_training
        )
    return blob_names
#def get_minibatch(roidb, pose_pred_model):
def get_minibatch(roidb):
    """Given a roidb, construct a minibatch sampled from it.

    Returns:
        (blobs, valid): dict of input blobs keyed by get_minibatch_blob_names(),
        and the validity flag returned by the ROI blob builder.
    """
    # Start every expected blob as an empty list; the builders fill them in.
    blobs = {name: [] for name in get_minibatch_blob_names()}
    # Image batch plus pose heatmaps/line maps and segmentation GT.
    im_blob, im_scales, pose_pred, pose_line, blobs['seg_gt_label'] = \
        _get_image_pose_blob(roidb)
    blobs['data'] = im_blob
    blobs['normalizer'] = np.array([100], dtype=np.float32)
    # NOTE(review): the original branched on "'LIP' in cfg.TRAIN.DATASETS[0]"
    # here, but both branches were textually identical, so the branch is
    # collapsed.
    (blobs['pose_pred_4'], blobs['pose_pred_8'],
     blobs['pose_pred_16'], blobs['pose_pred_32']) = \
        _resize_pose_blob(pose_pred, channel=26)
    blobs['pose_line_8'], blobs['pose_line_16'] = _resize_poseline_blob(pose_line)
    if cfg.RPN.RPN_ON:
        # RPN-only or end-to-end Faster/Mask R-CNN
        valid = rpn_roi_data.add_rpn_blobs(blobs, im_scales, roidb)
    elif cfg.RETINANET.RETINANET_ON:
        # RetinaNet slices targets against the padded input size, so pass it.
        im_width, im_height = im_blob.shape[3], im_blob.shape[2]
        valid = retinanet_roi_data.add_retinanet_blobs(
            blobs, im_scales, roidb, im_width, im_height
        )
    else:
        # Fast R-CNN like models trained on precomputed proposals
        valid = fast_rcnn_roi_data.add_fast_rcnn_blobs(blobs, im_scales, roidb)
    return blobs, valid
def _get_image_blob(roidb):
    """Build an input image blob from the roidb at randomly sampled scales.

    Returns:
        (blob, im_scales): stacked NCHW image blob and per-image scale factors.
    """
    num_images = len(roidb)
    # One random training scale per image.
    scale_inds = np.random.randint(
        0, high=len(cfg.TRAIN.SCALES), size=num_images
    )
    processed_ims = []
    im_scales = []
    for idx, entry in enumerate(roidb):
        im = cv2.imread(entry['image'])
        assert im is not None, \
            'Failed to read image \'{}\''.format(entry['image'])
        if entry['flipped']:
            # Horizontal flip (width axis).
            im = im[:, ::-1, :]
        target_size = cfg.TRAIN.SCALES[scale_inds[idx]]
        im, im_scale = blob_utils.prep_im_for_blob(
            im, cfg.PIXEL_MEANS, target_size, cfg.TRAIN.MAX_SIZE
        )
        im_scales.append(im_scale)
        processed_ims.append(im)
    # Pad and stack the images into a single tensor.
    blob = blob_utils.im_list_to_blob(processed_ims)
    return blob, im_scales
def _get_pose_pred(roidb, channel=16):
    """Load precomputed pose heatmaps (.bin files) for a batch of roidb entries.

    Args:
        roidb: list of roidb entries; each must provide 'id' and 'flipped'.
        channel: channel count of the stored LIP heatmaps (ATR is fixed at 26).

    Returns:
        (pose_blob, pose_line_blob): a (N, 48, 48, 16) float32 heatmap blob
        (only channels 0-15 are kept) and a (N, 48, 48) float32 line blob
        (currently all zeros for LIP -- loading is commented out below).

    NOTE(review): the heatmap directories are hard-coded absolute paths;
    which one is used depends on whether 'LIP' or 'ATR' appears in
    cfg.TRAIN.DATASETS[0].
    """
    num_images = len(roidb)
    if 'LIP' in cfg.TRAIN.DATASETS[0]:
        # pred_pose_data = '/home/gaomingda/Downloads/gaomingda/dataset/LIP_JPP_pred_pose/train'
        pred_pose_data = '/home/gaomingda/datasets/lip_body25/train_images'
        pose_line_data = '/home/gaomingda/Downloads/gaomingda/dataset/LIP_JPP_pose_edge/train'
    if 'ATR' in cfg.TRAIN.DATASETS[0]:
        # pred_pose_data = '/home/gaomingda/Downloads/gaomingda/dataset/ATR_JPP_pred_pose'
        # pred_pose_data = '/home/gaomingda/Downloads/gaomingda/dataset/ATR_JPP_crop_pred_pose'
        pred_pose_data = '/home/gaomingda/Downloads/gaomingda/dataset/ATR_openpose'
    # LIP blobs use the caller-supplied channel count; ATR heatmaps are 26-channel.
    if 'LIP' in cfg.TRAIN.DATASETS[0]:
        pose_blob = np.zeros((num_images, 48, 48, channel), dtype=np.float32)
    else:  # ATR
        pose_blob = np.zeros((num_images, 48, 48, 26), dtype=np.float32)
    pose_line_blob = np.zeros((num_images, 48, 48), dtype=np.float32)
    for i in range(num_images):
        entry = roidb[i]
        if 'ATR' in cfg.TRAIN.DATASETS[0]:
            # Flipped images have their own precomputed heatmap directory.
            if entry['flipped']:
                pred_pose_path = os.path.join(pred_pose_data, 'heatmap_flip', entry['id']+'.bin')
            else:
                pred_pose_path = os.path.join(pred_pose_data, 'heatmap', entry['id']+'.bin')
            pred_ = np.fromfile(pred_pose_path, dtype=np.float32)
            pred_ = pred_.reshape(48, 48, 26)
            pose_blob[i] = pred_
        else:  # LIP
            if entry['flipped']:
                pred_pose_path = os.path.join(pred_pose_data, 'heatmap_flip', entry['id']+'.bin')
            else:
                pred_pose_path = os.path.join(pred_pose_data, 'heatmap', entry['id']+'.bin')
            pred_ = np.fromfile(pred_pose_path, dtype=np.float32)
            pred_ = pred_.reshape(48, 48, channel)
            # pose line loading is disabled; the blob stays all-zero.
            #pose_line = np.fromfile(os.path.join(pose_line_data, entry['id']+'.bin'), dtype=np.float32)
            #pose_line = pose_line.reshape(48, 48)
            pose_line = np.zeros((48, 48), dtype=np.float32)
            # if entry['flipped']:
            #     pred_ = flip_pose(pred_)
            #     # pose line
            #     pose_line = pose_line[:, ::-1]
            pose_blob[i] = pred_
            # pose line
            pose_line_blob[i] = pose_line
    # Keep only the first 16 heatmap channels for the network input.
    #print("train body25, select poses 0-16 channel")
    pose_blob = pose_blob[:, :, :, 0:16]
    return pose_blob , pose_line_blob
def flip_pose(pose):
    """Return a left/right mirrored copy of a 16-channel pose heatmap.

    Paired joint channels (0<->5, 1<->4, 2<->3, 10<->15, 11<->14, 12<->13)
    are swapped so labels stay semantically correct, channels 6-9 are kept
    in place, and the result is flipped along the width axis.

    Args:
        pose: array of shape (H, W, 16).

    Returns:
        float32 array of the same shape, mirrored.
    """
    # Destination channel d takes its data from source channel swap[d].
    swap = [5, 4, 3, 2, 1, 0, 6, 7, 8, 9, 15, 14, 13, 12, 11, 10]
    mirrored = np.zeros(pose.shape, dtype=np.float32)
    for dst, src in enumerate(swap):
        mirrored[:, :, dst] = pose[:, :, src]
    return mirrored[:, ::-1, :]
def _get_image_pose_blob(roidb):
    """Build image, pose, pose-line and seg-gt blobs for the given roidb.

    Each image is read from disk, mirrored if its entry is flipped,
    resized to a randomly sampled training scale, and packed together
    with its pose heatmaps and segmentation labels.

    Returns:
        (im_blob, im_scales, pose_blob, pose_line_blob, seg_gt_blob)
    """
    # One random scale index per image in the batch.
    scale_inds = np.random.randint(
        0, high=len(cfg.TRAIN.SCALES), size=len(roidb)
    )
    processed_ims = []
    im_scales = []
    for entry, scale_ind in zip(roidb, scale_inds):
        im = cv2.imread(entry['image'])
        assert im is not None, \
            'Failed to read image \'{}\''.format(entry['image'])
        if entry['flipped']:
            im = im[:, ::-1, :]
        im, im_scale = blob_utils.prep_im_for_blob(
            im, cfg.PIXEL_MEANS, cfg.TRAIN.SCALES[scale_ind], cfg.TRAIN.MAX_SIZE
        )
        im_scales.append(im_scale)
        processed_ims.append(im)
    # Pose heatmaps (26 channels) plus their (num_images, 48, 48) line maps.
    poses, pose_line = _get_pose_pred(roidb, channel=26)
    # Segmentation ground-truth labels, one 2D array per image.
    seg_gt_list = _prep_seg_gt_for_blob(roidb)
    im_blob, pose_blob, pose_line_blob, seg_gt_blob = blob_utils.im_list_to_blob_andPose(
        processed_ims, poses, pose_line, seg_gt_list)
    return im_blob, im_scales, pose_blob, pose_line_blob, seg_gt_blob
def show_pose(roidb, pose_blob, im_scales):
    """Visualize the predicted joint locations for every roidb entry.

    For each of the 16 joint channels the heatmap argmax is taken as the
    joint position, and the result is drawn over the original image.
    """
    for idx, entry in enumerate(roidb):
        heatmaps = pose_blob[idx]
        joints = []
        for ch in range(16):
            hm = heatmaps[:, :, ch]
            # Peak of this joint's heatmap as (row, col).
            row, col = np.unravel_index(hm.argmax(), hm.shape)
            joints.append([row, col])
        draw_resized_pose(entry['image'], joints, im_scales[idx], entry['flipped'])
def _resize_pose_blob(pose_pred, channel=16):
    """Downscale a pose-heatmap blob to 1/4, 1/8, 1/16 and 1/32 resolution.

    Args:
        pose_pred: array of shape (n, h, w, c) with per-joint heatmaps.
        channel: kept only for backward compatibility — the original code
            immediately overwrote this parameter with the channel count
            unpacked from ``pose_pred.shape``, so it is ignored here too.

    Returns:
        Tuple of four float32 arrays shaped (n, h//s, w//s, c) for
        s in (4, 8, 16, 32), resized with bilinear interpolation.
    """
    n, h, w, c = pose_pred.shape
    shrunk = []
    for factor in (4, 8, 16, 32):
        scale = 1. / factor
        out = np.zeros((n, int(h / float(factor)), int(w / float(factor)), c),
                       dtype=np.float32)
        for i in range(n):
            out[i] = cv2.resize(pose_pred[i], None, None, scale, scale,
                               interpolation=cv2.INTER_LINEAR)
        shrunk.append(out)
    return tuple(shrunk)
def _resize_poseline_blob(pose_pred):
    """Shrink a (n, h, w) pose-line blob to 1/8 and 1/16 resolution.

    Nearest-neighbour interpolation is used so the line labels are not
    blended. Returns the pair (blob_1_8, blob_1_16) as float32 arrays.
    """
    n, h, w = pose_pred.shape
    eighth = np.zeros((n, int(h / 8.), int(w / 8.)), dtype=np.float32)
    sixteenth = np.zeros((n, int(h / 16.), int(w / 16.)), dtype=np.float32)
    for idx in range(n):
        eighth[idx] = cv2.resize(pose_pred[idx], None, None, 1. / 8., 1. / 8.,
                                interpolation=cv2.INTER_NEAREST)
        sixteenth[idx] = cv2.resize(pose_pred[idx], None, None, 1. / 16., 1. / 16.,
                                   interpolation=cv2.INTER_NEAREST)
    return eighth, sixteenth
def _resize_pose_blob_to13(pose_pred):
    """first combine 16 channel pose pred to 13 channel(6,B_Pelvis ,B_Spine ,B_Neck ,B_Head)
    then shrink 1./8, 1.16 the 13 channel pose blob

    NOTE(review): as written this actually returns all four scales
    (1/4, 1/8, 1/16, 1/32) produced by _resize_pose_blob, not only
    1/8 and 1/16 as the docstring says.
    """
    n, h, w, _ = pose_pred.shape
    pose_13 = np.zeros((n, h, w, 13), dtype=np.float32)
    # Channels 0-5 are copied through unchanged; channel 7 <- input 10.
    pose_13[:, :, :, 0:6] = pose_pred[:, :, :, 0:6]
    pose_13[:, :, :, 7] = pose_pred[:, :, :, 10]
    # Input channels 6-9 (pelvis/spine/neck/head) merged into channel 6.
    pose_13[:, :, :, 6] = pose_pred[:, :, :, 6]+pose_pred[:, :, :, 7]+pose_pred[:, :, :, 8]+pose_pred[:, :, :, 9]
    # NOTE(review): output channels 8-12 stay all-zero and input channels
    # 11-15 are never used — confirm this matches the intended mapping.
    return _resize_pose_blob(pose_13, channel=13)
def pose_sum_to_onehotmap(pose_blob_8, pose_blob_16):
    """Collapse pose heatmaps over channels and tile them across features.

    Args:
        pose_blob_8: (n, h8, w8, c) heatmaps matching the res3 feature map.
        pose_blob_16: (n, h16, w16, c) heatmaps matching the res4 feature map.

    Returns:
        Two float32 arrays shaped (n, 512, h8, w8) and (n, 1024, h16, w16);
        every feature channel holds the same channel-summed heatmap.
    """
    n, h8, w8, _ = pose_blob_8.shape
    _, h16, w16, _ = pose_blob_16.shape
    summed_8 = np.sum(pose_blob_8, axis=3)
    summed_16 = np.sum(pose_blob_16, axis=3)
    res3 = np.empty((n, 512, h8, w8), dtype=np.float32)
    res4 = np.empty((n, 1024, h16, w16), dtype=np.float32)
    # Broadcast the (n, h, w) sums over the feature-channel dimension.
    res3[:] = summed_8[:, np.newaxis, :, :]
    res4[:] = summed_16[:, np.newaxis, :, :]
    return res3, res4
def _prep_seg_gt_for_blob(roidb):
    """load seg gt label
    return: 2D array
    return: a list of seg_gt array(H, W)
    """
    seg_gt_list = []
    for entry in roidb:
        # Grayscale read: each pixel holds a part/class label id.
        seg_gt = cv2.imread(entry['ins_seg'], 0)
        if entry['flipped']:
            # Mirror the label map, then swap left/right part ids so the
            # labels remain semantically correct after the flip.
            seg_gt = seg_gt[:, ::-1]
            label_ = copy.deepcopy(seg_gt)
            dataset_name = cfg.TRAIN.DATASETS[0]
            if 'LIP' in dataset_name:
                orig2flipped = {14:15, 15:14, 16:17, 17:16, 18:19, 19:18}
            if 'ATR' in dataset_name:
                orig2flipped = {
                    9: 10, 10: 9, 12: 13, 13: 12, 14: 15, 15: 14}
            # NOTE(review): if the dataset name contains neither 'LIP'
            # nor 'ATR', orig2flipped is undefined here (NameError).
            for i in orig2flipped.keys():
                ind_i = np.where(label_==i)
                if len(ind_i[0])==0:
                    continue
                seg_gt[ind_i] = int(orig2flipped[i])
        # seg_gt = cv2.resize(seg_gt, None, None, fx=im_scale, fy=im_scale,
        #                 interpolation=cv2.INTER_NEAREST)
        seg_gt = np.array(seg_gt, dtype=np.int32)
        seg_gt_list.append(seg_gt)
    return seg_gt_list | [
"logging.getLogger",
"numpy.fromfile",
"detectron.utils.blob.prep_im_for_blob",
"numpy.array",
"copy.deepcopy",
"numpy.where",
"detectron.roi_data.retinanet.add_retinanet_blobs",
"detectron.utils.blob.im_list_to_blob_andPose",
"detectron.roi_data.fast_rcnn.get_fast_rcnn_blob_names",
"cv2.resize",
... | [((1449, 1476), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1466, 1476), False, 'import logging\n'), ((3464, 3497), 'numpy.array', 'np.array', (['[100]'], {'dtype': 'np.float32'}), '([100], dtype=np.float32)\n', (3472, 3497), True, 'import numpy as np\n'), ((6020, 6061), 'detectron.utils.blob.im_list_to_blob', 'blob_utils.im_list_to_blob', (['processed_ims'], {}), '(processed_ims)\n', (6046, 6061), True, 'import detectron.utils.blob as blob_utils\n'), ((7057, 7105), 'numpy.zeros', 'np.zeros', (['(num_images, 48, 48)'], {'dtype': 'np.float32'}), '((num_images, 48, 48), dtype=np.float32)\n', (7065, 7105), True, 'import numpy as np\n'), ((8724, 8762), 'numpy.zeros', 'np.zeros', (['pose.shape'], {'dtype': 'np.float32'}), '(pose.shape, dtype=np.float32)\n', (8732, 8762), True, 'import numpy as np\n'), ((10583, 10668), 'detectron.utils.blob.im_list_to_blob_andPose', 'blob_utils.im_list_to_blob_andPose', (['processed_ims', 'poses', 'pose_line', 'seg_gt_list'], {}), '(processed_ims, poses, pose_line, seg_gt_list\n )\n', (10617, 10668), True, 'import detectron.utils.blob as blob_utils\n'), ((13475, 13516), 'numpy.zeros', 'np.zeros', (['(n, h, w, 13)'], {'dtype': 'np.float32'}), '((n, h, w, 13), dtype=np.float32)\n', (13483, 13516), True, 'import numpy as np\n'), ((14074, 14101), 'numpy.sum', 'np.sum', (['pose_blob_8'], {'axis': '(3)'}), '(pose_blob_8, axis=3)\n', (14080, 14101), True, 'import numpy as np\n'), ((14120, 14148), 'numpy.sum', 'np.sum', (['pose_blob_16'], {'axis': '(3)'}), '(pose_blob_16, axis=3)\n', (14126, 14148), True, 'import numpy as np\n'), ((14178, 14220), 'numpy.zeros', 'np.zeros', (['(n, 512, h, w)'], {'dtype': 'np.float32'}), '((n, 512, h, w), dtype=np.float32)\n', (14186, 14220), True, 'import numpy as np\n'), ((14245, 14294), 'numpy.zeros', 'np.zeros', (['(n, 1024, h_16, w_16)'], {'dtype': 'np.float32'}), '((n, 1024, h_16, w_16), dtype=np.float32)\n', (14253, 14294), True, 'import numpy as np\n'), ((2413, 
2469), 'detectron.roi_data.rpn.get_rpn_blob_names', 'rpn_roi_data.get_rpn_blob_names', ([], {'is_training': 'is_training'}), '(is_training=is_training)\n', (2444, 2469), True, 'import detectron.roi_data.rpn as rpn_roi_data\n'), ((4225, 4276), 'detectron.roi_data.rpn.add_rpn_blobs', 'rpn_roi_data.add_rpn_blobs', (['blobs', 'im_scales', 'roidb'], {}), '(blobs, im_scales, roidb)\n', (4251, 4276), True, 'import detectron.roi_data.rpn as rpn_roi_data\n'), ((5519, 5548), 'cv2.imread', 'cv2.imread', (["roidb[i]['image']"], {}), "(roidb[i]['image'])\n", (5529, 5548), False, 'import cv2\n'), ((5791, 5877), 'detectron.utils.blob.prep_im_for_blob', 'blob_utils.prep_im_for_blob', (['im', 'cfg.PIXEL_MEANS', 'target_size', 'cfg.TRAIN.MAX_SIZE'], {}), '(im, cfg.PIXEL_MEANS, target_size, cfg.TRAIN.\n MAX_SIZE)\n', (5818, 5877), True, 'import detectron.utils.blob as blob_utils\n'), ((6889, 6946), 'numpy.zeros', 'np.zeros', (['(num_images, 48, 48, channel)'], {'dtype': 'np.float32'}), '((num_images, 48, 48, channel), dtype=np.float32)\n', (6897, 6946), True, 'import numpy as np\n'), ((6983, 7035), 'numpy.zeros', 'np.zeros', (['(num_images, 48, 48, 26)'], {'dtype': 'np.float32'}), '((num_images, 48, 48, 26), dtype=np.float32)\n', (6991, 7035), True, 'import numpy as np\n'), ((9836, 9865), 'cv2.imread', 'cv2.imread', (["roidb[i]['image']"], {}), "(roidb[i]['image'])\n", (9846, 9865), False, 'import cv2\n'), ((10108, 10194), 'detectron.utils.blob.prep_im_for_blob', 'blob_utils.prep_im_for_blob', (['im', 'cfg.PIXEL_MEANS', 'target_size', 'cfg.TRAIN.MAX_SIZE'], {}), '(im, cfg.PIXEL_MEANS, target_size, cfg.TRAIN.\n MAX_SIZE)\n', (10135, 10194), True, 'import detectron.utils.blob as blob_utils\n'), ((11890, 11993), 'cv2.resize', 'cv2.resize', (['pose_pred[i, :, :, :]', 'None', 'None', '(1.0 / 4.0)', '(1.0 / 4.0)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(pose_pred[i, :, :, :], None, None, 1.0 / 4.0, 1.0 / 4.0,\n interpolation=cv2.INTER_LINEAR)\n', (11900, 11993), False, 'import cv2\n'), 
((12028, 12131), 'cv2.resize', 'cv2.resize', (['pose_pred[i, :, :, :]', 'None', 'None', '(1.0 / 8.0)', '(1.0 / 8.0)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(pose_pred[i, :, :, :], None, None, 1.0 / 8.0, 1.0 / 8.0,\n interpolation=cv2.INTER_LINEAR)\n', (12038, 12131), False, 'import cv2\n'), ((12167, 12264), 'cv2.resize', 'cv2.resize', (['pose_pred[i]', 'None', 'None', '(1.0 / 16.0)', '(1.0 / 16.0)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(pose_pred[i], None, None, 1.0 / 16.0, 1.0 / 16.0, interpolation=\n cv2.INTER_LINEAR)\n', (12177, 12264), False, 'import cv2\n'), ((12299, 12396), 'cv2.resize', 'cv2.resize', (['pose_pred[i]', 'None', 'None', '(1.0 / 32.0)', '(1.0 / 32.0)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(pose_pred[i], None, None, 1.0 / 32.0, 1.0 / 32.0, interpolation=\n cv2.INTER_LINEAR)\n', (12309, 12396), False, 'import cv2\n'), ((12951, 13052), 'cv2.resize', 'cv2.resize', (['pose_pred[i, :, :]', 'None', 'None', '(1.0 / 8.0)', '(1.0 / 8.0)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(pose_pred[i, :, :], None, None, 1.0 / 8.0, 1.0 / 8.0,\n interpolation=cv2.INTER_NEAREST)\n', (12961, 13052), False, 'import cv2\n'), ((13088, 13186), 'cv2.resize', 'cv2.resize', (['pose_pred[i]', 'None', 'None', '(1.0 / 16.0)', '(1.0 / 16.0)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(pose_pred[i], None, None, 1.0 / 16.0, 1.0 / 16.0, interpolation=\n cv2.INTER_NEAREST)\n', (13098, 13186), False, 'import cv2\n'), ((14673, 14704), 'cv2.imread', 'cv2.imread', (["entry['ins_seg']", '(0)'], {}), "(entry['ins_seg'], 0)\n", (14683, 14704), False, 'import cv2\n'), ((15501, 15533), 'numpy.array', 'np.array', (['seg_gt'], {'dtype': 'np.int32'}), '(seg_gt, dtype=np.int32)\n', (15509, 15533), True, 'import numpy as np\n'), ((2529, 2597), 'detectron.roi_data.retinanet.get_retinanet_blob_names', 'retinanet_roi_data.get_retinanet_blob_names', ([], {'is_training': 'is_training'}), '(is_training=is_training)\n', (2572, 2597), True, 'import detectron.roi_data.retinanet as 
retinanet_roi_data\n'), ((2718, 2786), 'detectron.roi_data.fast_rcnn.get_fast_rcnn_blob_names', 'fast_rcnn_roi_data.get_fast_rcnn_blob_names', ([], {'is_training': 'is_training'}), '(is_training=is_training)\n', (2761, 2786), True, 'import detectron.roi_data.fast_rcnn as fast_rcnn_roi_data\n'), ((4613, 4701), 'detectron.roi_data.retinanet.add_retinanet_blobs', 'retinanet_roi_data.add_retinanet_blobs', (['blobs', 'im_scales', 'roidb', 'im_width', 'im_height'], {}), '(blobs, im_scales, roidb, im_width,\n im_height)\n', (4651, 4701), True, 'import detectron.roi_data.retinanet as retinanet_roi_data\n'), ((4812, 4875), 'detectron.roi_data.fast_rcnn.add_fast_rcnn_blobs', 'fast_rcnn_roi_data.add_fast_rcnn_blobs', (['blobs', 'im_scales', 'roidb'], {}), '(blobs, im_scales, roidb)\n', (4850, 4875), True, 'import detectron.roi_data.fast_rcnn as fast_rcnn_roi_data\n'), ((7468, 7513), 'numpy.fromfile', 'np.fromfile', (['pred_pose_path'], {'dtype': 'np.float32'}), '(pred_pose_path, dtype=np.float32)\n', (7479, 7513), True, 'import numpy as np\n'), ((7875, 7920), 'numpy.fromfile', 'np.fromfile', (['pred_pose_path'], {'dtype': 'np.float32'}), '(pred_pose_path, dtype=np.float32)\n', (7886, 7920), True, 'import numpy as np\n'), ((8176, 8212), 'numpy.zeros', 'np.zeros', (['(48, 48)'], {'dtype': 'np.float32'}), '((48, 48), dtype=np.float32)\n', (8184, 8212), True, 'import numpy as np\n'), ((14792, 14813), 'copy.deepcopy', 'copy.deepcopy', (['seg_gt'], {}), '(seg_gt)\n', (14805, 14813), False, 'import copy\n'), ((7272, 7338), 'os.path.join', 'os.path.join', (['pred_pose_data', '"""heatmap_flip"""', "(entry['id'] + '.bin')"], {}), "(pred_pose_data, 'heatmap_flip', entry['id'] + '.bin')\n", (7284, 7338), False, 'import os\n'), ((7388, 7449), 'os.path.join', 'os.path.join', (['pred_pose_data', '"""heatmap"""', "(entry['id'] + '.bin')"], {}), "(pred_pose_data, 'heatmap', entry['id'] + '.bin')\n", (7400, 7449), False, 'import os\n'), ((7679, 7745), 'os.path.join', 'os.path.join', 
(['pred_pose_data', '"""heatmap_flip"""', "(entry['id'] + '.bin')"], {}), "(pred_pose_data, 'heatmap_flip', entry['id'] + '.bin')\n", (7691, 7745), False, 'import os\n'), ((7795, 7856), 'os.path.join', 'os.path.join', (['pred_pose_data', '"""heatmap"""', "(entry['id'] + '.bin')"], {}), "(pred_pose_data, 'heatmap', entry['id'] + '.bin')\n", (7807, 7856), False, 'import os\n'), ((15191, 15212), 'numpy.where', 'np.where', (['(label_ == i)'], {}), '(label_ == i)\n', (15199, 15212), True, 'import numpy as np\n')] |
from sklearn.ensemble import RandomForestRegressor
from sklearn.utils.validation import check_is_fitted
from joblib import Parallel, delayed
from sklearn.ensemble._base import _partition_estimators
import threading
import numpy as np
class RandomForestRegressor2(RandomForestRegressor):
    """RandomForestRegressor that can also report per-sample prediction spread.

    ``predict(X, return_std=True)`` returns the mean and the standard
    deviation of the individual tree predictions, which serves as a
    cheap uncertainty estimate.
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 min_impurity_decrease=0.,
                 min_impurity_split=None,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        # Forward everything by keyword: scikit-learn made estimator
        # constructor arguments keyword-only, so positional forwarding
        # silently depends on parameter order and breaks on newer releases.
        super(RandomForestRegressor2, self).__init__(
            n_estimators=n_estimators,
            criterion=criterion,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            min_impurity_decrease=min_impurity_decrease,
            min_impurity_split=min_impurity_split,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)

    def predict(self, X, return_std=False):
        """Predict regression targets for X.

        Args:
            X: array-like of shape (n_samples, n_features).
            return_std: when True, also return the standard deviation of
                the per-tree predictions (max over outputs for
                multi-output forests).

        Returns:
            y_mean, or the tuple (y_mean, y_std) when return_std is True.
        """
        if not return_std:
            return super(RandomForestRegressor2, self).predict(X)
        check_is_fitted(self, 'estimators_')
        # Check data
        X = self._validate_X_predict(X)
        # Assign chunk of trees to jobs (only the job count is needed).
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
        # Each tree's prediction: shape (n_estimators, n_samples[, n_outputs]).
        y_pred = np.array(Parallel(n_jobs=n_jobs, verbose=self.verbose, backend="threading")(
            delayed(e.predict)(X) for e in self.estimators_))
        ypred_mean = np.mean(y_pred, axis=0)
        ypred_std = np.std(y_pred, axis=0)
        if len(ypred_std.shape) > 1:
            # Multi-output forest: report the largest per-output spread.
            ypred_std = np.max(ypred_std, 1)
        return ypred_mean, ypred_std
| [
"sklearn.utils.validation.check_is_fitted",
"numpy.mean",
"numpy.max",
"joblib.Parallel",
"sklearn.ensemble._base._partition_estimators",
"numpy.std",
"joblib.delayed"
] | [((1453, 1489), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""estimators_"""'], {}), "(self, 'estimators_')\n", (1468, 1489), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((1632, 1685), 'sklearn.ensemble._base._partition_estimators', '_partition_estimators', (['self.n_estimators', 'self.n_jobs'], {}), '(self.n_estimators, self.n_jobs)\n', (1653, 1685), False, 'from sklearn.ensemble._base import _partition_estimators\n'), ((2244, 2267), 'numpy.mean', 'np.mean', (['y_pred'], {'axis': '(0)'}), '(y_pred, axis=0)\n', (2251, 2267), True, 'import numpy as np\n'), ((2293, 2315), 'numpy.std', 'np.std', (['y_pred'], {'axis': '(0)'}), '(y_pred, axis=0)\n', (2299, 2315), True, 'import numpy as np\n'), ((2386, 2406), 'numpy.max', 'np.max', (['ypred_std', '(1)'], {}), '(ypred_std, 1)\n', (2392, 2406), True, 'import numpy as np\n'), ((2038, 2104), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs', 'verbose': 'self.verbose', 'backend': '"""threading"""'}), "(n_jobs=n_jobs, verbose=self.verbose, backend='threading')\n", (2046, 2104), False, 'from joblib import Parallel, delayed\n'), ((2122, 2140), 'joblib.delayed', 'delayed', (['e.predict'], {}), '(e.predict)\n', (2129, 2140), False, 'from joblib import Parallel, delayed\n')] |
import cv2, base64, json, os, requests
import numpy as np
from fileTools import *
from trainTools import *
from imgTools import *
from eightPuzzle import *
# Load the persisted kNN training set and rebuild the classifier.
with np.load('./record/knnTrainData.npz') as data:
    print(data.files)
    trainData = data['trainData']
    responses = data['responses']
knn = cv2.ml.KNearest_create()
knn.train(trainData,cv2.ml.ROW_SAMPLE,responses)
# Label lookup tables persisted alongside the training data.
charDict = readJsonFile('./record/charDict.json')
noDict = readJsonFile('./record/noDict.json')
fileNames = os.listdir('./source')
fileNames.sort()
# j counts correctly classified tiles; assumes each sample directory
# holds tiles 1.jpg .. 9.jpg and the expected label is the directory's
# first character plus the tile index — TODO confirm naming convention.
j = 1
for fileName in fileNames:
    for i in range(1,10):
        img = cv2.imread('./source'+'/'+fileName+'/'+str(i)+'.jpg')
        test = getImageTrainData(img)
        if(test.size > 1):
            # Nearest-neighbour lookup (k=1) on the tile's feature vector.
            ret,results,neighbours,dist = knn.findNearest(test,k=1)
            print(results.shape)
            result = getClosestResult(results)
            #print('p: ' + fileName[0]+str(i) +' m: '+noDict[str(result)])
            if((fileName[0]+str(i)) != noDict[str(result)]):
                # Misclassification: print expected vs. matched label.
                print('p: ' + fileName[0]+str(i) +' m: '+noDict[str(result)])
            else:
                print(j)
                j += 1
| [
"cv2.ml.KNearest_create",
"os.listdir",
"numpy.load"
] | [((316, 340), 'cv2.ml.KNearest_create', 'cv2.ml.KNearest_create', ([], {}), '()\n', (338, 340), False, 'import cv2, base64, json, os, requests\n'), ((500, 522), 'os.listdir', 'os.listdir', (['"""./source"""'], {}), "('./source')\n", (510, 522), False, 'import cv2, base64, json, os, requests\n'), ((162, 198), 'numpy.load', 'np.load', (['"""./record/knnTrainData.npz"""'], {}), "('./record/knnTrainData.npz')\n", (169, 198), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-#
'''
# Name: LSTMCell_1_1
# Description: Implement the basic functions of lstm cell and linear cell
# Can't support batch input
# Author: super
# Date: 2020/6/18
'''
import sys
import math
import numpy as np
from MiniFramework.Layer import *
from MiniFramework.ActivationLayer import *
class LSTMCell_1_1(object):
    """A single LSTM cell operating on one sample at a time (no batching).

    The stacked parameters are laid out gate-by-gate along axis 0 in the
    order [forget, input, candidate(g), output]:
        W: (4*hidden_size, hidden_size)  hidden-to-hidden weights
        U: (4*input_size, hidden_size)   input-to-hidden weights
        b: (4, hidden_size)              per-gate bias rows
    The gradient buffers dW / dU / db use exactly the same layout.
    """
    def __init__(self, input_size, hidden_size, bias=True):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        # Gradient buffers, same stacked [f, i, g, o] layout as W / U / b.
        self.dW = np.zeros((4 * self.hidden_size, self.hidden_size))
        self.dU = np.zeros((4 * self.input_size, self.hidden_size))
        self.db = np.zeros((4, self.hidden_size))
    def get_params(self, W, U, b):
        """Slice the stacked parameters into per-gate views (f, i, g, o)."""
        self.wf = W[0: self.hidden_size,:]
        self.wi = W[self.hidden_size: 2 * self.hidden_size,:]
        self.wg = W[2 * self.hidden_size: 3 * self.hidden_size,:]
        self.wo = W[3 * self.hidden_size: 4 * self.hidden_size,:]
        self.uf = U[0: self.input_size,:]
        self.ui = U[self.input_size: 2 * self.input_size,:]
        self.ug = U[2 * self.input_size: 3 * self.input_size,:]
        self.uo = U[3 * self.input_size: 4 * self.input_size,:]
        if self.bias:
            self.bf = b[0,:]
            self.bi = b[1,:]
            self.bg = b[2,:]
            self.bo = b[3,:]
        else:
            self.bf = self.bi = self.bg = self.bo = np.zeros((1, self.hidden_size))
    def forward(self, x, h_p, c_p, W, U, b):
        """One forward step given input x, previous hidden h_p and cell c_p."""
        self.get_params(W, U, b)
        self.x = x
        self.f = self.get_gate(x, h_p, self.wf, self.uf, self.bf, Sigmoid())
        self.i = self.get_gate(x, h_p, self.wi, self.ui, self.bi, Sigmoid())
        self.g = self.get_gate(x, h_p, self.wg, self.ug, self.bg, Tanh())
        self.o = self.get_gate(x, h_p, self.wo, self.uo, self.bo, Sigmoid())
        # c = f * c_prev + i * g ;  h = o * tanh(c)
        self.c = np.multiply(self.f, c_p) + np.multiply(self.i, self.g)
        self.h = np.multiply(self.o, Tanh().forward(self.c))
    def get_gate(self, x, h, W, U, b, activator, bias=True):
        """Return activator(h.W + x.U [+ b]).

        Note: the ``bias`` parameter is ignored — ``self.bias`` decides
        whether b is added (parameter kept for interface compatibility).
        """
        if self.bias:
            z = np.dot(h, W) + np.dot(x, U) + b
        else:
            z = np.dot(h, W) + np.dot(x, U)
        a = activator.forward(z)
        return a
    def backward(self, h_p, c_p, in_grad):
        """Backward step: fills dW/dU/db and sets dh (to step t-1) and dx (to the layer below)."""
        tanh = lambda x : Tanh().forward(x)
        self.dzo = in_grad * tanh(self.c) * self.o * (1 - self.o)
        self.dc = in_grad * self.o * (1 - tanh(self.c) * tanh(self.c))
        self.dzg = self.dc * self.i * (1- self.g * self.g)
        self.dzi = self.dc * self.g * self.i * (1 - self.i)
        self.dzf = self.dc * c_p * self.f * (1 - self.f)
        self.dW[3 * self.hidden_size: 4 * self.hidden_size] = np.dot(h_p.T, self.dzo)
        self.dW[2 * self.hidden_size: 3 * self.hidden_size] = np.dot(h_p.T, self.dzg)
        self.dW[self.hidden_size: 2 * self.hidden_size] = np.dot(h_p.T, self.dzi)
        self.dW[0: self.hidden_size] = np.dot(h_p.T, self.dzf)
        self.dU[3 * self.input_size: 4 * self.input_size] = np.dot(self.x.T, self.dzo)
        self.dU[2 * self.input_size: 3 * self.input_size] = np.dot(self.x.T, self.dzg)
        self.dU[self.input_size: 2 * self.input_size] = np.dot(self.x.T, self.dzi)
        self.dU[0: self.input_size] = np.dot(self.x.T, self.dzf)
        if self.bias:
            # BUG FIX: b is laid out as [f, i, g, o] (get_params reads
            # bf=b[0] ... bo=b[3]) and dW/dU follow that order, but the
            # original wrote the bias gradients reversed ([o, g, i, f]).
            self.db[0] = self.dzf
            self.db[1] = self.dzi
            self.db[2] = self.dzg
            self.db[3] = self.dzo
        # pass to previous time step
        self.dh = np.dot(self.dzf, self.wf.T) + np.dot(self.dzi, self.wi.T) + np.dot(self.dzg, self.wg.T) + np.dot(self.dzo, self.wo.T)
        # pass to previous layer
        self.dx = np.dot(self.dzf, self.uf.T) + np.dot(self.dzi, self.ui.T) + np.dot(self.dzg, self.ug.T) + np.dot(self.dzo, self.uo.T)
class LinearCell_1_1(object):
    """Fully-connected (linear) output cell; processes one sample at a time."""
    def __init__(self, input_size, output_size, bias=True):
        self.input_size = input_size
        self.output_size = output_size
        self.bias = bias
    def forward(self, x, V, b):
        """Compute z = x.V + b for a single sample.

        x: (1, input_size) row vector; V: (input_size, output_size).
        """
        self.x = x
        self.V = V
        if self.bias:
            self.b = b
        else:
            self.b = np.zeros((1,self.output_size))
        # NOTE(review): this adds the raw argument ``b`` rather than
        # ``self.b``, so the bias is applied even when bias=False; it
        # should probably read ``self.z = np.dot(x, V) + self.b``.
        self.z = np.dot(x, V) + b
    def backward(self, in_grad):
        """Accumulate gradients w.r.t. V (dV), b (db) and the input (dx)."""
        self.dz = in_grad.reshape(1,-1)
        self.dV = np.dot(self.x.T, self.dz)
        if self.bias:
            self.db = self.dz
        self.dx = np.dot(self.dz, self.V.T) | [
"numpy.dot",
"numpy.zeros",
"numpy.multiply"
] | [((556, 606), 'numpy.zeros', 'np.zeros', (['(4 * self.hidden_size, self.hidden_size)'], {}), '((4 * self.hidden_size, self.hidden_size))\n', (564, 606), True, 'import numpy as np\n'), ((625, 674), 'numpy.zeros', 'np.zeros', (['(4 * self.input_size, self.hidden_size)'], {}), '((4 * self.input_size, self.hidden_size))\n', (633, 674), True, 'import numpy as np\n'), ((693, 724), 'numpy.zeros', 'np.zeros', (['(4, self.hidden_size)'], {}), '((4, self.hidden_size))\n', (701, 724), True, 'import numpy as np\n'), ((2708, 2731), 'numpy.dot', 'np.dot', (['h_p.T', 'self.dzo'], {}), '(h_p.T, self.dzo)\n', (2714, 2731), True, 'import numpy as np\n'), ((2794, 2817), 'numpy.dot', 'np.dot', (['h_p.T', 'self.dzg'], {}), '(h_p.T, self.dzg)\n', (2800, 2817), True, 'import numpy as np\n'), ((2876, 2899), 'numpy.dot', 'np.dot', (['h_p.T', 'self.dzi'], {}), '(h_p.T, self.dzi)\n', (2882, 2899), True, 'import numpy as np\n'), ((2939, 2962), 'numpy.dot', 'np.dot', (['h_p.T', 'self.dzf'], {}), '(h_p.T, self.dzf)\n', (2945, 2962), True, 'import numpy as np\n'), ((3024, 3050), 'numpy.dot', 'np.dot', (['self.x.T', 'self.dzo'], {}), '(self.x.T, self.dzo)\n', (3030, 3050), True, 'import numpy as np\n'), ((3111, 3137), 'numpy.dot', 'np.dot', (['self.x.T', 'self.dzg'], {}), '(self.x.T, self.dzg)\n', (3117, 3137), True, 'import numpy as np\n'), ((3194, 3220), 'numpy.dot', 'np.dot', (['self.x.T', 'self.dzi'], {}), '(self.x.T, self.dzi)\n', (3200, 3220), True, 'import numpy as np\n'), ((3259, 3285), 'numpy.dot', 'np.dot', (['self.x.T', 'self.dzf'], {}), '(self.x.T, self.dzf)\n', (3265, 3285), True, 'import numpy as np\n'), ((4289, 4314), 'numpy.dot', 'np.dot', (['self.x.T', 'self.dz'], {}), '(self.x.T, self.dz)\n', (4295, 4314), True, 'import numpy as np\n'), ((4385, 4410), 'numpy.dot', 'np.dot', (['self.dz', 'self.V.T'], {}), '(self.dz, self.V.T)\n', (4391, 4410), True, 'import numpy as np\n'), ((1434, 1465), 'numpy.zeros', 'np.zeros', (['(1, self.hidden_size)'], {}), '((1, self.hidden_size))\n', 
(1442, 1465), True, 'import numpy as np\n'), ((1888, 1912), 'numpy.multiply', 'np.multiply', (['self.f', 'c_p'], {}), '(self.f, c_p)\n', (1899, 1912), True, 'import numpy as np\n'), ((1915, 1942), 'numpy.multiply', 'np.multiply', (['self.i', 'self.g'], {}), '(self.i, self.g)\n', (1926, 1942), True, 'import numpy as np\n'), ((3591, 3618), 'numpy.dot', 'np.dot', (['self.dzo', 'self.wo.T'], {}), '(self.dzo, self.wo.T)\n', (3597, 3618), True, 'import numpy as np\n'), ((3760, 3787), 'numpy.dot', 'np.dot', (['self.dzo', 'self.uo.T'], {}), '(self.dzo, self.uo.T)\n', (3766, 3787), True, 'import numpy as np\n'), ((4132, 4163), 'numpy.zeros', 'np.zeros', (['(1, self.output_size)'], {}), '((1, self.output_size))\n', (4140, 4163), True, 'import numpy as np\n'), ((4180, 4192), 'numpy.dot', 'np.dot', (['x', 'V'], {}), '(x, V)\n', (4186, 4192), True, 'import numpy as np\n'), ((2166, 2178), 'numpy.dot', 'np.dot', (['h', 'W'], {}), '(h, W)\n', (2172, 2178), True, 'import numpy as np\n'), ((2181, 2193), 'numpy.dot', 'np.dot', (['x', 'U'], {}), '(x, U)\n', (2187, 2193), True, 'import numpy as np\n'), ((3561, 3588), 'numpy.dot', 'np.dot', (['self.dzg', 'self.wg.T'], {}), '(self.dzg, self.wg.T)\n', (3567, 3588), True, 'import numpy as np\n'), ((3730, 3757), 'numpy.dot', 'np.dot', (['self.dzg', 'self.ug.T'], {}), '(self.dzg, self.ug.T)\n', (3736, 3757), True, 'import numpy as np\n'), ((2104, 2116), 'numpy.dot', 'np.dot', (['h', 'W'], {}), '(h, W)\n', (2110, 2116), True, 'import numpy as np\n'), ((2119, 2131), 'numpy.dot', 'np.dot', (['x', 'U'], {}), '(x, U)\n', (2125, 2131), True, 'import numpy as np\n'), ((3501, 3528), 'numpy.dot', 'np.dot', (['self.dzf', 'self.wf.T'], {}), '(self.dzf, self.wf.T)\n', (3507, 3528), True, 'import numpy as np\n'), ((3531, 3558), 'numpy.dot', 'np.dot', (['self.dzi', 'self.wi.T'], {}), '(self.dzi, self.wi.T)\n', (3537, 3558), True, 'import numpy as np\n'), ((3670, 3697), 'numpy.dot', 'np.dot', (['self.dzf', 'self.uf.T'], {}), '(self.dzf, self.uf.T)\n', 
(3676, 3697), True, 'import numpy as np\n'), ((3700, 3727), 'numpy.dot', 'np.dot', (['self.dzi', 'self.ui.T'], {}), '(self.dzi, self.ui.T)\n', (3706, 3727), True, 'import numpy as np\n')] |
# Copyright 2019 <NAME>
# Licensed under the MIT License (the "License").
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at https://mit-license.org
# February 2019
# This script tests color function of filterizePy package.
import pytest
import numpy as np
import pandas as pd
import os
import imghdr
import skimage.io
from filterizePy.greenscale import greenscale
def test_greenscale_white():
    # White image testing
    # NOTE(review): ``test_img`` documents the expected content of
    # test_img/test_img.png but is never used by the assertions below.
    test_img = np.array([[[255, 255, 255],[255, 255, 255],[255, 255, 255]] ,
                [[255, 255, 255],[255, 255, 255],[255, 255, 255]],
                [[255, 255, 255],[255, 255, 255],[255, 255, 255]],
                [[255, 255, 255],[255, 255, 255],[255, 255, 255]],
                [[255, 255, 255],[255, 255, 255],[255, 255, 255]]], dtype = "uint8")
    gs_test_img = np.array([[[ 0, 230, 0],[ 0, 230, 0],[ 0, 230, 0]],
                [[ 0, 230, 0],[ 0, 230, 0],[ 0, 230, 0]],
                [[ 0, 230, 0],[ 0, 230, 0],[ 0, 230, 0]],
                [[ 0, 230, 0],[ 0, 230, 0],[ 0, 230, 0]],
                [[ 0, 230, 0],[ 0, 230, 0],[ 0, 230, 0]]], dtype = "uint8")
    # Run the filter, then compare the file it produced against the
    # expected all-green pixels (assumes greenscale writes a 'gs_'-
    # prefixed file next to the input — consistent with test_same_size).
    greenscale("test_img/test_img.png")
    output_expected = skimage.io.imread("test_img/gs_test_img.png")
    assert np.array_equal(output_expected, gs_test_img), "The greenscale function does not work properly."
def test_greenscale_black():
    # Black image testing
    # NOTE(review): ``test_img_black`` is never used, and
    # ``output_expected`` below reads the INPUT file rather than the
    # 'gs_'-prefixed output (cf. test_same_size); the assertion only
    # passes because input and expected output are both all-black.
    test_img_black = np.array([[[0, 0, 0]]], dtype = "uint8")
    gs_test_img_black = np.array([[[0, 0, 0]]], dtype = "uint8")
    greenscale("test_img/test_img_black.jpeg")
    output_expected = skimage.io.imread("test_img/test_img_black.jpeg")
    assert np.array_equal(output_expected, gs_test_img_black), "The greenscale function does not work properly."
def test_same_size():
    """The greenscaled output keeps the dimensions of its source image."""
    src_path = "test_img/test_original.jpg"
    greenscale(src_path)
    original = skimage.io.imread('test_img/test_original.jpg')
    converted = skimage.io.imread('test_img/gs_test_original.jpg')
    assert original.shape == converted.shape, "Input and output dimensions do not match"
def check_output_type():
    """Check the greenscaled file keeps a valid, matching image format.

    NOTE(review): this is missing the ``test_`` prefix, so pytest never
    collects it; rename to ``test_output_type`` to enable it.
    """
    input_path = "test_img/test_original.jpg"
    output_path = "test_img/gs_test_original.jpg"
    greenscale(input_path)
    # BUG FIX: imghdr.what expects a file path (or file object), not a
    # decoded pixel array as the original passed; also repaired the
    # garbled assertion message ("formatest_img").
    assert imghdr.what(output_path) in ['png','jpeg','gif','bmp','jpg'] and \
        imghdr.what(output_path) == imghdr.what(input_path), \
        "The output image has a different file format"
def test_input_img():
    """greenscale raises FileNotFoundError for a nonexistent path."""
    # Sanity call on a real image first (matches the original behavior).
    greenscale("test_img/test_original.jpg")
    with pytest.raises(FileNotFoundError):
        greenscale("not a file path")
def test_invalid_input():
    """A non-string argument (int) makes greenscale raise AttributeError."""
    pytest.raises(AttributeError, greenscale, 123)
def test_invalid_input_type_error():
    """A nonexistent path string makes greenscale raise FileNotFoundError."""
    pytest.raises(FileNotFoundError, greenscale, "Hi")
| [
"filterizePy.greenscale.greenscale",
"numpy.array",
"numpy.array_equal",
"imghdr.what",
"pytest.raises"
] | [((495, 800), 'numpy.array', 'np.array', (['[[[255, 255, 255], [255, 255, 255], [255, 255, 255]], [[255, 255, 255], [\n 255, 255, 255], [255, 255, 255]], [[255, 255, 255], [255, 255, 255], [\n 255, 255, 255]], [[255, 255, 255], [255, 255, 255], [255, 255, 255]], [\n [255, 255, 255], [255, 255, 255], [255, 255, 255]]]'], {'dtype': '"""uint8"""'}), "([[[255, 255, 255], [255, 255, 255], [255, 255, 255]], [[255, 255, \n 255], [255, 255, 255], [255, 255, 255]], [[255, 255, 255], [255, 255, \n 255], [255, 255, 255]], [[255, 255, 255], [255, 255, 255], [255, 255, \n 255]], [[255, 255, 255], [255, 255, 255], [255, 255, 255]]], dtype='uint8')\n", (503, 800), True, 'import numpy as np\n'), ((901, 1144), 'numpy.array', 'np.array', (['[[[0, 230, 0], [0, 230, 0], [0, 230, 0]], [[0, 230, 0], [0, 230, 0], [0, \n 230, 0]], [[0, 230, 0], [0, 230, 0], [0, 230, 0]], [[0, 230, 0], [0, \n 230, 0], [0, 230, 0]], [[0, 230, 0], [0, 230, 0], [0, 230, 0]]]'], {'dtype': '"""uint8"""'}), "([[[0, 230, 0], [0, 230, 0], [0, 230, 0]], [[0, 230, 0], [0, 230, 0\n ], [0, 230, 0]], [[0, 230, 0], [0, 230, 0], [0, 230, 0]], [[0, 230, 0],\n [0, 230, 0], [0, 230, 0]], [[0, 230, 0], [0, 230, 0], [0, 230, 0]]],\n dtype='uint8')\n", (909, 1144), True, 'import numpy as np\n'), ((1280, 1315), 'filterizePy.greenscale.greenscale', 'greenscale', (['"""test_img/test_img.png"""'], {}), "('test_img/test_img.png')\n", (1290, 1315), False, 'from filterizePy.greenscale import greenscale\n'), ((1395, 1439), 'numpy.array_equal', 'np.array_equal', (['output_expected', 'gs_test_img'], {}), '(output_expected, gs_test_img)\n', (1409, 1439), True, 'import numpy as np\n'), ((1568, 1606), 'numpy.array', 'np.array', (['[[[0, 0, 0]]]'], {'dtype': '"""uint8"""'}), "([[[0, 0, 0]]], dtype='uint8')\n", (1576, 1606), True, 'import numpy as np\n'), ((1633, 1671), 'numpy.array', 'np.array', (['[[[0, 0, 0]]]'], {'dtype': '"""uint8"""'}), "([[[0, 0, 0]]], dtype='uint8')\n", (1641, 1671), True, 'import numpy as np\n'), ((1679, 1721), 
'filterizePy.greenscale.greenscale', 'greenscale', (['"""test_img/test_img_black.jpeg"""'], {}), "('test_img/test_img_black.jpeg')\n", (1689, 1721), False, 'from filterizePy.greenscale import greenscale\n'), ((1805, 1855), 'numpy.array_equal', 'np.array_equal', (['output_expected', 'gs_test_img_black'], {}), '(output_expected, gs_test_img_black)\n', (1819, 1855), True, 'import numpy as np\n'), ((2016, 2037), 'filterizePy.greenscale.greenscale', 'greenscale', (['input_img'], {}), '(input_img)\n', (2026, 2037), False, 'from filterizePy.greenscale import greenscale\n'), ((2336, 2357), 'filterizePy.greenscale.greenscale', 'greenscale', (['input_img'], {}), '(input_img)\n', (2346, 2357), False, 'from filterizePy.greenscale import greenscale\n'), ((2739, 2760), 'filterizePy.greenscale.greenscale', 'greenscale', (['input_img'], {}), '(input_img)\n', (2749, 2760), False, 'from filterizePy.greenscale import greenscale\n'), ((2770, 2802), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (2783, 2802), False, 'import pytest\n'), ((2812, 2841), 'filterizePy.greenscale.greenscale', 'greenscale', (['"""not a file path"""'], {}), "('not a file path')\n", (2822, 2841), False, 'from filterizePy.greenscale import greenscale\n'), ((2878, 2907), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (2891, 2907), False, 'import pytest\n'), ((2917, 2932), 'filterizePy.greenscale.greenscale', 'greenscale', (['(123)'], {}), '(123)\n', (2927, 2932), False, 'from filterizePy.greenscale import greenscale\n'), ((2980, 3012), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (2993, 3012), False, 'import pytest\n'), ((3022, 3038), 'filterizePy.greenscale.greenscale', 'greenscale', (['"""Hi"""'], {}), "('Hi')\n", (3032, 3038), False, 'from filterizePy.greenscale import greenscale\n'), ((2501, 2524), 'imghdr.what', 'imghdr.what', (['output_img'], {}), '(output_img)\n', (2512, 2524), False, 
'import imghdr\n'), ((2565, 2588), 'imghdr.what', 'imghdr.what', (['output_img'], {}), '(output_img)\n', (2576, 2588), False, 'import imghdr\n'), ((2590, 2612), 'imghdr.what', 'imghdr.what', (['input_img'], {}), '(input_img)\n', (2601, 2612), False, 'import imghdr\n')] |
# Programming I R
# The NumPy package
import numpy as np
# --- Creating arrays from a list and from a tuple ---
arr_list = np.array([1, 2, 3, 4, 5])
print(arr_list)
print(type(arr_list))
print()
arr_tuple = np.array((1, 2, 3, 4, 5))
print(arr_tuple)
print(type(arr_tuple))
print()
# --- Array dimensionality (ndim): 0-D scalar, 2-D, 3-D, forced 5-D ---
arr = np.array(42)
print(arr)
print(arr.ndim)
print()
arr = np.array([[1, 2, 3], [4, 5, 6]])
print(arr)
print(arr.ndim)
print()
arr = np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
print(arr)
print(arr.ndim)
print()
arr = np.array([1, 2, 3, 4], ndmin = 5)
print(arr)
print(arr.ndim)
print()
#***********************************************************************************
# --- Indexing: single elements, 2-D (row, col) and 3-D access ---
arr = np.array([1, 2, 3, 4])
print(arr[0])
print(arr[2] + arr[3])
print()
arr = np.array([[1,2,3,4,5], [6,7,8,9,10]])
print(arr[1, 3])
print(arr[1, 2])
print(arr[1, -1])
print()
arr = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
print(arr[0, 1, 2])
print()
#***********************************************************************************
# --- Slicing: 1-D ranges with step, and 2-D sub-array slices ---
arr = np.array([1, 2, 3, 4, 5, 6, 7])
print(arr[1:5])
print(arr[4:])
print(arr[:4])
print(arr[1:5:2])
print()
arr = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
print(arr[1, 1:4])
print(arr[0:2, 1:4])
#***********************************************************************************
# --- Element-wise arithmetic: add, subtract, multiply, divide ---
arr1 = np.array([10, 11, 12, 13, 14, 15])
arr2 = np.array([20, 21, 22, 23, 24, 25])
newarr = np.add(arr1, arr2)
print(newarr)
print()
newarr = np.subtract(arr1, arr2)
print(newarr)
print()
newarr = np.multiply(arr1, arr2)
print(newarr)
print()
newarr = np.divide(arr1, arr2)
print(newarr)
print() | [
"numpy.multiply",
"numpy.add",
"numpy.subtract",
"numpy.array",
"numpy.divide"
] | [((67, 92), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (75, 92), True, 'import numpy as np\n'), ((153, 178), 'numpy.array', 'np.array', (['(1, 2, 3, 4, 5)'], {}), '((1, 2, 3, 4, 5))\n', (161, 178), True, 'import numpy as np\n'), ((235, 247), 'numpy.array', 'np.array', (['(42)'], {}), '(42)\n', (243, 247), True, 'import numpy as np\n'), ((290, 322), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (298, 322), True, 'import numpy as np\n'), ((365, 423), 'numpy.array', 'np.array', (['[[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]]'], {}), '([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])\n', (373, 423), True, 'import numpy as np\n'), ((466, 497), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {'ndmin': '(5)'}), '([1, 2, 3, 4], ndmin=5)\n', (474, 497), True, 'import numpy as np\n'), ((628, 650), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (636, 650), True, 'import numpy as np\n'), ((703, 748), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]'], {}), '([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])\n', (711, 748), True, 'import numpy as np\n'), ((809, 870), 'numpy.array', 'np.array', (['[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]'], {}), '([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])\n', (817, 870), True, 'import numpy as np\n'), ((992, 1023), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7]'], {}), '([1, 2, 3, 4, 5, 6, 7])\n', (1000, 1023), True, 'import numpy as np\n'), ((1103, 1148), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]'], {}), '([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])\n', (1111, 1148), True, 'import numpy as np\n'), ((1283, 1317), 'numpy.array', 'np.array', (['[10, 11, 12, 13, 14, 15]'], {}), '([10, 11, 12, 13, 14, 15])\n', (1291, 1317), True, 'import numpy as np\n'), ((1325, 1359), 'numpy.array', 'np.array', (['[20, 21, 22, 23, 24, 25]'], {}), '([20, 21, 22, 23, 24, 25])\n', (1333, 1359), True, 'import 
numpy as np\n'), ((1369, 1387), 'numpy.add', 'np.add', (['arr1', 'arr2'], {}), '(arr1, arr2)\n', (1375, 1387), True, 'import numpy as np\n'), ((1420, 1443), 'numpy.subtract', 'np.subtract', (['arr1', 'arr2'], {}), '(arr1, arr2)\n', (1431, 1443), True, 'import numpy as np\n'), ((1476, 1499), 'numpy.multiply', 'np.multiply', (['arr1', 'arr2'], {}), '(arr1, arr2)\n', (1487, 1499), True, 'import numpy as np\n'), ((1532, 1553), 'numpy.divide', 'np.divide', (['arr1', 'arr2'], {}), '(arr1, arr2)\n', (1541, 1553), True, 'import numpy as np\n')] |
import sys
from models.lstm_vae import *
from models.lstm import *
from models.lstm_vae_output import *
from models.nn import *
from keras.models import model_from_json
import pickle as pkl
from functions import *
from constants import *
from scipy import spatial
from rake_nltk import Rake
import numpy as np
# Base directory holding the pickled GloVe dictionaries; machine-specific — adjust per install.
glove_paths = "/media/edward/c40f4a81-55c2-49d2-8b86-91d871810cf2/GloVe_Tester/"
class Chatbot():
	"""GloVe/LSTM-VAE chatbot pipeline.

	mode == 1: train the NN latent-dimension mapper;
	mode == 2: train the chatbot LSTM-VAE;
	mode == 3: inference (additionally loads the reverse GloVe dictionaries
	           and the KD-tree used to decode predicted vectors back to words).

	Every input sentence is turned into a stacked "feature matrix" (emotion
	vector + intent latent vector + one GloVe vector per keyword); that
	construction was duplicated in four methods and now lives in
	``_build_feature_matrix``.
	"""
	def __init__(self, mode):
		#* mode == 1: Train NN; mode == 2: Train LSTM; mode == 3: Test
		#* Change the PATH to the respected destination!
		print_load("Loading [200D GloVe] Module...")
		self.glove_dict = pkl.load(open(glove_paths + "glove_dict.pkl", "rb"))
		print_complete("Loaded [200D GloVe] Module.")
		print_load("Loading [300D GloVe] Module...")
		self.glove_dict_300d = pkl.load(open(glove_paths + "glove_dict_300d.pkl", "rb"))
		print_complete("Loaded [300D GloVe] Module.")
		if mode == 3:
			#* Reverse dictionaries + KD-tree are only needed to decode predictions.
			print_load("Loading [300D GloVe Words] Module...")
			self.glove_dict_words_300d = pkl.load(open(glove_paths + "reverse_glove_dict_300d_words.pkl", "rb"))
			print_complete("Loaded [300D GloVe Words] Module.")
			print_load("Loading [300D GloVe Values] Module...")
			self.glove_dict_values_300d = pkl.load(open(glove_paths + "reverse_glove_dict_300d_values.pkl", "rb"))
			print_complete("Loaded [300D GloVe Values] Module.")
			#* Sentinel end-of-sentence vector mapped to the "___END___" token.
			end = np.zeros(len(self.glove_dict_300d["the"])); end[0] += 1
			self.glove_dict_values_300d.append(end)
			self.glove_dict_words_300d.append("___END___")
			print_load("Loading [300D GloVe KDTree] Module...")
			self.glove_dict_values_KDTree_300d = spatial.KDTree(self.glove_dict_values_300d)
			print_complete("Loaded [300D GloVe KDTree] Module.")
		#* Emotion classifier:
		print_load("Loading [Emotion Classifier] Module...")
		self.emotion_type = pkl.load(open("sentence_dimentionalizer/emotion_classifier/emotion_types.pkl", "rb"))
		self.emotion_classifier_model = Single_LSTM(200, len(self.emotion_type))
		self.emotion_classifier_model.load("sentence_dimentionalizer/emotion_classifier/model")
		print_complete("Loaded [Emotion Classifier] Module.")
		#* Intent classifier:
		print_load("Loading [Query Type Classifier] Module...")
		self.intent_classifier = LSTM_VAE(input_dim=300, batch_size=1, intermediate_dim=100, latent_dim=200, epsilon_std=1.)
		self.intent_classifier.load("sentence_dimentionalizer/intent_calculator/vae_test2", "sentence_dimentionalizer/intent_calculator/enc_test2")
		print_complete("Loaded [Query Type Classifier] Module.")
		#* Keyword extractor:
		print_load("Loading [Keyword Extractor] Module...")
		self.r = Rake()
		print_complete("Loaded [Keyword Extractor] Module.")
		#* Conversation Saver:
		print_load("Loading [Conversation Saver] Module...")
		self.previous_conversations = []
		print_complete("Loaded [Conversation Saver] Module.")
		#* -------------------------------------------------------------------
		#* Build chatbot module:
		print_load("Loading [Chatbot Self-assertion] Module...")
		self.chatbot_lstm = LSTM_VAE(input_dim=200, batch_size=1, intermediate_dim=128, latent_dim=64, epsilon_std=1.)
		if mode != 2:
			self.chatbot_lstm.load("params/chatbot_vae", "params/chatbot_enc")
		print_complete("Loaded [Chatbot Self-assertion] Module.")
		#* Build nn module:
		print_load("Loading [Latent Dimention Mapper] Module...")
		self.chatbot_nn = NN(input_dim=64, output_dim=200)
		if mode == 3:
			self.chatbot_nn.load("params/chatbot_nn_small")
		print_complete("Loaded [Latent Dimention Mapper] Module.")
		if mode != 2:
			#* Build output module for training:
			print_load("Loading [Training output] Module...")
			self.chatbot_lstm_y = LSTM_VAE_Output(input_dim=300, batch_size=1, timesteps=padding, intermediate_dim=100, latent_dim=200, epsilon_std=1.)
			self.chatbot_lstm_y.load("output_module/vae_padding", "output_module/enc_padding", "output_module/dec_padding")
			print_complete("Loaded [Training output] Module.")
	def _build_feature_matrix(self, sentence, intent_encoding=True):
		"""Encode *sentence* into the stacked feature matrix fed to the chatbot VAE.

		Rows: [emotion GloVe vector, intent latent vector, one GloVe vector per
		extracted keyword token].

		:param sentence: raw input sentence (str).
		:param intent_encoding: when True, build the 300d sentence vector with
			``encode_sentence_intent`` (training path); otherwise with
			``encode_sentence`` (prediction path).
			NOTE(review): the original code used different 300d encoders for
			training vs. prediction — preserved here; confirm whether that
			asymmetry is intentional.
		:return: 2-D numpy array of stacked feature vectors.
		"""
		vec_x = encode_sentence(sentence, self.glove_dict)
		if intent_encoding:
			vec_x_300d = encode_sentence_intent(sentence, self.glove_dict_300d)
		else:
			vec_x_300d = encode_sentence(sentence, self.glove_dict_300d)
		#* Emotion classifier: argmax class index, re-encoded as a GloVe vector.
		ECM_value = self.emotion_classifier_model.predict(np.array([vec_x]))
		ECM_value = int(np.argmax(ECM_value, axis=-1)[0])
		ECM_value = encode_sentence(self.emotion_type[ECM_value], self.glove_dict)
		#* Intent classifier: latent encoding of the 300d sentence vector.
		ENC_value = self.intent_classifier.enc_predict(np.array([vec_x_300d]))
		#* Keyword extractor: one GloVe vector per keyword token.
		PRE_value = []
		self.r.extract_keywords_from_text(sentence)
		for keywords in self.r.get_ranked_phrases():
			for keyword in keywords.split():
				PRE_value.append(encode_keyword(keyword, self.glove_dict))
		#* Stack all vectors into one matrix:
		final_matrix = [ECM_value[0], ENC_value[0]]
		for vec in PRE_value:
			final_matrix.append(vec)
		return np.array(final_matrix)
	def train_chatbot_lstm(self, train_x, train_count):
		"""Train the self-assertion LSTM-VAE on the feature matrices of *train_x*.

		Also appends the encoded conversation to ``previous_conversations``.
		Training can be stopped early with Ctrl-C.
		"""
		train_data = []
		for sentence_x in train_x:
			train_data.append(self._build_feature_matrix(sentence_x))
		#* Conversation Memorizer
		self.previous_conversations.append(train_data)
		train_data = np.array(train_data)
		for _ in tqdm(range(train_count), desc="Episode: "):
			try:
				self.chatbot_lstm.train(train_data)
			except KeyboardInterrupt:
				return
	def train_chatbot_nn(self, train_x, train_y, train_count):
		"""Train the NN that maps chatbot latent vectors to output latent vectors.

		*train_x* and *train_y* are parallel lists of input/response sentences.
		Training can be stopped early with Ctrl-C.
		"""
		x = []; y = []
		for sentence_x, sentence_y in zip(train_x, train_y):
			print(sentence_x, sentence_y)
			vec_y_300d = encode_sentence_intent_padding(sentence_y, self.glove_dict_300d, padding)
			final_matrix = self._build_feature_matrix(sentence_x)
			x_output = self.chatbot_lstm.enc_predict(np.array([final_matrix]))
			y_output = self.chatbot_lstm_y.enc_predict(np.array([vec_y_300d]))
			x.append(x_output)
			y.append(y_output)
		x = np.array(x); y = np.array(y)
		for _ in tqdm(range(train_count), desc="Episode: "):
			try:
				self.chatbot_nn.train(x, y)
			except KeyboardInterrupt:
				return
	def predict(self, x):
		"""Generate a response sentence for the input sentence *x* (mode 3 only)."""
		final_matrix = self._build_feature_matrix(x, intent_encoding=False)
		lstm_y = self.chatbot_lstm.enc_predict(np.array([final_matrix]))
		nn_y = self.chatbot_nn.predict(np.array(lstm_y))
		output = self.chatbot_lstm_y.dec_predict(np.array(nn_y))
		return decode_sentence_padding(output, self.glove_dict_words_300d, self.glove_dict_values_KDTree_300d)
	def predict_latent_mapping(self, x):
		"""Return the chatbot-VAE latent encoding of the input sentence *x*."""
		final_matrix = self._build_feature_matrix(x, intent_encoding=False)
		return self.chatbot_lstm.enc_predict(np.array([final_matrix]))
| [
"numpy.array",
"rake_nltk.Rake",
"numpy.argmax",
"scipy.spatial.KDTree"
] | [((2854, 2860), 'rake_nltk.Rake', 'Rake', ([], {}), '()\n', (2858, 2860), False, 'from rake_nltk import Rake\n'), ((6019, 6039), 'numpy.array', 'np.array', (['train_data'], {}), '(train_data)\n', (6027, 6039), True, 'import numpy as np\n'), ((8112, 8123), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (8120, 8123), True, 'import numpy as np\n'), ((8129, 8140), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (8137, 8140), True, 'import numpy as np\n'), ((9405, 9427), 'numpy.array', 'np.array', (['final_matrix'], {}), '(final_matrix)\n', (9413, 9427), True, 'import numpy as np\n'), ((10824, 10846), 'numpy.array', 'np.array', (['final_matrix'], {}), '(final_matrix)\n', (10832, 10846), True, 'import numpy as np\n'), ((1758, 1801), 'scipy.spatial.KDTree', 'spatial.KDTree', (['self.glove_dict_values_300d'], {}), '(self.glove_dict_values_300d)\n', (1772, 1801), False, 'from scipy import spatial\n'), ((5858, 5880), 'numpy.array', 'np.array', (['final_matrix'], {}), '(final_matrix)\n', (5866, 5880), True, 'import numpy as np\n'), ((7857, 7879), 'numpy.array', 'np.array', (['final_matrix'], {}), '(final_matrix)\n', (7865, 7879), True, 'import numpy as np\n'), ((8607, 8624), 'numpy.array', 'np.array', (['[vec_x]'], {}), '([vec_x])\n', (8615, 8624), True, 'import numpy as np\n'), ((8898, 8920), 'numpy.array', 'np.array', (['[vec_x_300d]'], {}), '([vec_x_300d])\n', (8906, 8920), True, 'import numpy as np\n'), ((9476, 9500), 'numpy.array', 'np.array', (['[final_matrix]'], {}), '([final_matrix])\n', (9484, 9500), True, 'import numpy as np\n'), ((9541, 9557), 'numpy.array', 'np.array', (['lstm_y'], {}), '(lstm_y)\n', (9549, 9557), True, 'import numpy as np\n'), ((9608, 9622), 'numpy.array', 'np.array', (['nn_y'], {}), '(nn_y)\n', (9616, 9622), True, 'import numpy as np\n'), ((10026, 10043), 'numpy.array', 'np.array', (['[vec_x]'], {}), '([vec_x])\n', (10034, 10043), True, 'import numpy as np\n'), ((10317, 10339), 'numpy.array', 'np.array', (['[vec_x_300d]'], {}), 
'([vec_x_300d])\n', (10325, 10339), True, 'import numpy as np\n'), ((10893, 10917), 'numpy.array', 'np.array', (['[final_matrix]'], {}), '([final_matrix])\n', (10901, 10917), True, 'import numpy as np\n'), ((4969, 4986), 'numpy.array', 'np.array', (['[vec_x]'], {}), '([vec_x])\n', (4977, 4986), True, 'import numpy as np\n'), ((5280, 5302), 'numpy.array', 'np.array', (['[vec_x_300d]'], {}), '([vec_x_300d])\n', (5288, 5302), True, 'import numpy as np\n'), ((6982, 6999), 'numpy.array', 'np.array', (['[vec_x]'], {}), '([vec_x])\n', (6990, 6999), True, 'import numpy as np\n'), ((7293, 7315), 'numpy.array', 'np.array', (['[vec_x_300d]'], {}), '([vec_x_300d])\n', (7301, 7315), True, 'import numpy as np\n'), ((7933, 7957), 'numpy.array', 'np.array', (['[final_matrix]'], {}), '([final_matrix])\n', (7941, 7957), True, 'import numpy as np\n'), ((8014, 8036), 'numpy.array', 'np.array', (['[vec_y_300d]'], {}), '([vec_y_300d])\n', (8022, 8036), True, 'import numpy as np\n'), ((8650, 8679), 'numpy.argmax', 'np.argmax', (['ECM_value'], {'axis': '(-1)'}), '(ECM_value, axis=-1)\n', (8659, 8679), True, 'import numpy as np\n'), ((10069, 10098), 'numpy.argmax', 'np.argmax', (['ECM_value'], {'axis': '(-1)'}), '(ECM_value, axis=-1)\n', (10078, 10098), True, 'import numpy as np\n'), ((5016, 5045), 'numpy.argmax', 'np.argmax', (['ECM_value'], {'axis': '(-1)'}), '(ECM_value, axis=-1)\n', (5025, 5045), True, 'import numpy as np\n'), ((7029, 7058), 'numpy.argmax', 'np.argmax', (['ECM_value'], {'axis': '(-1)'}), '(ECM_value, axis=-1)\n', (7038, 7058), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import knn
def knn_plot(x, y, k):
    """Scatter the labelled 2-D points in *x* and overlay the k-NN decision
    boundary as a contour over a regular grid.

    x : (n, 2) array of points; y : integer class labels (0/1, used to index
    the colour list); k : neighbour count forwarded to knn.knn.
    """
    plt.figure(figsize=(10, 10))
    palette = ['red', 'blue']
    # One scatter call per class so each class gets its own colour.
    for cls in np.unique(y):
        pts = x[y == cls]
        plt.scatter(pts[:, 0], pts[:, 1], s=3, c=palette[cls])
    step = 0.05
    xs = np.arange(np.min(x[:, 0]), np.max(x[:, 0]) + step, step)
    ys = np.arange(np.min(x[:, 1]), np.max(x[:, 1]) + step, step)
    grid_x, grid_y = np.meshgrid(xs, ys)
    # Classify every grid node to recover the decision regions.
    nodes = np.vstack((grid_x.flatten(), grid_y.flatten())).T
    labels = knn.knn(nodes, x, y, k).reshape(grid_x.shape)
    plt.contour(grid_x, grid_y, labels)
    plt.title('K = {}'.format(k))
| [
"numpy.unique",
"knn.knn",
"numpy.max",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"numpy.min",
"matplotlib.pyplot.title",
"numpy.arange"
] | [((93, 121), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (103, 121), True, 'import matplotlib.pyplot as plt\n'), ((169, 181), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (178, 181), True, 'import numpy as np\n'), ((280, 295), 'numpy.min', 'np.min', (['x[:, 0]'], {}), '(x[:, 0])\n', (286, 295), True, 'import numpy as np\n'), ((307, 322), 'numpy.max', 'np.max', (['x[:, 0]'], {}), '(x[:, 0])\n', (313, 322), True, 'import numpy as np\n'), ((334, 349), 'numpy.min', 'np.min', (['x[:, 1]'], {}), '(x[:, 1])\n', (340, 349), True, 'import numpy as np\n'), ((361, 376), 'numpy.max', 'np.max', (['x[:, 1]'], {}), '(x[:, 1])\n', (367, 376), True, 'import numpy as np\n'), ((619, 657), 'matplotlib.pyplot.contour', 'plt.contour', (['mesh[0]', 'mesh[1]', 'classes'], {}), '(mesh[0], mesh[1], classes)\n', (630, 657), True, 'import matplotlib.pyplot as plt\n'), ((690, 703), 'matplotlib.pyplot.title', 'plt.title', (['ti'], {}), '(ti)\n', (699, 703), True, 'import matplotlib.pyplot as plt\n'), ((217, 267), 'matplotlib.pyplot.scatter', 'plt.scatter', (['p[:, 0]', 'p[:, 1]'], {'s': '(3)', 'c': 'color[label]'}), '(p[:, 0], p[:, 1], s=3, c=color[label])\n', (228, 267), True, 'import matplotlib.pyplot as plt\n'), ((416, 450), 'numpy.arange', 'np.arange', (['xmin', '(xmax + step)', 'step'], {}), '(xmin, xmax + step, step)\n', (425, 450), True, 'import numpy as np\n'), ((452, 486), 'numpy.arange', 'np.arange', (['ymin', '(ymax + step)', 'step'], {}), '(ymin, ymax + step, step)\n', (461, 486), True, 'import numpy as np\n'), ((567, 591), 'knn.knn', 'knn.knn', (['mesh_f', 'x', 'y', 'k'], {}), '(mesh_f, x, y, k)\n', (574, 591), False, 'import knn\n')] |
# Copyright (c) AIRBUS and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import numpy as np
import gym
from typing import Callable
from collections.abc import Iterable
from skdecide import Domain, Solver
from skdecide.hub.solver.cgp import cgp
from skdecide.builders.solver import Policies, Restorable
from skdecide.builders.domain import SingleAgent, Sequential, Environment, UnrestrictedActions, Initializable, History, \
PartiallyObservable, Rewards
class D(Domain, SingleAgent, Sequential, Environment, UnrestrictedActions, Initializable, History, PartiallyObservable,
        Rewards):
    """Domain characteristics required by the AugmentedRandomSearch solver."""
    pass
#for normalizing states
class Normalizer():
    """Online mean/variance normaliser for fixed-size observation vectors.

    Statistics are updated incrementally, one observation at a time; the
    variance is clipped from below at 1e-2 to avoid division blow-ups.
    """

    def __init__(self, nb_inputs):
        self.n = np.zeros(nb_inputs)
        self.mean = np.zeros(nb_inputs)
        self.mean_diff = np.zeros(nb_inputs)
        self.var = np.zeros(nb_inputs)

    def observe(self, x):
        """Fold one observation vector *x* into the running statistics."""
        self.n += 1.
        previous_mean = self.mean.copy()
        self.mean = self.mean + (x - self.mean) / self.n
        self.mean_diff = self.mean_diff + (x - previous_mean) * (x - self.mean)
        self.var = (self.mean_diff / self.n).clip(min=1e-2)

    def normalize(self, inputs):
        """Return *inputs* standardised by the running mean and std.

        Until at least two observations have been seen, inputs are returned
        unchanged.
        """
        if self.n[0] <= 1:
            return inputs
        return (inputs - self.mean) / np.sqrt(self.var)
def flatten(c):
    """
    Generator yielding the atoms of a nested structure depth-first.

    Strings count as atoms: they are yielded whole, never iterated
    character by character.
    """
    for item in c:
        if isinstance(item, Iterable) and not isinstance(item, str):
            yield from flatten(item)
        else:
            yield item
class AugmentedRandomSearch(Solver, Policies, Restorable):
    """Augmented Random Search (ARS) solver for scikit-decide domains.

    ARS learns a linear policy by sampling random perturbation directions,
    rolling the policy out in the positive and negative direction of each
    perturbation, and stepping the policy along the top-scoring directions
    (Mania et al., 2018, "Simple random search provides a competitive
    approach to reinforcement learning").
    """
    T_domain = D

    def __init__(self,
                 n_epochs=1000,
                 epoch_size=1000,
                 directions = 10,
                 top_directions = 3,
                 learning_rate = 0.02,
                 policy_noise = 0.03,
                 reward_maximization = True
                 ) -> None:
        """Configure the search.

        :param n_epochs: number of policy-update iterations.
        :param epoch_size: maximum number of steps per rollout.
        :param directions: number of random perturbation directions per epoch.
        :param top_directions: number of best directions kept for the update
            (must not exceed ``directions``).
        :param learning_rate: step size of the policy update.
        :param policy_noise: magnitude of each perturbation.
        :param reward_maximization: sort directions by descending reward if True.
        """
        self.env = None
        self.n_epochs = n_epochs
        self.learning_rate = learning_rate
        self.epoch_size = epoch_size
        self.directions = directions
        self.top_directions = top_directions
        self.policy = None
        self.policy_noise = policy_noise
        self.reward_maximization = reward_maximization
        assert self.top_directions <= self.directions

    def evaluate_policy(self, state, delta=None, direction=None):
        """Return the linear policy's action for *state*, optionally shifted
        by ``policy_noise * delta`` in the positive or negative direction."""
        if direction is None:
            return self.policy.dot(state)
        elif direction == "positive":
            return (self.policy + self.policy_noise * delta).dot(state)
        else:
            return (self.policy - self.policy_noise * delta).dot(state)

    def explore(self, normalizer, direction=None, delta=None):
        """Run one rollout (at most ``epoch_size`` steps) and return the sum
        of per-step rewards, each clipped to [-1, 1]; NaN rewards are skipped."""
        state = self.env.reset()
        done = False
        num_plays = 0.
        sum_rewards = 0
        while not done and num_plays < self.epoch_size:
            state = cgp.norm_and_flatten(state, self.env.get_observation_space().unwrapped())
            action = self.evaluate_policy(state, delta, direction)
            action = cgp.denorm(action, self.env.get_action_space().unwrapped())
            state, transition_value, done, _ = self.env.step(action).astuple()
            reward = transition_value[0]
            reward = max(min(reward, 1), -1)
            if not np.isnan(reward):
                sum_rewards += reward
                num_plays += 1
        return sum_rewards

    def update_policy(self, rollouts, sigma_r):
        """Step the policy along the aggregated best directions.

        :param rollouts: list of (positive_reward, negative_reward, delta).
        :param sigma_r: standard deviation of all collected rewards.
        """
        step = np.zeros(self.policy.shape)
        for r_pos, r_neg, d in rollouts:
            step += (r_pos - r_neg) * d
        if self.top_directions == 0 or sigma_r == 0:
            return
        self.policy += self.learning_rate / (self.top_directions * sigma_r) * step

    def get_dimension_space(self, space):
        """Return the flat dimensionality of a gym space (Tuple spaces are
        summed recursively; Discrete counts as 1)."""
        if isinstance(space, gym.spaces.Tuple):
            dim = 0
            for element in space:
                dim += self.get_dimension_space(element)
            return dim
        elif isinstance(space, gym.spaces.Discrete):
            return 1
        else:
            return space.shape[0]

    def generate_perturbations(self, space):
        """Sample a random perturbation in [-1, 1] per dimension of *space*."""
        if isinstance(space, gym.spaces.Tuple):
            perturbations = []
            for element in space:
                perturbations += self.generate_perturbations(element)
            return perturbations
        if isinstance(space, gym.spaces.Discrete):
            # np.random.random_integers(n) (1..n inclusive) is deprecated and
            # removed in modern NumPy; randint(1, n + 1) draws the same range.
            return 2*np.random.randint(1, space.n + 1) / space.n -1
        else:
            return 2*np.random.random_sample()-1

    def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
        """Run ARS on the domain produced by *domain_factory*.

        Seeds NumPy for reproducibility, then performs ``n_epochs`` rounds of
        perturbation sampling, rollouts and policy updates.
        """
        self.env = domain_factory()
        np.random.seed(0)
        input_size = self.get_dimension_space(self.env.get_observation_space().unwrapped())
        output_size = self.get_dimension_space(self.env.get_action_space().unwrapped())
        self.policy = np.zeros((output_size, input_size))
        normalizer = Normalizer(input_size)

        for step in range(self.n_epochs):
            # Initializing the perturbations deltas and the positive/negative rewards
            deltas = [2* np.random.random_sample(self.policy.shape)-1 for _ in range(self.directions)]
            positive_rewards = [0] * self.directions
            negative_rewards = [0] * self.directions

            # Getting the positive rewards in the positive directions
            for k in range(self.directions):
                positive_rewards[k] = self.explore(normalizer, direction="positive", delta=deltas[k])

            # Getting the negative rewards in the negative/opposite directions
            for k in range(self.directions):
                negative_rewards[k] = self.explore(normalizer, direction="negative", delta=deltas[k])

            # Gathering all the positive/negative rewards to compute the standard deviation of these rewards
            all_rewards = np.array(positive_rewards + negative_rewards)
            sigma_r = all_rewards.std()

            # Sorting the rollouts by the max(r_pos, r_neg) and selecting the best directions
            scores = {k: max(r_pos, r_neg) for k, (r_pos, r_neg) in enumerate(zip(positive_rewards, negative_rewards))}
            order = sorted(scores.keys(), key=lambda x: scores[x], reverse=self.reward_maximization)[:self.top_directions]
            rollouts = [(positive_rewards[k], negative_rewards[k], deltas[k]) for k in order]

            # Updating our policy
            self.update_policy(rollouts, sigma_r)

            # Printing the final reward of the policy after the update
            reward_evaluation = self.explore(normalizer)
            print('Step:', step, 'Reward:', reward_evaluation, 'Policy', self.policy)
        print('Final Reward:', reward_evaluation, 'Policy', self.policy)

    def _sample_action(self, observation: D.T_agent[D.T_observation]) -> D.T_agent[D.T_concurrency[D.T_event]]:
        """Return the (denormalised) action of the learned linear policy for
        *observation*."""
        action = self.policy.dot(cgp.norm_and_flatten(observation, self.env.get_observation_space().unwrapped()))
        action = cgp.denorm(action, self.env.get_action_space().unwrapped())
        return action
| [
"numpy.sqrt",
"numpy.random.random_sample",
"numpy.random.random_integers",
"numpy.array",
"numpy.zeros",
"numpy.isnan",
"numpy.random.seed"
] | [((864, 883), 'numpy.zeros', 'np.zeros', (['nb_inputs'], {}), '(nb_inputs)\n', (872, 883), True, 'import numpy as np\n'), ((905, 924), 'numpy.zeros', 'np.zeros', (['nb_inputs'], {}), '(nb_inputs)\n', (913, 924), True, 'import numpy as np\n'), ((951, 970), 'numpy.zeros', 'np.zeros', (['nb_inputs'], {}), '(nb_inputs)\n', (959, 970), True, 'import numpy as np\n'), ((991, 1010), 'numpy.zeros', 'np.zeros', (['nb_inputs'], {}), '(nb_inputs)\n', (999, 1010), True, 'import numpy as np\n'), ((1409, 1426), 'numpy.sqrt', 'np.sqrt', (['self.var'], {}), '(self.var)\n', (1416, 1426), True, 'import numpy as np\n'), ((3703, 3730), 'numpy.zeros', 'np.zeros', (['self.policy.shape'], {}), '(self.policy.shape)\n', (3711, 3730), True, 'import numpy as np\n'), ((4906, 4923), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4920, 4923), True, 'import numpy as np\n'), ((5131, 5166), 'numpy.zeros', 'np.zeros', (['(output_size, input_size)'], {}), '((output_size, input_size))\n', (5139, 5166), True, 'import numpy as np\n'), ((6156, 6201), 'numpy.array', 'np.array', (['(positive_rewards + negative_rewards)'], {}), '(positive_rewards + negative_rewards)\n', (6164, 6201), True, 'import numpy as np\n'), ((3523, 3539), 'numpy.isnan', 'np.isnan', (['reward'], {}), '(reward)\n', (3531, 3539), True, 'import numpy as np\n'), ((4757, 4782), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (4780, 4782), True, 'import numpy as np\n'), ((4672, 4706), 'numpy.random.random_integers', 'np.random.random_integers', (['space.n'], {}), '(space.n)\n', (4697, 4706), True, 'import numpy as np\n'), ((5378, 5420), 'numpy.random.random_sample', 'np.random.random_sample', (['self.policy.shape'], {}), '(self.policy.shape)\n', (5401, 5420), True, 'import numpy as np\n')] |
### First draft of a Quantum Circuit object
import numpy as np
def kron(*args):
    """Kronecker product of an arbitrary number of matrices, left to right.

    With no arguments, returns the 1x1 identity [[1.0]].
    """
    result = np.array([[1.0]])
    for factor in args:
        result = np.kron(result, factor)
    return result
def n_kron(n, vector):
    """Kronecker product of *vector* with itself *n* times.

    n == 0 yields the 1x1 identity [[1.0]].
    """
    acc = np.array([[1.0]])
    for _ in range(n):
        acc = np.kron(acc, vector)
    return acc
def dot(*args):
    """Chain np.dot over all arguments, left to right.

    With no arguments, returns the scalar 1.
    """
    product = 1
    for mat in args:
        product = np.dot(product, mat)
    return product
def gate_operator(O, i, n):
    """Embed the single-qubit gate ``O`` (2x2 matrix) acting on qubit index
    ``i`` (0-based) into the full operator on ``n`` qubits, padding the other
    positions with 2x2 identities."""
    identity = np.eye(2)
    return kron(n_kron(i, identity), O, n_kron(n - i - 1, identity))
def gate_multiple_operator(O, args, n):
    """Build the full n-qubit operator applying gate ``O`` on every qubit
    whose 1-based index appears in ``args`` and the identity elsewhere."""
    identity = np.eye(2)
    op = np.array([[1.0]])
    # Qubits are 1-indexed in the public API, hence range(1, n + 1).
    for qubit in range(1, n + 1):
        factor = O if qubit in args else identity
        op = np.kron(op, factor)
    return op
def prepare_projector(P, i, n):
    """Embed the projector ``P`` (typically [[1, 0], [0, 0]]) for a
    measurement on qubit ``i`` into the full n-qubit operator.

    NOTE: the Kronecker factor order is reversed with respect to
    ``gate_operator`` (identities for the high qubits come first).
    """
    identity = np.eye(2)
    return kron(n_kron(n - i - 1, identity), P, n_kron(i, identity))
class QCircuit:
    """Minimal state-vector quantum-circuit simulator.

    The full 2**n state vector is updated after every gate call; qubits are
    1-indexed in the public API.  Two parallel gate tracks are kept for the
    text drawing: ``track_gates1`` records only the wires a gate touches,
    ``track_gates2`` pads the other wires so columns line up.
    """
    def __init__(self, number_of_qubits = 1):
        self.number_of_qubits = number_of_qubits
        self.state_zero = np.array([[1.0],[0.0]])
        self.initial_state = n_kron(self.number_of_qubits, self.state_zero)
        self.state = self.initial_state
        self.track_gates1 = [[] for _ in range(self.number_of_qubits)]
        self.track_gates2 = [[] for _ in range(self.number_of_qubits)]
        # Computational-basis labels, e.g. ['00', '01', '10', '11'] for n == 2.
        self.basis = [('{:0'+str(self.number_of_qubits)+'b}').format(i) for i in range(2**self.number_of_qubits)]
    def one_X(self, i = 1): # first qubit per default
        """Apply an X (NOT) gate to the i-th qubit (1-indexed)."""
        i -= 1
        # BUG FIX: the gate matrix used to be stored in ``self.X``, which
        # clobbered the ``X`` method after the first call.  Keep it local.
        x_gate = np.array([[0.0, 1.0], [1.0, 0.0]])
        self.state = np.dot(gate_operator(x_gate, i, self.number_of_qubits), self.state)
        for j in range(self.number_of_qubits):
            if j == i:
                self.track_gates1[j].append("X")
                self.track_gates2[j].append("X")
            else:
                self.track_gates2[j].append("-")
    def X(self, *args):
        """Apply an X gate to every 1-indexed qubit in *args* (default: qubit 1)."""
        if len(args) == 0:
            args = [1]
        # BUG FIX: gate matrix kept local instead of ``self.X`` (see one_X).
        x_gate = np.array([[0.0, 1.0], [1.0, 0.0]])
        self.state = np.dot(gate_multiple_operator(x_gate, args, self.number_of_qubits), self.state)
        for j in range(self.number_of_qubits):
            if j+1 in args:
                self.track_gates1[j].append("X")
                self.track_gates2[j].append("X")
            else:
                self.track_gates2[j].append("-")
    def H(self, *args):
        """Apply a Hadamard gate to every 1-indexed qubit in *args* (default: qubit 1)."""
        if len(args) == 0:
            args = [1]
        # BUG FIX: gate matrix kept local instead of ``self.H`` (see one_X).
        h_gate = 1.0 / 2**.5 * np.array([[1, 1], [1, -1]])
        self.state = np.dot(gate_multiple_operator(h_gate, args, self.number_of_qubits), self.state)
        for j in range(self.number_of_qubits):
            if j+1 in args:
                self.track_gates1[j].append("H")
                self.track_gates2[j].append("H")
            else:
                self.track_gates2[j].append("-")
    def CNOT(self, control=1, target=2):
        """Apply a CNOT between two *adjacent* qubits (1-indexed).

        Non-adjacent or equal control/target pairs print a warning and leave
        the state untouched.
        """
        if abs(control - target) > 1:
            print("Warning, the control and target should be next to eachother, nothing added")
        elif control == target:
            print("Warning, the control and target should be different, nothing added")
        elif control < target:
            # BUG FIX: gate matrix kept local instead of ``self.CNOT``, which
            # clobbered this method after the first call.
            cnot_gate = np.array([[1.0, 0.0, 0.0, 0.0],
                                  [0.0, 1.0, 0.0, 0.0],
                                  [0.0, 0.0, 0.0, 1.0],
                                  [0.0, 0.0, 1.0, 0.0],])
            self.state = np.dot(gate_operator(cnot_gate, control-1, self.number_of_qubits-1), self.state)
        else:
            # NOTE(review): verify the reversed-control CNOT matrix / basis
            # ordering below — kept exactly as in the original implementation.
            cnot_gate = np.array([[0.0, 1.0, 0.0, 0.0],
                                  [1.0, 0.0, 0.0, 0.0],
                                  [0.0, 0.0, 1.0, 0.0],
                                  [0.0, 0.0, 0.0, 1.0],])
            self.state = np.dot(gate_operator(cnot_gate, target-1, self.number_of_qubits-1), self.state)
        if abs(control - target) == 1:
            for j in range(self.number_of_qubits):
                if j+1 == control:
                    self.track_gates1[j].append("ctrl")
                    self.track_gates2[j].append("ctrl")
                elif j+1 == target:
                    self.track_gates1[j].append("CNOT")
                    self.track_gates2[j].append("CNOT")
                else:
                    self.track_gates2[j].append("----")
    def measure(self, i = 1):
        """Projectively measure the i-th qubit (1-indexed) in the computational
        basis and collapse the state vector onto the observed outcome."""
        i -= 1
        proj_zero = np.dot(self.state_zero, self.state_zero.T)
        # Probability of observing |0> on qubit i: <psi| P0 |psi> (1x1 array).
        prob = dot(np.conjugate(self.state).T,gate_operator(proj_zero,i,self.number_of_qubits),self.state)
        if np.random.rand() < prob:
            # Outcome 0: collapse onto the |0> subspace.
            self.state = np.dot(gate_operator(proj_zero,i,self.number_of_qubits),self.state) / np.sqrt(prob)
        else:
            # Outcome 1: collapse onto the |1> subspace.
            # BUG FIX: the original normalised by sqrt(prob); the measurement
            # postulate requires sqrt(1 - prob) after observing |1>.
            state_one = np.array([[0.0],[1.0]])
            proj_one = np.dot(state_one, state_one.T)
            self.state = np.dot(gate_operator(proj_one,i,self.number_of_qubits),self.state) / np.sqrt(1.0 - prob)
        for j in range(self.number_of_qubits):
            if j == i:
                self.track_gates1[j].append("M")
                self.track_gates2[j].append("M")
            else:
                self.track_gates2[j].append("-")
    def draw(self):
        """Print both text drawings of the circuit (compact and aligned)."""
        print("First Drawing")
        for _,q in enumerate(self.track_gates1):
            ret = "|0> --- " + " --- ".join(q)
            print(ret)
        print("Second Drawing")
        for _,q in enumerate(self.track_gates2):
            ret = "|0> --- " + " --- ".join(q)
            print(ret)
    def reinitialize(self):
        """Reset the state to |0...0> and clear the gate tracks.

        Careful: the previous state and drawing are lost.
        """
        self.state = self.initial_state
        self.track_gates1 = [[] for _ in range(self.number_of_qubits)]
        self.track_gates2 = [[] for _ in range(self.number_of_qubits)]
    def dirac(self):
        """Print the state in Dirac notation, first with all amplitudes, then
        with only the strictly positive ones."""
        equation = "|Psi> = "
        for i,s in enumerate(self.state):
            equation += str(s[0]) + '|' + self.basis[i] + '> + '
        print(equation[:-2])
        equation = "|Psi> = "
        for i,s in enumerate(self.state):
            if s > 0.0:
                equation += str(s[0]) + '|' + self.basis[i] + '> + '
        print(equation[:-2])
## Introducing QCChain
class QCChain:
    """Gate-recording quantum circuit.

    Gate methods only *record* gates in the trackers; nothing is applied to
    the state until ``simulate`` is called. States are updated after every
    simulation call, which allows for more flexibility and introduces a
    "running time".
    """
    def __init__(self, number_of_qubits = 1):
        self.number_of_qubits = number_of_qubits
        # single-qubit |0> column vector
        self.state_zero = np.array([[1.0],[0.0]])
        # self.state_one = np.array([[0.0],[1.0]])
        # full |00...0> initial state built by the project helper ``n_kron``
        self.initial_state = n_kron(self.number_of_qubits, self.state_zero)
        self.state = self.initial_state
        # track_gates1: only qubits that received a gate; track_gates2 keeps
        # every qubit column-aligned by padding with placeholders.
        self.track_gates1 = [[] for _ in range(self.number_of_qubits)]
        self.track_gates2 = [[] for _ in range(self.number_of_qubits)]
        # computational-basis labels, e.g. '00', '01', '10', '11' for 2 qubits
        self.basis = [('{:0'+str(self.number_of_qubits)+'b}').format(i) for i in range(2**self.number_of_qubits)]
        # self.I = np.eye(2)
        self.count = {basis:0 for basis in self.basis}
    def X(self, *args):
        # add X gate to multiple qubits, default to first qubit
        if len(args) == 0:
            args = [1]
        for j in range(self.number_of_qubits):
            if j+1 in args:
                self.track_gates1[j].append("X")
                self.track_gates2[j].append("X")
            else:
                self.track_gates2[j].append("-")
    def H(self, *args):
        # add H gate to multiple qubits
        if len(args) == 0:
            args = [1]
        for j in range(self.number_of_qubits):
            if j+1 in args:
                self.track_gates1[j].append("H")
                self.track_gates2[j].append("H")
            else:
                self.track_gates2[j].append("-")
    def CNOT(self, control=1, target=2):
        # add CNOT gate w.r.t. control and target (both should be valid qubits)
        # for now, the control and the target have to be next to each other
        if abs(control - target) > 1:
            print("Warning, the control and target should be next to eachother, nothing added")
        elif control == target:
            print("Warning, the control and target should be different, nothing added")
        else:
            for j in range(self.number_of_qubits):
                if j+1 == control:
                    self.track_gates1[j].append("ctrl")
                    self.track_gates2[j].append("ctrl")
                elif j+1 == target:
                    self.track_gates1[j].append("CNOT")
                    self.track_gates2[j].append("CNOT")
                else:
                    self.track_gates2[j].append("----")
    def measure(self, *args):
        # add measurement gate at the listed qubits (default: first qubit)
        if len(args) == 0:
            args = [1]
        for j in range(self.number_of_qubits):
            if j+1 in args:
                self.track_gates1[j].append("M")
                self.track_gates2[j].append("M")
            else:
                self.track_gates2[j].append("-")
    def draw(self):
        # print both trackers as ASCII circuit diagrams
        print("First Drawing")
        for _,q in enumerate(self.track_gates1):
            ret = "|0> --- " + " --- ".join(q)
            print(ret)
        print("Second Drawing")
        for _,q in enumerate(self.track_gates2):
            ret = "|0> --- " + " --- ".join(q)
            print(ret)
    def reinitialize(self):
        # carefull for the previous states will be lost
        self.state = self.initial_state
        self.track_gates1 = [[] for _ in range(self.number_of_qubits)]
        self.track_gates2 = [[] for _ in range(self.number_of_qubits)]
    def add(self, gates=[['X']], qubit=[1], place=[0]):
        # special method that adds a gate or several gate to specified place.
        # Example: q.add([['X','H'],['X'],['H']],[1,5,6],[-1,0,1])
        # this will add two gates X and H to the first qubit before the last gate,
        # X to the fifth qubit after all the other added gates,
        # H to the sixth qubit at first place (so before all the other).
        # NOTE(review): the mutable default arguments are shared across calls;
        # they are not mutated here, but callers should still pass fresh lists.
        for j in range(self.number_of_qubits):
            if j+1 in qubit:
                i = qubit.index(j+1)
                if place[i] == 0:
                    for gate in gates[i]:
                        self.track_gates1[j].append(gate)
                        self.track_gates2[j].append(gate)
                if place[i] > 0:
                    for gate in gates[i]:
                        self.track_gates1[j].insert(place[i]-1,gate)
                        self.track_gates2[j].insert(place[i]-1,gate)
                if place[i] < 0:
                    for gate in gates[i]:
                        self.track_gates1[j].insert(place[i],gate)
                        self.track_gates2[j].insert(place[i],gate)
            else:
                self.track_gates2[j].append("-")
    def delq(self,qubit=[1],place=[0]):
        # allows to delete gates at specified places
        for j in range(self.number_of_qubits):
            if j+1 in qubit:
                i = qubit.index(j+1)
                if place[i] == 0:
                    del self.track_gates1[j][-1]
                    del self.track_gates2[j][-1]
                else:
                    del self.track_gates1[j][place[i]-1]
                    del self.track_gates2[j][place[i]-1]
    def simulate(self):
        # simulate the circuit! uses the second tracker
        self.state = self.initial_state
        for j,_ in enumerate(self.track_gates2[0]):
            # the j-th "column" of the circuit: one symbol per qubit
            queue = [self.track_gates2[e][j] for e in range(self.number_of_qubits)]
            app = []
            c = None  # robustness: a column of placeholders applies no gate
            for i,g in enumerate(queue):
                if g not in ['-','ctrl','CNOT','----']:
                    app.append(i+1)
                    c = g
                elif g == 'ctrl':
                    control = i
                elif g == 'CNOT':
                    target = i
                    c = g
            # BUGFIX: the gate matrices are kept in *local* variables; the old
            # code assigned them to self.X/self.H/self.CNOT, clobbering the
            # methods of the same name after the first simulate() call.
            if c == 'X':
                x_gate = np.array([[0.0, 1.0], [1.0, 0.0]])
                self.state = np.dot(gate_multiple_operator(x_gate, app, self.number_of_qubits), self.state)
            elif c == 'H':
                h_gate = 1.0 / 2**.5 * np.array([[1, 1], [1, -1]])
                self.state = np.dot(gate_multiple_operator(h_gate, app, self.number_of_qubits), self.state)
            elif c == 'M':
                for i in app:
                    # projector onto |0>; prob = <psi|P0|psi>
                    self.P = np.dot(self.state_zero, self.state_zero.T)
                    prob = dot(np.conjugate(self.state).T,gate_operator(self.P,i-1,self.number_of_qubits),self.state)
                    if np.random.rand() < prob:
                        self.state = np.dot(gate_operator(self.P,i-1,self.number_of_qubits),self.state) / np.sqrt(prob)
                    elif prob < 1.0:
                        self.state_one = np.array([[0.0],[1.0]])
                        self.P1 = np.dot(self.state_one, self.state_one.T)
                        # BUGFIX: the |1> outcome has probability (1 - prob), so
                        # normalize by sqrt(1 - prob), not sqrt(prob).
                        self.state = np.dot(gate_operator(self.P1,i-1,self.number_of_qubits),self.state) / np.sqrt(1.0 - prob)
            elif c == 'CNOT':
                if control < target:
                    cnot_gate = np.array([[1.0, 0.0, 0.0, 0.0],
                                [0.0, 1.0, 0.0, 0.0],
                                [0.0, 0.0, 0.0, 1.0],
                                [0.0, 0.0, 1.0, 0.0],])
                    self.state = np.dot(gate_operator(cnot_gate, control, self.number_of_qubits-1), self.state)
                else:
                    cnot_gate = np.array([[0.0, 1.0, 0.0, 0.0],
                                [1.0, 0.0, 0.0, 0.0],
                                [0.0, 0.0, 1.0, 0.0],
                                [0.0, 0.0, 0.0, 1.0],])
                    self.state = np.dot(gate_operator(cnot_gate, target, self.number_of_qubits-1), self.state)
    def run(self, shots=1):
        # allows to simulate multiple times, counting basis states observed
        self.results = []
        self.count = {basis:0 for basis in self.basis}
        for i in range(shots):
            self.simulate()
            self.results.append([i,self.state])
            for i,s in enumerate(self.state):
                if s == [1]:
                    self.count[self.basis[i]] += 1
    def dirac(self):
        # returns a nice description of the state of the system
        equation = "|Psi> = "
        for i,s in enumerate(self.state):
            equation += str(s[0]) + '|' + self.basis[i] + '> + '
        print(equation[:-2])
        equation = "|Psi> = "
        for i,s in enumerate(self.state):
            if s > 0.0:
                equation += str(s[0]) + '|' + self.basis[i] + '> + '
        print(equation[:-2])
    def plot(self):
        # uses matplotlib
        import matplotlib.pyplot as plt
        plt.bar(list(self.count.keys()),self.count.values())
        plt.title('Results of the quantum experiment')
        plt.xlabel('Basis states')
        plt.ylabel('Count')
        plt.show()
""" !EPR pair!
qc = QCircuit(2)
qc.H(1)
print(qc.state)
qc.CNOT(1,2)
print(qc.state)
qc.measure(1)
qc.measure(2)
print(qc.state)
qc.draw()
print(qc.number_of_qubits)
print(qc.track_gates1)
print(qc.track_gates2)
"""
| [
"numpy.eye",
"numpy.sqrt",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.conjugate",
"numpy.kron",
"numpy.array",
"numpy.dot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((133, 150), 'numpy.array', 'np.array', (['[[1.0]]'], {}), '([[1.0]])\n', (141, 150), True, 'import numpy as np\n'), ((292, 309), 'numpy.array', 'np.array', (['[[1.0]]'], {}), '([[1.0]])\n', (300, 309), True, 'import numpy as np\n'), ((672, 681), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (678, 681), True, 'import numpy as np\n'), ((939, 948), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (945, 948), True, 'import numpy as np\n'), ((960, 977), 'numpy.array', 'np.array', (['[[1.0]]'], {}), '([[1.0]])\n', (968, 977), True, 'import numpy as np\n'), ((1330, 1339), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (1336, 1339), True, 'import numpy as np\n'), ((185, 199), 'numpy.kron', 'np.kron', (['qb', 'q'], {}), '(qb, q)\n', (192, 199), True, 'import numpy as np\n'), ((349, 369), 'numpy.kron', 'np.kron', (['ret', 'vector'], {}), '(ret, vector)\n', (356, 369), True, 'import numpy as np\n'), ((480, 493), 'numpy.dot', 'np.dot', (['qb', 'q'], {}), '(qb, q)\n', (486, 493), True, 'import numpy as np\n'), ((1591, 1615), 'numpy.array', 'np.array', (['[[1.0], [0.0]]'], {}), '([[1.0], [0.0]])\n', (1599, 1615), True, 'import numpy as np\n'), ((2209, 2243), 'numpy.array', 'np.array', (['[[0.0, 1.0], [1.0, 0.0]]'], {}), '([[0.0, 1.0], [1.0, 0.0]])\n', (2217, 2243), True, 'import numpy as np\n'), ((2713, 2747), 'numpy.array', 'np.array', (['[[0.0, 1.0], [1.0, 0.0]]'], {}), '([[0.0, 1.0], [1.0, 0.0]])\n', (2721, 2747), True, 'import numpy as np\n'), ((5391, 5433), 'numpy.dot', 'np.dot', (['self.state_zero', 'self.state_zero.T'], {}), '(self.state_zero, self.state_zero.T)\n', (5397, 5433), True, 'import numpy as np\n'), ((7525, 7549), 'numpy.array', 'np.array', (['[[1.0], [0.0]]'], {}), '([[1.0], [0.0]])\n', (7533, 7549), True, 'import numpy as np\n'), ((16053, 16099), 'matplotlib.pyplot.title', 'plt.title', (['"""Results of the quantum experiment"""'], {}), "('Results of the quantum experiment')\n", (16062, 16099), True, 'import matplotlib.pyplot as plt\n'), ((16109, 16135), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Basis states"""'], {}), "('Basis states')\n", (16119, 16135), True, 'import matplotlib.pyplot as plt\n'), ((16145, 16164), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (16155, 16164), True, 'import matplotlib.pyplot as plt\n'), ((16174, 16184), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16182, 16184), True, 'import matplotlib.pyplot as plt\n'), ((1074, 1089), 'numpy.kron', 'np.kron', (['ret', 'O'], {}), '(ret, O)\n', (1081, 1089), True, 'import numpy as np\n'), ((1124, 1139), 'numpy.kron', 'np.kron', (['ret', 'I'], {}), '(ret, I)\n', (1131, 1139), True, 'import numpy as np\n'), ((3249, 3276), 'numpy.array', 'np.array', (['[[1, 1], [1, -1]]'], {}), '([[1, 1], [1, -1]])\n', (3257, 3276), True, 'import numpy as np\n'), ((5551, 5567), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5565, 5567), True, 'import numpy as np\n'), ((5454, 5478), 'numpy.conjugate', 'np.conjugate', (['self.state'], {}), '(self.state)\n', (5466, 5478), True, 'import numpy as np\n'), ((5669, 5682), 'numpy.sqrt', 'np.sqrt', (['prob'], {}), '(prob)\n', (5676, 5682), True, 'import numpy as np\n'), ((5739, 5763), 'numpy.array', 'np.array', (['[[0.0], [1.0]]'], {}), '([[0.0], [1.0]])\n', (5747, 5763), True, 'import numpy as np\n'), ((5786, 5826), 'numpy.dot', 'np.dot', (['self.state_one', 'self.state_one.T'], {}), '(self.state_one, self.state_one.T)\n', (5792, 5826), True, 'import numpy as np\n'), ((13117, 13151), 'numpy.array', 'np.array', (['[[0.0, 1.0], [1.0, 0.0]]'], {}), '([[0.0, 1.0], [1.0, 0.0]])\n', (13125, 13151), True, 'import numpy as np\n'), ((4142, 4244), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0], [0.0, \n 0.0, 1.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0],\n [0.0, 0.0, 1.0, 0.0]])\n', (4150, 4244), True, 'import numpy as np\n'), ((4494, 4596), 'numpy.array', 'np.array', (['[[0.0, 1.0, 
0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, \n 0.0, 0.0, 1.0]]'], {}), '([[0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0]])\n', (4502, 4596), True, 'import numpy as np\n'), ((5921, 5934), 'numpy.sqrt', 'np.sqrt', (['prob'], {}), '(prob)\n', (5928, 5934), True, 'import numpy as np\n'), ((13330, 13357), 'numpy.array', 'np.array', (['[[1, 1], [1, -1]]'], {}), '([[1, 1], [1, -1]])\n', (13338, 13357), True, 'import numpy as np\n'), ((13556, 13598), 'numpy.dot', 'np.dot', (['self.state_zero', 'self.state_zero.T'], {}), '(self.state_zero, self.state_zero.T)\n', (13562, 13598), True, 'import numpy as np\n'), ((13742, 13758), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (13756, 13758), True, 'import numpy as np\n'), ((14292, 14394), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0], [0.0, \n 0.0, 1.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0],\n [0.0, 0.0, 1.0, 0.0]])\n', (14300, 14394), True, 'import numpy as np\n'), ((14690, 14792), 'numpy.array', 'np.array', (['[[0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, \n 0.0, 0.0, 1.0]]'], {}), '([[0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0]])\n', (14698, 14792), True, 'import numpy as np\n'), ((13631, 13655), 'numpy.conjugate', 'np.conjugate', (['self.state'], {}), '(self.state)\n', (13643, 13655), True, 'import numpy as np\n'), ((13874, 13887), 'numpy.sqrt', 'np.sqrt', (['prob'], {}), '(prob)\n', (13881, 13887), True, 'import numpy as np\n'), ((13968, 13992), 'numpy.array', 'np.array', (['[[0.0], [1.0]]'], {}), '([[0.0], [1.0]])\n', (13976, 13992), True, 'import numpy as np\n'), ((14027, 14067), 'numpy.dot', 'np.dot', (['self.state_one', 'self.state_one.T'], {}), '(self.state_one, self.state_one.T)\n', (14033, 14067), True, 'import numpy as np\n'), ((14176, 14189), 'numpy.sqrt', 'np.sqrt', (['prob'], {}), 
'(prob)\n', (14183, 14189), True, 'import numpy as np\n')] |
import numpy as np
class CrossEntropy(object):
    """Binary cross-entropy loss with small numerical offsets.

    ``forward`` evaluates -(y*log(x + 1e-4)) - (1.01 - y)*log(1.01 - x)
    elementwise, averaging over 2-D batches; ``backward`` returns (out - y).
    """
    def __init__(self):
        pass
    def forward(self, out, y):
        # per-element loss; the offsets keep log() away from zero
        per_element = -(y * np.log(out + .0001)) - ((1.01 - y) * np.log(1.01 - out))
        # 2-D batches reduce to their mean; 1-D inputs stay elementwise
        return np.mean(per_element) if out.ndim > 1 else per_element
    def backward(self, out, y):
        # gradient of the loss surrogate with respect to the output
        return np.subtract(out, y)
| [
"numpy.log",
"numpy.subtract"
] | [((383, 402), 'numpy.subtract', 'np.subtract', (['out', 'y'], {}), '(out, y)\n', (394, 402), True, 'import numpy as np\n'), ((195, 211), 'numpy.log', 'np.log', (['(1.01 - x)'], {}), '(1.01 - x)\n', (201, 211), True, 'import numpy as np\n'), ((161, 179), 'numpy.log', 'np.log', (['(x + 0.0001)'], {}), '(x + 0.0001)\n', (167, 179), True, 'import numpy as np\n')] |
#!/usr/bin/env python
""" Empirically simulate the birthday paradox
The birthday paradox is the surprisingly high change that two
people in a group of people share the same birthday.
See https://en.wikipedia.org/wiki/Birthday_problem for details.
"""
import random
from matplotlib import pyplot as plt
import numpy as np
NUMBER_OF_SIMULATIONS = 100000
def simulate_number_of_people_in_room_before_collision():
    """
    :return: Number of people added to room to achieve first collision
    :rtype: int
    """
    seen_birthdays = set()
    people_in_room = 0
    while True:
        people_in_room += 1
        birthday_ordinal = random.randint(0, 365 - 1)
        if birthday_ordinal in seen_birthdays:
            return people_in_room
        seen_birthdays.add(birthday_ordinal)
def generate_histogram(s):
    """Plot a histogram of per-simulation people counts, mean marked in red.

    :param s: Sequence of people-counts, one per simulation
    """
    plt.hist(
        s,
        # BUGFIX: ``xrange`` is Python-2 only (NameError on Python 3);
        # ``range`` works on both.
        bins=range(max(s)),
        color='green',
    )
    plt.ylabel('Number of simulations')
    plt.xlabel('Number of people in room when first birthday collision occurs')
    # dashed red line at the empirical mean
    plt.axvline(
        float(sum(s)) / NUMBER_OF_SIMULATIONS,
        color='red',
        linewidth=2,
        linestyle='--'
    )
    plt.show()
def generate_cdf(s):
    """Plot the empirical CDF of per-simulation people counts.

    :param s: Sequence of people-counts, one per simulation
    """
    # BUGFIX: ``normed`` was deprecated and later removed from numpy.histogram;
    # ``density=True`` is the equivalent for these equal-width bins.
    normed_counts, bin_edges = np.histogram(s, bins=max(s), density=True)
    cdf = np.cumsum(normed_counts)
    plt.plot(
        bin_edges[1:],
        cdf,
        linewidth=2,
    )
    plt.ylabel('CDF')
    plt.xlabel('Number of people in room when first birthday collision occurs')
    # dashed red line at the median (CDF = 0.5)
    plt.axhline(
        .5,
        color='red',
        linewidth=2,
        linestyle='--'
    )
    plt.show()
# Run all simulations and plot both views of the results.
simulations = [
    simulate_number_of_people_in_room_before_collision()
    # BUGFIX: ``xrange`` is Python-2 only; ``range`` works on both versions.
    for _ in range(0, NUMBER_OF_SIMULATIONS)
]
generate_histogram(simulations)
generate_cdf(simulations)
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axhline",
"numpy.cumsum",
"random.randint",
"matplotlib.pyplot.show"
] | [((956, 991), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of simulations"""'], {}), "('Number of simulations')\n", (966, 991), True, 'from matplotlib import pyplot as plt\n'), ((996, 1071), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of people in room when first birthday collision occurs"""'], {}), "('Number of people in room when first birthday collision occurs')\n", (1006, 1071), True, 'from matplotlib import pyplot as plt\n'), ((1213, 1223), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1221, 1223), True, 'from matplotlib import pyplot as plt\n'), ((1330, 1354), 'numpy.cumsum', 'np.cumsum', (['normed_counts'], {}), '(normed_counts)\n', (1339, 1354), True, 'import numpy as np\n'), ((1360, 1401), 'matplotlib.pyplot.plot', 'plt.plot', (['bin_edges[1:]', 'cdf'], {'linewidth': '(2)'}), '(bin_edges[1:], cdf, linewidth=2)\n', (1368, 1401), True, 'from matplotlib import pyplot as plt\n'), ((1438, 1455), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CDF"""'], {}), "('CDF')\n", (1448, 1455), True, 'from matplotlib import pyplot as plt\n'), ((1460, 1535), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of people in room when first birthday collision occurs"""'], {}), "('Number of people in room when first birthday collision occurs')\n", (1470, 1535), True, 'from matplotlib import pyplot as plt\n'), ((1541, 1599), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0.5)'], {'color': '"""red"""', 'linewidth': '(2)', 'linestyle': '"""--"""'}), "(0.5, color='red', linewidth=2, linestyle='--')\n", (1552, 1599), True, 'from matplotlib import pyplot as plt\n'), ((1642, 1652), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1650, 1652), True, 'from matplotlib import pyplot as plt\n'), ((622, 648), 'random.randint', 'random.randint', (['(0)', '(365 - 1)'], {}), '(0, 365 - 1)\n', (636, 648), False, 'import random\n')] |
import codecademylib
import numpy as np
import matplotlib.pyplot as plt
survey_responses = ['Ceballos', 'Kerrigan', 'Ceballos', 'Ceballos', 'Ceballos','Kerrigan', 'Kerrigan', 'Ceballos', 'Ceballos', 'Ceballos',
'Kerrigan', 'Kerrigan', 'Ceballos', 'Ceballos', 'Kerrigan', 'Kerrigan', 'Ceballos', 'Ceballos', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Ceballos', 'Ceballos', 'Ceballos', 'Ceballos', 'Ceballos', 'Ceballos',
'Kerrigan', 'Kerrigan', 'Ceballos', 'Ceballos', 'Ceballos', 'Kerrigan', 'Kerrigan', 'Ceballos', 'Ceballos', 'Kerrigan', 'Kerrigan', 'Ceballos', 'Ceballos', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Ceballos',
'Kerrigan', 'Kerrigan', 'Ceballos', 'Ceballos', 'Ceballos', 'Kerrigan', 'Kerrigan', 'Ceballos', 'Ceballos', 'Kerrigan', 'Kerrigan', 'Ceballos', 'Ceballos', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Kerrigan', 'Ceballos']
total_ceballos = sum([1 for i in survey_responses if i =='Ceballos'])
print('count ceballos '+str(total_ceballos))
percentage_ceballos = total_ceballos/float(len(survey_responses))
print('percentage ceballos '+ str(percentage_ceballos))
possible_surveys = np.random.binomial(float(len((survey_responses))),0.54,size=10000)/float(len(survey_responses))
# plt.hist(possible_surveys,bins=20,range=(0,1))
# plt.show()
ceballos_loss_surveys = np.mean(possible_surveys < 0.5)
print('loss percentage '+str (ceballos_loss_surveys))
large_survey = np.random.binomial(7000,0.5,size=10000)/7000
print('large_survey'+str(large_survey))
plt.hist(possible_surveys,bins=20,range=(0,1))
plt.hist(large_survey,alpha=.4,bins=20,range=(0,1))
plt.show()
ceballos_loss_new = np.mean(large_survey < 0.5)
print(ceballos_loss_new)
| [
"numpy.mean",
"matplotlib.pyplot.hist",
"numpy.random.binomial",
"matplotlib.pyplot.show"
] | [((1378, 1409), 'numpy.mean', 'np.mean', (['(possible_surveys < 0.5)'], {}), '(possible_surveys < 0.5)\n', (1385, 1409), True, 'import numpy as np\n'), ((1565, 1614), 'matplotlib.pyplot.hist', 'plt.hist', (['possible_surveys'], {'bins': '(20)', 'range': '(0, 1)'}), '(possible_surveys, bins=20, range=(0, 1))\n', (1573, 1614), True, 'import matplotlib.pyplot as plt\n'), ((1612, 1668), 'matplotlib.pyplot.hist', 'plt.hist', (['large_survey'], {'alpha': '(0.4)', 'bins': '(20)', 'range': '(0, 1)'}), '(large_survey, alpha=0.4, bins=20, range=(0, 1))\n', (1620, 1668), True, 'import matplotlib.pyplot as plt\n'), ((1665, 1675), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1673, 1675), True, 'import matplotlib.pyplot as plt\n'), ((1697, 1724), 'numpy.mean', 'np.mean', (['(large_survey < 0.5)'], {}), '(large_survey < 0.5)\n', (1704, 1724), True, 'import numpy as np\n'), ((1480, 1521), 'numpy.random.binomial', 'np.random.binomial', (['(7000)', '(0.5)'], {'size': '(10000)'}), '(7000, 0.5, size=10000)\n', (1498, 1521), True, 'import numpy as np\n')] |
"""
Define discrete action spaces for Gym Retro environments with a limited set of button combos
"""
import gym
import numpy as np
import retro
class Discretizer(gym.ActionWrapper):
    """
    Wrap a gym environment and make it use discrete actions.
    Args:
        combos: ordered list of lists of valid button combinations
    """

    def __init__(self, env, combos):
        super().__init__(env)
        assert isinstance(env.action_space, gym.spaces.MultiBinary)
        button_names = env.unwrapped.buttons
        decode_table = []
        for combo in combos:
            # boolean mask with True at each button in this combo
            mask = np.array([False] * env.action_space.n)
            for name in combo:
                mask[button_names.index(name)] = True
            decode_table.append(mask)
        self._decode_discrete_action = decode_table
        self.action_space = gym.spaces.Discrete(len(decode_table))

    def action(self, act):
        """Translate a discrete action index into a button-press mask."""
        return self._decode_discrete_action[act].copy()
class SonicDiscretizer(Discretizer):
    """
    Use Sonic-specific discrete actions
    based on https://github.com/openai/retro-baselines/blob/master/agents/sonic_util.py
    """

    def __init__(self, env):
        # the seven button combos that suffice to play Sonic
        sonic_combos = [
            ['LEFT'],
            ['RIGHT'],
            ['LEFT', 'DOWN'],
            ['RIGHT', 'DOWN'],
            ['DOWN'],
            ['DOWN', 'B'],
            ['B'],
        ]
        super().__init__(env=env, combos=sonic_combos)
def main():
    # Demo: compare retro's built-in discrete action space with the reduced
    # Sonic-specific one produced by SonicDiscretizer.
    env = retro.make(game='SonicTheHedgehog-Genesis', use_restricted_actions=retro.Actions.DISCRETE)
    print('retro.Actions.DISCRETE action_space', env.action_space)
    env.close()
    env = retro.make(game='SonicTheHedgehog-Genesis')
    env = SonicDiscretizer(env)
    print('SonicDiscretizer action_space', env.action_space)
    env.close()
if __name__ == '__main__':
main() | [
"numpy.array",
"retro.make"
] | [((1316, 1411), 'retro.make', 'retro.make', ([], {'game': '"""SonicTheHedgehog-Genesis"""', 'use_restricted_actions': 'retro.Actions.DISCRETE'}), "(game='SonicTheHedgehog-Genesis', use_restricted_actions=retro.\n Actions.DISCRETE)\n", (1326, 1411), False, 'import retro\n'), ((1501, 1544), 'retro.make', 'retro.make', ([], {'game': '"""SonicTheHedgehog-Genesis"""'}), "(game='SonicTheHedgehog-Genesis')\n", (1511, 1544), False, 'import retro\n'), ((604, 642), 'numpy.array', 'np.array', (['([False] * env.action_space.n)'], {}), '([False] * env.action_space.n)\n', (612, 642), True, 'import numpy as np\n')] |
# coding: utf-8
# Revert Classification - Prediction
# ===
#
# Building a classifier to predict reverts and produce calibrated propensity scores for being reverted.
import numpy as np
import pandas as pd
import os
from tqdm import tqdm
import bz2
import sqlite3
import difflib
import gzip
import json
import re
import hashlib
from datetime import datetime
from datetime import timezone
import scipy.stats
from itertools import groupby
from collections import Counter
import sklearn
import sklearn.ensemble
import sklearn.metrics
import sklearn.calibration
from sklearn.model_selection import cross_val_score
import math
import argparse
import sys
from joblib import dump, load
# Command-line interface: model file name plus the revert-latency window
# bounds (same units as the revision timestamps -- TODO confirm seconds).
parser = argparse.ArgumentParser()
parser.add_argument("--model")
parser.add_argument("--thresh_l", type = int)
parser.add_argument("--thresh_h", type = int)
args = parser.parse_args()
# Directory layout for raw and derived data.
raw_data_dir = "/export/scratch2/wiki_data"
derived_data_dir = os.path.join('/export/scratch2/levon003/repos/wiki-ores-feedback', "data", "derived")
stub_history_dir = os.path.join(derived_data_dir, 'stub-history-all-revisions')
revision_sample_dir = os.path.join(derived_data_dir, 'revision_sample')
working_dir = os.path.join(derived_data_dir, 'audit')
# ### Data loading and cleaning
# read in the sample dataframe
# (a redundant duplicate assignment of revision_sample_dir was removed here)
s = datetime.now()
sample3_filepath = os.path.join(revision_sample_dir, 'sample3_all.pkl')
rev_df = pd.read_pickle(sample3_filepath)
print(f"Sample 3 data loaded in {datetime.now() - s}.")
# Load the features (2020-08-01 tsv file)
s = datetime.now()
labeled_revs_dir = os.path.join(derived_data_dir, 'labeled-revs')
sample3_features_dir = os.path.join(labeled_revs_dir, 'sample3-features')
sample3_damaging_filepath = os.path.join(sample3_features_dir, 'sample3.damaging.2020-08-01T05:40:00Z.tsv')
features_df = pd.read_csv(sample3_damaging_filepath, sep='\t', header=0)
print(f"Features data loaded in {datetime.now() - s}.")
# drop the useless 'damaging' column (it is auto-generated)
features_df = features_df.drop(columns='damaging')
# load the rev_ids that correspond to the feature data
revid_filepath = os.path.join(labeled_revs_dir, 'sample3-features', 'rev_id_2020-08-01T05:40:00Z.txt')
rev_id_list = pd.read_csv(revid_filepath, header=None)
assert len(rev_id_list) == len(features_df)
# Read the revert info
# This dataframe contains additional data beyond what is in the rev_df
s = datetime.now()
stub_history_reverts_dir = os.path.join(derived_data_dir, 'stub-history-reverts')
revert_df_filepath = os.path.join(stub_history_reverts_dir, 'revert_df.pkl')
revert_df = pd.read_pickle(revert_df_filepath)
print(f"Loaded revert data in {datetime.now() - s}.")
# The most important info in the `revert_df` that isn't in the `rev_df` is the username info, which enables the identification of self-reverts.
# `revert_df` has one line per **revert** revision, compared to the `rev_df` which has one line per revision.
# identify self-reverts
is_self_revert_list = []
for row in tqdm(revert_df.itertuples(), total=len(revert_df)):
    # self-revert: the reverting editor appears among the reverted editors
    is_self_revert = row.reverting_user_text in row.reverted_user_texts
    is_self_revert_list.append(is_self_revert)
revert_df['is_self_revert'] = is_self_revert_list
# now compute the outcome, which is a variant of `rev_df.is_reverted`
reverted_rev_ids = set()
# only count it as a reverted revision if it was not a self-revert
# and it was reverted within one week
threshold_low = args.thresh_l
threshold_high = args.thresh_h
rs = revert_df[~revert_df.is_self_revert]
for row in tqdm(rs.itertuples(), total=len(rs)):
    reverting_timestamp = row.reverting_timestamp
    # keep revisions whose revert latency falls in (thresh_l, thresh_h]
    for rev_id, timestamp in zip(row.reverted_rev_ids, row.reverted_timestamps):
        if reverting_timestamp - timestamp <= threshold_high and reverting_timestamp - timestamp > threshold_low:
            reverted_rev_ids.add(rev_id)
# #### Create the actual outcome variable and add it to the features dataframe
is_reverted = [rev_id in reverted_rev_ids for rev_id in rev_id_list.iloc[:,0]]
features_df['is_reverted'] = is_reverted
################################################################
# scale X vars
# NOTE(review): scale() standardizes with the *test* data's own statistics;
# confirm this matches how the model was trained.
X_test = sklearn.preprocessing.scale(features_df.iloc[:,:-1])
# load model from file
md_dir = '/export/scratch2/wastv004/wiki-ores-feedback/results_train_allsample3/models'
md_filepath = os.path.join(md_dir, args.model)
md_name = os.path.splitext(args.model)[0]
md = load(md_filepath)
# predict on new data
s = datetime.now()
# predict_proba column 1 = probability of the positive (reverted) class
y_pred_test_calib = md.predict_proba(X_test)[:,1]
print(f"Prediction completed in {datetime.now() - s}.")
# save prediction results
pred_results = pd.DataFrame()
pred_results['test_calib'] = y_pred_test_calib
pred_results['test_label'] = np.array(features_df['is_reverted'])
pred_results['rev_id'] = np.array(rev_id_list.iloc[:,0])
print(pred_results.head())
results_filepath = os.path.join('/export/scratch2/wastv004/wiki-ores-feedback/results_train_allsample3/predictions', md_name + '_prediction_2020-08-01.pkl')
pred_results.to_pickle(results_filepath)
print(results_filepath)
| [
"pandas.read_pickle",
"argparse.ArgumentParser",
"pandas.read_csv",
"os.path.join",
"os.path.splitext",
"datetime.datetime.now",
"numpy.array",
"joblib.load",
"pandas.DataFrame",
"sklearn.preprocessing.scale"
] | [((731, 756), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (754, 756), False, 'import argparse\n'), ((978, 1067), 'os.path.join', 'os.path.join', (['"""/export/scratch2/levon003/repos/wiki-ores-feedback"""', '"""data"""', '"""derived"""'], {}), "('/export/scratch2/levon003/repos/wiki-ores-feedback', 'data',\n 'derived')\n", (990, 1067), False, 'import os\n'), ((1084, 1144), 'os.path.join', 'os.path.join', (['derived_data_dir', '"""stub-history-all-revisions"""'], {}), "(derived_data_dir, 'stub-history-all-revisions')\n", (1096, 1144), False, 'import os\n'), ((1168, 1217), 'os.path.join', 'os.path.join', (['derived_data_dir', '"""revision_sample"""'], {}), "(derived_data_dir, 'revision_sample')\n", (1180, 1217), False, 'import os\n'), ((1233, 1272), 'os.path.join', 'os.path.join', (['derived_data_dir', '"""audit"""'], {}), "(derived_data_dir, 'audit')\n", (1245, 1272), False, 'import os\n'), ((1345, 1359), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1357, 1359), False, 'from datetime import datetime\n'), ((1383, 1432), 'os.path.join', 'os.path.join', (['derived_data_dir', '"""revision_sample"""'], {}), "(derived_data_dir, 'revision_sample')\n", (1395, 1432), False, 'import os\n'), ((1453, 1505), 'os.path.join', 'os.path.join', (['revision_sample_dir', '"""sample3_all.pkl"""'], {}), "(revision_sample_dir, 'sample3_all.pkl')\n", (1465, 1505), False, 'import os\n'), ((1516, 1548), 'pandas.read_pickle', 'pd.read_pickle', (['sample3_filepath'], {}), '(sample3_filepath)\n', (1530, 1548), True, 'import pandas as pd\n'), ((1656, 1670), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1668, 1670), False, 'from datetime import datetime\n'), ((1691, 1737), 'os.path.join', 'os.path.join', (['derived_data_dir', '"""labeled-revs"""'], {}), "(derived_data_dir, 'labeled-revs')\n", (1703, 1737), False, 'import os\n'), ((1762, 1812), 'os.path.join', 'os.path.join', (['labeled_revs_dir', '"""sample3-features"""'], {}), 
"(labeled_revs_dir, 'sample3-features')\n", (1774, 1812), False, 'import os\n'), ((1842, 1921), 'os.path.join', 'os.path.join', (['sample3_features_dir', '"""sample3.damaging.2020-08-01T05:40:00Z.tsv"""'], {}), "(sample3_features_dir, 'sample3.damaging.2020-08-01T05:40:00Z.tsv')\n", (1854, 1921), False, 'import os\n'), ((1937, 1995), 'pandas.read_csv', 'pd.read_csv', (['sample3_damaging_filepath'], {'sep': '"""\t"""', 'header': '(0)'}), "(sample3_damaging_filepath, sep='\\t', header=0)\n", (1948, 1995), True, 'import pandas as pd\n'), ((2244, 2333), 'os.path.join', 'os.path.join', (['labeled_revs_dir', '"""sample3-features"""', '"""rev_id_2020-08-01T05:40:00Z.txt"""'], {}), "(labeled_revs_dir, 'sample3-features',\n 'rev_id_2020-08-01T05:40:00Z.txt')\n", (2256, 2333), False, 'import os\n'), ((2345, 2385), 'pandas.read_csv', 'pd.read_csv', (['revid_filepath'], {'header': 'None'}), '(revid_filepath, header=None)\n', (2356, 2385), True, 'import pandas as pd\n'), ((2534, 2548), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2546, 2548), False, 'from datetime import datetime\n'), ((2577, 2631), 'os.path.join', 'os.path.join', (['derived_data_dir', '"""stub-history-reverts"""'], {}), "(derived_data_dir, 'stub-history-reverts')\n", (2589, 2631), False, 'import os\n'), ((2654, 2709), 'os.path.join', 'os.path.join', (['stub_history_reverts_dir', '"""revert_df.pkl"""'], {}), "(stub_history_reverts_dir, 'revert_df.pkl')\n", (2666, 2709), False, 'import os\n'), ((2723, 2757), 'pandas.read_pickle', 'pd.read_pickle', (['revert_df_filepath'], {}), '(revert_df_filepath)\n', (2737, 2757), True, 'import pandas as pd\n'), ((4314, 4367), 'sklearn.preprocessing.scale', 'sklearn.preprocessing.scale', (['features_df.iloc[:, :-1]'], {}), '(features_df.iloc[:, :-1])\n', (4341, 4367), False, 'import sklearn\n'), ((4497, 4529), 'os.path.join', 'os.path.join', (['md_dir', 'args.model'], {}), '(md_dir, args.model)\n', (4509, 4529), False, 'import os\n'), ((4579, 4596), 
'joblib.load', 'load', (['md_filepath'], {}), '(md_filepath)\n', (4583, 4596), False, 'from joblib import dump, load\n'), ((4627, 4641), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4639, 4641), False, 'from datetime import datetime\n'), ((4797, 4811), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4809, 4811), True, 'import pandas as pd\n'), ((4890, 4926), 'numpy.array', 'np.array', (["features_df['is_reverted']"], {}), "(features_df['is_reverted'])\n", (4898, 4926), True, 'import numpy as np\n'), ((4953, 4985), 'numpy.array', 'np.array', (['rev_id_list.iloc[:, 0]'], {}), '(rev_id_list.iloc[:, 0])\n', (4961, 4985), True, 'import numpy as np\n'), ((5039, 5186), 'os.path.join', 'os.path.join', (['"""/export/scratch2/wastv004/wiki-ores-feedback/results_train_allsample3/predictions"""', "(md_name + '_prediction_2020-08-01.pkl')"], {}), "(\n '/export/scratch2/wastv004/wiki-ores-feedback/results_train_allsample3/predictions'\n , md_name + '_prediction_2020-08-01.pkl')\n", (5051, 5186), False, 'import os\n'), ((4541, 4569), 'os.path.splitext', 'os.path.splitext', (['args.model'], {}), '(args.model)\n', (4557, 4569), False, 'import os\n'), ((1583, 1597), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1595, 1597), False, 'from datetime import datetime\n'), ((2030, 2044), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2042, 2044), False, 'from datetime import datetime\n'), ((2790, 2804), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2802, 2804), False, 'from datetime import datetime\n'), ((4729, 4743), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4741, 4743), False, 'from datetime import datetime\n')] |
from typing import Tuple, Iterable, List, Union, TypeVar, ClassVar, Any
import numpy as np
import re
from .utils import split
# Type aliases for transition data.
# Fix: the original bound these to np.array, which is a *function*, not a
# type — annotations written against it were meaningless. np.ndarray is the
# actual array type.
State = np.ndarray
Action = np.ndarray
Reward = float
Done = bool
# One transition: (state, action, reward, next_state, done).
Datum = Tuple[State, Action, Reward, State, Done]
# Column-wise view over many transitions.
Data = Tuple[Iterable[State], Iterable[Action], Iterable[Reward], Iterable[State], Iterable[Done]]
class InvalidEpisodeNameException(Exception):
    """Raised when an episode file name does not match the expected
    ``<agent>_<environment>_<state>_<episode>.npz`` pattern."""

    def __init__(self, filename: str):
        template = ("Invalid episode file name: {}\n"
                    "Should be of form <agent>_<environment>_<state>_<episode>.npz")
        super().__init__(template.format(filename))
class Episode(object):
    """One recorded RL episode.

    Transitions are stored row-wise in ``self.data`` as
    (state, action, reward, next_state, done) tuples.  On save() the
    'states' column is dropped and only the initial state is kept, because
    load() can reconstruct states as [initial_state] + next_states[:-1].
    """
    # file_regex: ClassVar[Any] = re.compile('[^/]+\.npz$')
    # columns: List[str]= ['states', 'actions', 'rewards', 'next_states', 'dones']
    # save_columns: List[str] = columns[1:]
    # Matches the bare file name (everything after the last '/') ending in .npz.
    file_regex = re.compile('[^/]+\.npz$')
    columns = ['states', 'actions', 'rewards', 'next_states', 'dones']
    # 'states' is intentionally omitted from the saved columns (see load()).
    save_columns = columns[1:]
    def __init__(self, agent:str, game:str, level:str, episode:int, initial_state:State):
        self.agent = agent
        self.game = game
        self.level = level
        self.episode = episode
        self.initial_state = initial_state
        # self.data:List[Datum] = list()
        self.data = list()
    def __len__(self):
        return len(self.data)
    def __getitem__(self, i:int) -> Datum:
        return self.data[i]
    def add(self, datum:Datum):
        """Append one (state, action, reward, next_state, done) tuple."""
        self.data.append(datum)
    def sample(self, batch_size:int=100, sequential:bool=False) -> List[Datum]:
        """Sample batch_size transitions: a random contiguous slice when
        sequential, otherwise uniformly without replacement.
        NOTE(review): assumes len(self.data) >= batch_size — np.random.choice
        raises otherwise; confirm callers guarantee this."""
        num_data = len(self.data)
        if sequential:
            idx = np.random.choice(num_data - batch_size)
            return self.data[idx:idx + batch_size]
        idc = np.random.choice(num_data, size=batch_size, replace=False)
        return [self.data[i] for i in idc]
    def save(self, location:str='.', suffix:str='') -> str:
        '''Returns the name of the file to which the data was saved.

        Writes a compressed .npz with one array per save_columns entry plus
        'initial_state'; the file name encodes agent/game/level/episode so
        load() can recover the metadata from the name alone.
        '''
        if location[-1] == '/':
            location = location[:-1]
        if suffix != '':
            suffix = '_' + suffix
        # zip(*self.data) transposes rows into columns; [1:] drops 'states'.
        save_data = list(zip(*self.data))[1:] + [self.initial_state]
        data = dict(zip(self.save_columns + ['initial_state'], [np.array(d) for d in save_data]))
        filename = '{location}/{agent}_{game}_{level}_{episode}{suffix}.npz'.format(
            location=location,
            suffix=suffix,
            agent=self.agent,
            game=self.game,
            level=self.level,
            episode=self.episode)
        np.savez_compressed(filename, **data)
        return filename
    @classmethod
    def load(cls, path:str):
        """Rebuild an Episode from a file produced by save().

        Raises InvalidEpisodeNameException when the path does not end in a
        parsable <agent>_<game>_<level>_<episode>.npz name.
        """
        match = cls.file_regex.search(path)
        if match is None:
            raise InvalidEpisodeNameException(path)
        idx = match.span()
        filename = match.string[idx[0]:idx[1]]
        filename = filename[:-4] # remove .npz
        try:
            agent, game, level, episode, *other = filename.split('_')
        except ValueError:
            raise InvalidEpisodeNameException(filename)
        loaded = np.load(path)
        initial_state = loaded['initial_state']
        data = [split(loaded[name]) for name in cls.save_columns]
        # next_states: List[State] = data[2]
        # states: List[State] = [initial_state] + next_states[:-1]
        # save_columns order is actions, rewards, next_states, dones,
        # so data[2] is the next_states column.
        next_states = data[2]
        # Each state is the previous transition's next_state.
        states = [initial_state] + next_states[:-1]
        data = [states] + data
        ep = cls(agent, game, level, episode, initial_state)
        # ep.data: List[Datum] = list(zip(*data))
        ep.data = list(zip(*data))
        return ep
class Memory(object):
    """In-memory store of RL transitions, grouped into Episodes.

    Episodes are created via begin_episode() (after set_meta() has supplied
    the agent/game/level labels) and populated via add().  sample() draws
    uniformly over all stored transitions using a lazily rebuilt index.

    Fixes over the original implementation:
    * begin_episode() did ``raise("...")`` — raising a plain string is itself
      a TypeError in Python 3 — it now raises a real Exception.
    * load() set episode_counter to len(self.episodes), which made the next
      begin_episode() skip one episode number; it is now the index of the
      last loaded episode.
    """

    def __init__(self):
        self.episodes = list()
        # Index of the most recently started episode; -1 means "none yet".
        self.episode_counter = -1
        self.agent = None
        self.game = None
        self.level = None
        self.current_episode = None
        self.array_names = ['states', 'actions', 'rewards', 'next_states', 'dones']
        # True whenever episodes changed since sample() last built its index.
        self.dirty = True

    def set_meta(self, agent: Union[str, None] = None, game: Union[str, None] = None, level: Union[str, None] = None):
        """Record the labels used when constructing new episodes."""
        if agent is not None:
            self.agent = agent
        if game is not None:
            self.game = game
        if level is not None:
            self.level = level

    def begin_episode(self, initial_state):
        """Start a new Episode beginning at initial_state.

        Raises:
            Exception: if set_meta() has not supplied agent, game and level.
        """
        if self.agent is None or self.game is None or self.level is None:
            # Fixed: original was `raise("...")`, a TypeError in Python 3.
            raise Exception("You need to call set_meta before beginning an episode.")
        self.episode_counter += 1
        self.current_episode = Episode(self.agent, self.game, self.level, self.episode_counter, initial_state)
        self.episodes.append(self.current_episode)
        self.dirty = True

    def add(self, datum):
        """Append one transition to the current episode."""
        if self.current_episode is None:
            raise Exception("You need to call begin_episode before adding data.")
        self.current_episode.add(datum)
        self.dirty = True

    def save(self, location: str = '.', suffix: str = '') -> List[str]:
        '''Returns the list of filenames that were saved to.'''
        return [episode.save(location=location, suffix=suffix) for episode in self.episodes]

    def clear(self):
        """Drop all episodes and reset counters."""
        self.episodes = list()
        self.episode_counter = -1
        self.current_episode = None
        self.dirty = True

    def load(self, filenames: List[str]):
        '''Loads episodes from files on disk; unparsable names are skipped.'''
        self.episodes = list()
        for filename in filenames:
            try:
                self.dirty = True
                self.episodes.append(Episode.load(filename))
                self.current_episode = self.episodes[-1]
                # Counter is the index of the current (last) episode, so the
                # next begin_episode() continues the numbering (fixed off-by-one).
                self.episode_counter = len(self.episodes) - 1
            except InvalidEpisodeNameException as e:
                print('memory.load: Skipping episode {} because loading it threw an exception:\n\t{}'.format(filename, e))

    def sample(self, batch_size: int = 100, single_episode: bool = False, **kwargs):
        """Draw batch_size transitions uniformly without replacement.

        If single_episode is True, a random episode is chosen first and the
        batch is sampled from it alone (kwargs forwarded to Episode.sample).
        """
        if single_episode:
            num_episodes = len(self.episodes)
            ep = np.random.choice(num_episodes)
            return self.episodes[ep].sample(batch_size=batch_size, **kwargs)
        if self.dirty:
            # Makes a lookup table for which episode to find a given datum in.
            self.data_index = dict()
            self.num_data = 0
            for ep_idx in range(len(self.episodes)):
                ep_num_data = len(self.episodes[ep_idx])
                for data_idx in range(self.num_data, self.num_data + ep_num_data):
                    # global datum index -> (episode index, episode start offset)
                    self.data_index[data_idx] = (ep_idx, self.num_data)
                self.num_data += ep_num_data
            self.dirty = False
        idc = np.random.choice(self.num_data, size=batch_size, replace=False)
        batch = list()
        for data_idx in idc:
            ep_idx, data_idx_start = self.data_index[data_idx]
            datum = self.episodes[ep_idx].data[data_idx - data_idx_start]
            batch.append(datum)
        return batch
class FileMemory(object):
    """Sequentially replays episodes stored on disk, holding one episode in
    memory at a time and advancing to the next file as samples are drawn."""

    def __init__(self, filenames: List[str]):
        if not filenames:
            raise ValueError("filenames must contain at least one filename")
        # self.filenames:List[str] = filenames
        self.filenames = filenames
        # self.current_file_idx:int = -1
        self.current_file_idx = -1
        self.load_next()

    def has_next(self):
        """True while another episode file remains after the current one."""
        return len(self.filenames) > self.current_file_idx + 1

    def load_next(self):
        '''Loads next episode into memory.'''
        self.current_file_idx += 1
        path = self.filenames[self.current_file_idx]
        self.current_episode = Episode.load(path)
        self.current_episode_offset = 0

    def take(self, num: int):
        """Return up to ``num`` transitions from the current episode and
        advance the read cursor past them."""
        begin = self.current_episode_offset
        end = begin + num
        self.current_episode_offset = end
        return self.current_episode[begin:end]

    def sample(self, batch_size: int = 100, **kwargs):
        """Take batch_size transitions, rolling over into the next episode
        file when the current one runs out."""
        # NOTE(review): the `- 1` means the final datum of each episode is
        # never counted as available — confirm whether that is intentional.
        available = len(self.current_episode) - 1 - self.current_episode_offset
        shortfall = batch_size - available
        if shortfall <= 0:
            return self.take(batch_size)
        samples = self.take(batch_size - shortfall)
        if self.has_next():
            self.load_next()
            samples.extend(self.take(shortfall))
        return samples
| [
"re.compile",
"numpy.random.choice",
"numpy.array",
"numpy.savez_compressed",
"numpy.load"
] | [((807, 833), 're.compile', 're.compile', (['"""[^/]+\\\\.npz$"""'], {}), "('[^/]+\\\\.npz$')\n", (817, 833), False, 'import re\n'), ((1711, 1769), 'numpy.random.choice', 'np.random.choice', (['num_data'], {'size': 'batch_size', 'replace': '(False)'}), '(num_data, size=batch_size, replace=False)\n', (1727, 1769), True, 'import numpy as np\n'), ((2514, 2551), 'numpy.savez_compressed', 'np.savez_compressed', (['filename'], {}), '(filename, **data)\n', (2533, 2551), True, 'import numpy as np\n'), ((3050, 3063), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (3057, 3063), True, 'import numpy as np\n'), ((6661, 6724), 'numpy.random.choice', 'np.random.choice', (['self.num_data'], {'size': 'batch_size', 'replace': '(False)'}), '(self.num_data, size=batch_size, replace=False)\n', (6677, 6724), True, 'import numpy as np\n'), ((1606, 1645), 'numpy.random.choice', 'np.random.choice', (['(num_data - batch_size)'], {}), '(num_data - batch_size)\n', (1622, 1645), True, 'import numpy as np\n'), ((6029, 6059), 'numpy.random.choice', 'np.random.choice', (['num_episodes'], {}), '(num_episodes)\n', (6045, 6059), True, 'import numpy as np\n'), ((2207, 2218), 'numpy.array', 'np.array', (['d'], {}), '(d)\n', (2215, 2218), True, 'import numpy as np\n')] |
#!/usr/bin/python3
"""
/**
******************************************************************************
* @file dominant_attribute.py
* @author <NAME>
* $Rev: 1 $
* $Date: Sat Nov 17 15:12:04 CST 2018 $
* @brief Functions related to Dominant Attribute Algorithm
******************************************************************************
* @copyright
* @internal
*
* @endinternal
*
* @details
* This file contains the functions related to Dominant Attribute Algorithm
* @ingroup Algorithm
*/
"""
import pandas as pd
import numpy as np
from .misc import levelOfConsistency
def generateDiscretizedValue(value, point_ranges, min_value, max_value):
    """
    Map a numeric value to the symbol "lower..upper" of its interval.

    The intervals are bounded by min_value, the (sorted) cut points in
    point_ranges, and max_value.  A value below every cut point falls in
    the first interval; a value at or above every cut point falls in the
    last one.  With no cut points the single interval min..max is returned.

    Parameters
    ----------
    value: float
        The value to be transformed
    point_ranges: list
        Sorted list of cut points
    min_value: float
        The minimum value of the range
    max_value: float
        The maximum value of the range

    Returns
    -------
    str
        Discretized symbol representation of value
    """
    fmt = "{:.3f}..{:.3f}"
    # Full sequence of interval boundaries: min, cut points, max.
    bounds = [min_value] + list(point_ranges) + [max_value]
    # Check every interval except the last; membership is value < upper.
    for lower, upper in zip(bounds[:-2], bounds[1:-1]):
        if value < upper:
            return fmt.format(lower, upper)
    # Value is at or above all cut points: last interval.
    return fmt.format(bounds[-2], bounds[-1])
def generateDiscretizedDataFrame(df, chosen_cut_points):
    """
    Build a discretized copy of df using the supplied cut points.

    Non-numeric attribute columns are copied unchanged; each attribute
    listed in chosen_cut_points is mapped through generateDiscretizedValue
    using the min/max of its own unique values as the outer bounds.  The
    decision column (last column of df) is appended last so it remains the
    final column of the result.

    Parameters
    ----------
    df: pandas.DataFrame
        The data frame to be discretized
    chosen_cut_points: dict
        Dictionary of cut points keyed by attribute name

    Returns
    -------
    pandas.DataFrame
        Discretized data frame
    """
    decision_col = df.columns[-1]
    attr_cols = df.columns[:-1]
    numeric_cols = df[attr_cols].select_dtypes(include="number").columns
    categorical_cols = attr_cols.drop(numeric_cols)

    result = pd.DataFrame()
    result[categorical_cols] = df[categorical_cols]
    for attr, cuts in chosen_cut_points.items():
        col = df[attr]
        uniques = col.unique()
        result[attr] = col.apply(
            generateDiscretizedValue,
            point_ranges=cuts,
            min_value=np.min(uniques),
            max_value=np.max(uniques),
        )
    result[decision_col] = df[decision_col]
    return result
def computeConditionalEntropy(df, grouping_criteria):
    """
    Compute Conditional Entropy value of the data frame based on grouping_criteria
    This function computes the conditional entropy value of a data frame based on
    grouping_criteria. The value will be conditional entropy of decision (last column)
    conditioned on the group.
    Parameters
    ----------
    df: pandas.DataFrame
        Data frame representation of the dataset
    grouping_criteria: list
        Grouping criteria of the data frame to be pased to pandas.DataFrame.groupby() function
    Returns
    -------
    float
        Conditional Entropy value
    """
    df_grouped = df.groupby(grouping_criteria)
    # Size of each group and the total number of rows (for group weights).
    num_each_group = df_grouped.apply(lambda x: x.shape[0])
    num_total = num_each_group.sum()
    # For each group: compute the decision-value frequencies p, then sum
    # p * log2(p) — i.e. minus the entropy of the decision within the group.
    # The trailing .groupby(level=0)/.T.apply folds the per-decision terms
    # into one scalar per group.
    conditional_entropy_each_group = df_grouped.apply(
        lambda group: (group.groupby(group.columns[-1]).apply(
            lambda x: x.shape[0]
        )/group.shape[0]).apply(lambda x: x * np.log2(x))).groupby(level = 0).apply(
            lambda x: np.sum(x)
        ).T.apply(lambda x: np.sum(x))
    # Weighted average of group entropies (sign flipped back to positive).
    conditional_entropy = np.sum(-(num_each_group/num_total) * conditional_entropy_each_group)
    return (conditional_entropy)
def dominantAttribute(df):
    """
    Perform Dominant Attribute Algorithm
    This function performs Dominant Attribute Algorithm on the dataset in data frame
    format.  Cut points are added greedily (lowest conditional entropy first)
    until the discretized table is fully consistent, then redundant cut points
    are merged away.  Note: df is modified in place and also returned.
    Parameters
    ----------
    df: pandas.DataFrame
        Data frame representation of the dataset
    Returns
    -------
    pandas.DataFrame
        Resulting dataset after performing Dominant Attribute Algorithm
    """
    attribute_colnames = df.columns[:-1]
    decision_colname = df.columns[-1]
    df_numerical_attributes = df[attribute_colnames].select_dtypes(include = "number")
    # No numerical attribute
    if (df_numerical_attributes.shape[1] == 0):
        return(df)
    numerical_attribute_colnames = df_numerical_attributes.columns
    # Work on numerical attributes plus the decision column only.
    df_numerical_attributes = pd.concat([df_numerical_attributes, df[decision_colname]], axis = 1)
    print("------------------------------------------")
    print(" = Dominant Attribute: Numerical attributes are {}".format(list(numerical_attribute_colnames)))
    is_consistent = False
    list_subset = [df_numerical_attributes]
    chosen_cut_points = {}
    total_cut_point = 0
    print("------------------------------------------")
    print(" = Dominant Attribute: Start finding cut points")
    while not is_consistent:
        # Note: Check if it is needed
        if (len(list_subset) == 0):
            break
        current_subset = list_subset.pop(0)
        # Dominant attribute = attribute with the lowest conditional entropy
        # on the current subset.
        dominant_attribute = numerical_attribute_colnames[np.argmin([computeConditionalEntropy(current_subset, column)
            for column in numerical_attribute_colnames]) if len(numerical_attribute_colnames) > 1 else 0]
        print(" = Dominant Attribute: Found dominant attribute = " + dominant_attribute)
        unique_values = list(current_subset.groupby(dominant_attribute).groups)
        # Note: Check if it is needed
        if (len(unique_values) == 1):
            print(" = Dominant Attribute: Only one value, cannot compute possible cut point. Skipping")
            continue
        # Candidate cut points: midpoints between consecutive unique values.
        cut_points = [(unique_values[i] + unique_values[i + 1])/2 for i in range(len(unique_values) - 1)]
        # Best cut point = candidate minimizing conditional entropy of the
        # decision given the binary split (below / not below the cut point).
        best_cut_point = cut_points[(np.argmin([computeConditionalEntropy(current_subset,
            np.where(current_subset[dominant_attribute] < cut_point,
                True,
                False))
            for cut_point in cut_points])) if len(cut_points) > 1 else 0]
        print(" = Dominant Attribute: Found best cut point = " + str(best_cut_point))
        # Subset the current dataset further
        new_subsets = dict(current_subset.groupby(np.where(current_subset[dominant_attribute] < best_cut_point, True, False)).__iter__())
        for subset in new_subsets.keys():
            list_subset.append(new_subsets[subset])
        # Append best cut point of dominant attribute
        if (chosen_cut_points.get(dominant_attribute) is not None):
            if (best_cut_point not in chosen_cut_points[dominant_attribute]):
                chosen_cut_points[dominant_attribute].append(best_cut_point)
                chosen_cut_points[dominant_attribute].sort()
                total_cut_point += 1
        else:
            chosen_cut_points[dominant_attribute] = [best_cut_point]
            total_cut_point += 1
        # Generate tmp_df
        tmp_df = generateDiscretizedDataFrame(df, chosen_cut_points)
        # Check consistency level
        consistency_level = levelOfConsistency(tmp_df)
        is_consistent = consistency_level == 1.0
        print(" = Dominant Attribute: Current consistency level = " + str(consistency_level))
    if (not is_consistent):
        raise(Exception(" ! Dominant Attribute Error: Failed to perform Dominant Attribute until consistency_level of 1.\n Try to increase floating point precision in generateDiscretizedValue function under str_format variable."))
    print("------------------------------------------")
    print(" = Dominant Attribute: Found cut points = {}".format(chosen_cut_points))
    print("------------------------------------------")
    # Merging
    print(" = Dominant Attribute: Start merging")
    # Tentatively remove each cut point; keep the removal only if the table
    # stays fully consistent without it.
    for attribute in chosen_cut_points.keys():
        i = 0
        total_element = len(chosen_cut_points[attribute])
        while (i < total_element):
            if (total_cut_point <= 1):
                break
            current_cut_point = chosen_cut_points[attribute].pop(i)
            tmp_df = generateDiscretizedDataFrame(df, chosen_cut_points)
            consistency_level = levelOfConsistency(tmp_df)
            is_consistent = consistency_level == 1.0
            if (is_consistent):
                # Current cut point is redundant
                total_element = len(chosen_cut_points[attribute])
                print(" = Dominant Attribute: Found redundant cut point = {} {}".format(attribute, str(current_cut_point)) )
                total_cut_point -= 1
            else:
                # Check next element, reinsert cutpoint
                chosen_cut_points[attribute].insert(i, current_cut_point)
                i += 1
    # Finalize cut points for all numerical attributes
    for attribute in numerical_attribute_colnames:
        if (chosen_cut_points.get(attribute) is None):
            chosen_cut_points[attribute] = []
    print("------------------------------------------")
    print(" = Dominant Attribute: Finalized cut points = {}".format(chosen_cut_points))
    print("------------------------------------------")
    df_discretized = generateDiscretizedDataFrame(df, chosen_cut_points)
    df[df_discretized.columns] = df_discretized
    return (df)
| [
"numpy.where",
"numpy.max",
"numpy.sum",
"numpy.min",
"pandas.DataFrame",
"numpy.log2",
"pandas.concat"
] | [((2703, 2717), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2715, 2717), True, 'import pandas as pd\n'), ((4603, 4673), 'numpy.sum', 'np.sum', (['(-(num_each_group / num_total) * conditional_entropy_each_group)'], {}), '(-(num_each_group / num_total) * conditional_entropy_each_group)\n', (4609, 4673), True, 'import numpy as np\n'), ((5471, 5537), 'pandas.concat', 'pd.concat', (['[df_numerical_attributes, df[decision_colname]]'], {'axis': '(1)'}), '([df_numerical_attributes, df[decision_colname]], axis=1)\n', (5480, 5537), True, 'import pandas as pd\n'), ((4565, 4574), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (4571, 4574), True, 'import numpy as np\n'), ((3231, 3270), 'numpy.min', 'np.min', (['unique_current_attribute_values'], {}), '(unique_current_attribute_values)\n', (3237, 3270), True, 'import numpy as np\n'), ((3344, 3383), 'numpy.max', 'np.max', (['unique_current_attribute_values'], {}), '(unique_current_attribute_values)\n', (3350, 3383), True, 'import numpy as np\n'), ((4527, 4536), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (4533, 4536), True, 'import numpy as np\n'), ((7560, 7634), 'numpy.where', 'np.where', (['(current_subset[dominant_attribute] < best_cut_point)', '(True)', '(False)'], {}), '(current_subset[dominant_attribute] < best_cut_point, True, False)\n', (7568, 7634), True, 'import numpy as np\n'), ((7031, 7100), 'numpy.where', 'np.where', (['(current_subset[dominant_attribute] < cut_point)', '(True)', '(False)'], {}), '(current_subset[dominant_attribute] < cut_point, True, False)\n', (7039, 7100), True, 'import numpy as np\n'), ((4466, 4476), 'numpy.log2', 'np.log2', (['x'], {}), '(x)\n', (4473, 4476), True, 'import numpy as np\n')] |
import os
import numpy as np
from src.base_class.vocoder.htk_io import HTK_Parm_IO
from util import file_util, log_util
log = log_util.get_logger("acoustic tool")
def interpolate_f0(data):
    '''
    Linearly interpolate F0 across unvoiced regions (values <= 0).

    If F0 has already been interpolated (no non-positive values), the data is
    returned unchanged.  Also returns a voiced/unvoiced indicator vector
    (1.0 where the input F0 was > 0, else 0.0).

    :param data: F0 values; reshaped to (N, 1) internally
    :return: (interpolated F0 of shape (N, 1), V/UV vector of shape (N, 1))
    '''
    data = np.reshape(data, (data.size, 1))
    vuv_vector = np.zeros((data.size, 1))
    vuv_vector[data > 0.0] = 1.0
    vuv_vector[data <= 0.0] = 0.0
    # NOTE: ip_data aliases data — writes below modify data in place, and
    # later reads of data[i - 1] deliberately see already-interpolated values.
    ip_data = data
    frame_number = data.size
    last_value = 0.0
    for i in range(frame_number):
        if data[i] <= 0.0:
            # Scan forward for the next voiced frame j.
            j = i + 1
            for j in range(i + 1, frame_number):
                if data[j] > 0.0:
                    break
            if j < frame_number - 1:
                if last_value > 0.0:
                    # Voiced on both sides: linear ramp from data[i-1] to data[j].
                    step = (data[j] - data[i - 1]) / float(j - i)
                    for k in range(i, j):
                        ip_data[k] = data[i - 1] + step * (k - i + 1)
                else:
                    # Unvoiced leading segment: back-fill with the next voiced value.
                    for k in range(i, j):
                        ip_data[k] = data[j]
            else:
                # Unvoiced trailing segment: hold the last voiced value.
                for k in range(i, frame_number):
                    ip_data[k] = last_value
        else:
            ip_data[i] = data[i]
            last_value = data[i]
    return ip_data, vuv_vector
def compute_dynamic_vector(vector, dynamic_win, frame_number=None):
    '''
    Apply a 1-D dynamic-feature window (e.g. delta or acceleration) to a
    feature trajectory, replicating the edge frames as padding.

    delta_win = [-0.5, 0.0, 0.5]
    acc_win = [1.0, -2.0, 1.0]

    :param vector: feature trajectory; reshaped to (frame_number, 1)
    :param dynamic_win: odd-length window of coefficients
    :param frame_number: number of frames; inferred from vector.size if None
    :return: (frame_number, 1) array of windowed values
    '''
    if frame_number is None:
        frame_number = vector.size
    vector = np.reshape(vector, (frame_number, 1))
    win_length = len(dynamic_win)
    win_width = win_length // 2
    # Pad win_width frames on each side by replicating the first/last frame.
    padded = np.zeros((frame_number + 2 * win_width, 1))
    padded[win_width:frame_number + win_width] = vector
    padded[:win_width, 0] = vector[0, 0]
    padded[frame_number + win_width:, 0] = vector[frame_number - 1, 0]
    result = np.zeros((frame_number, 1))
    for t in range(frame_number):
        for w, coeff in enumerate(dynamic_win):
            result[t] += padded[t + w, 0] * coeff
    return result
def compute_delta(vector, delta_win):
    '''
    Apply a delta (or acceleration) window to a column-vector trajectory.

    delta_win = [-0.5, 0.0, 0.5]
    acc_win = [1.0, -2.0, 1.0]

    :param vector: feature trajectory of shape (n_frames, 1)
    :param delta_win: odd-length window of coefficients
    :return: (n_frames, 1) array of windowed values
    '''
    n_frames = vector.size
    win_len = len(delta_win)
    half = win_len // 2
    # Edge frames are replicated `half` times on each side as padding.
    padded = np.zeros((n_frames + 2 * half, 1))
    padded[half:n_frames + half, ] = vector
    padded[:half, 0] = vector[0, 0]
    padded[n_frames + half:, 0] = vector[n_frames - 1, 0]
    weights = np.asarray(delta_win, dtype=float)
    out = np.zeros((n_frames, 1))
    for t in range(n_frames):
        # Windowed value = dot product of the local context with the window.
        out[t, 0] = np.dot(padded[t:t + win_len, 0], weights)
    return out
def load_cmp_file(file_name, mgc_dim, bap_dim, lf0_dim):
    '''
    Load an HTK cmp file and split it into mgc / bap / static-lf0 streams.

    The file stores static + delta + acceleration coefficients, so each
    stream occupies 3x its static dimension, laid out as [mgc | lf0 | bap].

    cmp_norm = CMPNormalisation(mgc_dim=50, bap_dim=25, lf0_dim=1)
    :param file_name: path of the cmp file to read
    :param mgc_dim: static mgc dimension
    :param bap_dim: static bap dimension
    :param lf0_dim: static lf0 dimension
    :return: (mgc_data, bap_data, lf0_data); lf0_data is static-only
    '''
    mgc_width = mgc_dim * 3
    bap_width = bap_dim * 3
    lf0_width = lf0_dim * 3
    reader = HTK_Parm_IO()
    reader.read_htk(file_name)
    cmp_data = reader.data
    mgc_data = cmp_data[:, :mgc_width]
    # Only the static lf0 column: it gets interpolated and its deltas are
    # recomputed downstream.
    lf0_data = cmp_data[:, mgc_width]
    bap_start = mgc_width + lf0_width
    bap_data = cmp_data[:, bap_start:bap_start + bap_width]
    log.debug('loaded %s of shape %s' % (file_name, cmp_data.shape))
    log.debug('  with: %d mgc + %d lf0 + %d bap = %d' % (
        mgc_width, lf0_width, bap_width, bap_start + bap_width))
    assert ((mgc_width + lf0_width + bap_width) == cmp_data.shape[1])
    return mgc_data, bap_data, lf0_data
def produce_nn_cmp(in_file_list, out_file_list, mgc_dim=50, bap_dim=25, lf0_dim=1):
    """Build NN training targets from cmp files.

    For each input file: load the mgc/bap/static-lf0 streams, interpolate lf0
    across unvoiced frames, recompute its delta and acceleration, and write
    [mgc | lf0 | delta-lf0 | acc-lf0 | vuv | bap] as one binary file.

    Bug fix: the original called ``load_cmp_file(in_file_list[i])`` without
    the required dimension arguments, raising TypeError on every call.  The
    dimensions are now keyword parameters whose defaults match the example
    in load_cmp_file's docstring (mgc=50, bap=25, lf0=1).

    :param in_file_list: paths of input cmp files
    :param out_file_list: matching paths for the binary output files
    :param mgc_dim: static mgc dimension (default 50)
    :param bap_dim: static bap dimension (default 25)
    :param lf0_dim: static lf0 dimension (default 1)
    """
    delta_win = [-0.5, 0.0, 0.5]
    acc_win = [1.0, -2.0, 1.0]
    file_number = len(in_file_list)
    for i in range(file_number):
        mgc_data, bap_data, lf0_data = load_cmp_file(in_file_list[i], mgc_dim, bap_dim, lf0_dim)
        ip_lf0, vuv_vector = interpolate_f0(lf0_data)
        delta_lf0 = compute_delta(ip_lf0, delta_win)
        acc_lf0 = compute_delta(ip_lf0, acc_win)
        cmp_data = np.concatenate((mgc_data, ip_lf0, delta_lf0, acc_lf0, vuv_vector, bap_data), axis=1)
        file_util.array_to_binary_file(cmp_data, out_file_list[i])
    log.info('finished creation of %d binary files' % file_number)
def acoustic_decomposition(in_file_list, out_dimension_dict, file_extension_dict):
stream_start_index = {}
dimension_index = 0
recorded_vuv = False
vuv_dimension = None
for feature_name in list(out_dimension_dict.keys()):
if feature_name != 'vuv':
stream_start_index[feature_name] = dimension_index
else:
vuv_dimension = dimension_index
recorded_vuv = True
dimension_index += out_dimension_dict[feature_name]
for file_name in in_file_list:
dir_name = os.path.dirname(file_name)
file_id = os.path.splitext(os.path.basename(file_name))[0]
| [
"numpy.reshape",
"util.log_util.get_logger",
"os.path.dirname",
"numpy.zeros",
"os.path.basename",
"numpy.concatenate",
"util.file_util.array_to_binary_file",
"src.base_class.vocoder.htk_io.HTK_Parm_IO"
] | [((129, 165), 'util.log_util.get_logger', 'log_util.get_logger', (['"""acoustic tool"""'], {}), "('acoustic tool')\n", (148, 165), False, 'from util import file_util, log_util\n'), ((364, 396), 'numpy.reshape', 'np.reshape', (['data', '(data.size, 1)'], {}), '(data, (data.size, 1))\n', (374, 396), True, 'import numpy as np\n'), ((414, 438), 'numpy.zeros', 'np.zeros', (['(data.size, 1)'], {}), '((data.size, 1))\n', (422, 438), True, 'import numpy as np\n'), ((1748, 1785), 'numpy.reshape', 'np.reshape', (['vector', '(frame_number, 1)'], {}), '(vector, (frame_number, 1))\n', (1758, 1785), True, 'import numpy as np\n'), ((1874, 1917), 'numpy.zeros', 'np.zeros', (['(frame_number + 2 * win_width, 1)'], {}), '((frame_number + 2 * win_width, 1))\n', (1882, 1917), True, 'import numpy as np\n'), ((1937, 1964), 'numpy.zeros', 'np.zeros', (['(frame_number, 1)'], {}), '((frame_number, 1))\n', (1945, 1964), True, 'import numpy as np\n'), ((2680, 2723), 'numpy.zeros', 'np.zeros', (['(frame_number + 2 * win_width, 1)'], {}), '((frame_number + 2 * win_width, 1))\n', (2688, 2723), True, 'import numpy as np\n'), ((2743, 2770), 'numpy.zeros', 'np.zeros', (['(frame_number, 1)'], {}), '((frame_number, 1))\n', (2751, 2770), True, 'import numpy as np\n'), ((3594, 3607), 'src.base_class.vocoder.htk_io.HTK_Parm_IO', 'HTK_Parm_IO', ([], {}), '()\n', (3605, 3607), False, 'from src.base_class.vocoder.htk_io import HTK_Parm_IO\n'), ((4689, 4777), 'numpy.concatenate', 'np.concatenate', (['(mgc_data, ip_lf0, delta_lf0, acc_lf0, vuv_vector, bap_data)'], {'axis': '(1)'}), '((mgc_data, ip_lf0, delta_lf0, acc_lf0, vuv_vector, bap_data),\n axis=1)\n', (4703, 4777), True, 'import numpy as np\n'), ((4782, 4840), 'util.file_util.array_to_binary_file', 'file_util.array_to_binary_file', (['cmp_data', 'out_file_list[i]'], {}), '(cmp_data, out_file_list[i])\n', (4812, 4840), False, 'from util import file_util, log_util\n'), ((5453, 5479), 'os.path.dirname', 'os.path.dirname', (['file_name'], {}), 
'(file_name)\n', (5468, 5479), False, 'import os\n'), ((5515, 5542), 'os.path.basename', 'os.path.basename', (['file_name'], {}), '(file_name)\n', (5531, 5542), False, 'import os\n')] |
from collections import OrderedDict
import os
from os import path as osp
import numpy as np
import torch
from torch import optim
from torch.distributions import Normal
from torch.utils.data import DataLoader
from torch.nn import functional as F
from torchvision.utils import save_image
from rlkit.data_management.images import normalize_image
from rlkit.core import logger
import rlkit.core.util as util
from rlkit.misc.eval_util import create_stats_ordered_dict
from rlkit.misc.ml_util import ConstantSchedule
from rlkit.torch import pytorch_util as ptu
from rlkit.torch.data import (
ImageDataset, InfiniteWeightedRandomSampler,
InfiniteRandomSampler,
)
from rlkit.torch.core import np_to_pytorch_batch
import collections
from rlkit.torch.vae.vae_trainer import ConvVAETrainer
class ConditionalConvVAETrainer(ConvVAETrainer):
    """ConvVAETrainer variant whose model is conditioned on a batch of
    observations; loss is reconstruction log-prob of batch['x_t'] plus a
    beta-weighted KL term."""
    def compute_loss(self, batch, epoch, test=False):
        """Compute the beta-VAE loss (-log p + beta * KL) for one batch and
        record per-batch statistics under the 'train/' or 'test/' prefix.

        :param batch: dict with at least 'observations' and 'x_t' entries
        :param epoch: current epoch, used to schedule beta
        :param test: selects the statistics prefix and saves test_last_batch
        :return: scalar loss tensor
        """
        prefix = "test/" if test else "train/"
        beta = float(self.beta_schedule.get_value(epoch))
        obs = batch["observations"]
        reconstructions, obs_distribution_params, latent_distribution_params = self.model(obs)
        log_prob = self.model.logprob(batch["x_t"], obs_distribution_params)
        kle = self.model.kl_divergence(latent_distribution_params)
        loss = -1 * log_prob + beta * kle
        self.eval_statistics['epoch'] = epoch
        self.eval_statistics['beta'] = beta
        self.eval_statistics[prefix + "losses"].append(loss.item())
        self.eval_statistics[prefix + "log_probs"].append(log_prob.item())
        self.eval_statistics[prefix + "kles"].append(kle.item())
        encoder_mean = self.model.get_encoding_from_latent_distribution_params(latent_distribution_params)
        # Keep per-sample latent means for offline analysis.
        z_data = ptu.get_numpy(encoder_mean.cpu())
        for i in range(len(z_data)):
            self.eval_data[prefix + "zs"].append(z_data[i, :])
        self.eval_data[prefix + "last_batch"] = (batch, reconstructions)
        if test:
            self.test_last_batch = (obs, reconstructions)
        return loss
    def dump_reconstructions(self, epoch):
        """Save a grid image comparing x_0, x_t and the reconstructions of
        the last test batch to r<epoch>.png in the log directory.
        NOTE(review): the narrow(length=imlength // 2) suggests x_0/x_t pack
        two images per row — confirm against the dataset layout."""
        batch, reconstructions = self.eval_data["test/last_batch"]
        obs = batch["x_t"]
        x0 = batch["x_0"]
        n = min(obs.size(0), 8)
        comparison = torch.cat([
            x0[:n].narrow(start=0, length=self.imlength // 2, dim=1)
                .contiguous().view(
                -1,
                3,
                self.imsize,
                self.imsize
            ).transpose(2, 3),
            obs[:n].narrow(start=0, length=self.imlength // 2, dim=1)
                .contiguous().view(
                -1,
                3,
                self.imsize,
                self.imsize
            ).transpose(2, 3),
            reconstructions.view(
                self.batch_size,
                3,
                self.imsize,
                self.imsize,
            )[:n].transpose(2, 3)
        ])
        save_dir = osp.join(self.log_dir, 'r%d.png' % epoch)
        save_image(comparison.data.cpu(), save_dir, nrow=n)
    def dump_samples(self, epoch):
        """Decode 64 latent samples conditioned on the last test batch's
        observations and save them (plus the x_0 images) as PNG grids."""
        self.model.eval()
        batch, _ = self.eval_data["test/last_batch"]
        sample = ptu.randn(64, self.representation_size)
        sample = self.model.decode(sample, batch["observations"])[0].cpu()
        save_dir = osp.join(self.log_dir, 's%d.png' % epoch)
        save_image(
            sample.data.view(64, 3, self.imsize, self.imsize).transpose(2, 3),
            save_dir
        )
        x0 = batch["x_0"]
        x0_img = x0[:64].narrow(start=0, length=self.imlength // 2, dim=1).contiguous().view(
            -1,
            3,
            self.imsize,
            self.imsize
        ).transpose(2, 3)
        save_dir = osp.join(self.log_dir, 'x0_%d.png' % epoch)
        save_image(x0_img.data.cpu(), save_dir)
class CVAETrainer(ConditionalConvVAETrainer):
    """Conditional VAE trainer whose model is conditioned on an environment
    image batch['env'] rather than on observations; adds weight decay to the
    Adam optimizer and extra diagnostic dumps (latent distances, samples)."""
    def __init__(
            self,
            model,
            batch_size=128,
            log_interval=0,
            beta=0.5,
            beta_schedule=None,
            lr=None,
            do_scatterplot=False,
            normalize=False,
            mse_weight=0.1,
            is_auto_encoder=False,
            background_subtract=False,
            linearity_weight=0.0,
            distance_weight=0.0,
            loss_weights=None,
            use_linear_dynamics=False,
            use_parallel_dataloading=False,
            train_data_workers=2,
            skew_dataset=False,
            skew_config=None,
            priority_function_kwargs=None,
            start_skew_epoch=0,
            weight_decay=0.001,
            key_to_reconstruct='x_t',
            num_epochs=500,
    ):
        # NOTE(review): arguments are forwarded positionally — their order
        # must match ConvVAETrainer.__init__ exactly; confirm against the
        # base class when it changes.
        super().__init__(
            model,
            batch_size,
            log_interval,
            beta,
            beta_schedule,
            lr,
            do_scatterplot,
            normalize,
            mse_weight,
            is_auto_encoder,
            background_subtract,
            linearity_weight,
            distance_weight,
            loss_weights,
            use_linear_dynamics,
            use_parallel_dataloading,
            train_data_workers,
            skew_dataset,
            skew_config,
            priority_function_kwargs,
            start_skew_epoch,
            weight_decay,
            key_to_reconstruct,
            num_epochs
        )
        # Rebuild the optimizer so weight decay is applied.
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self.lr,
                                    weight_decay=weight_decay,
                                    )
    def compute_loss(self, batch, epoch, test=False):
        """Beta-VAE loss (-log p + beta * KL) with the model conditioned on
        batch['env']; statistics recorded under 'train/' or 'test/'.

        :param batch: dict with at least 'x_t' and 'env' entries
        :param epoch: current epoch, used to schedule beta
        :param test: selects the statistics prefix
        :return: scalar loss tensor
        """
        prefix = "test/" if test else "train/"
        beta = float(self.beta_schedule.get_value(epoch))
        reconstructions, obs_distribution_params, latent_distribution_params = self.model(batch["x_t"], batch["env"])
        log_prob = self.model.logprob(batch["x_t"], obs_distribution_params)
        kle = self.model.kl_divergence(latent_distribution_params)
        loss = -1 * log_prob + beta * kle
        self.eval_statistics['epoch'] = epoch
        self.eval_statistics['beta'] = beta
        self.eval_statistics[prefix + "losses"].append(loss.item())
        self.eval_statistics[prefix + "log_probs"].append(log_prob.item())
        self.eval_statistics[prefix + "kles"].append(kle.item())
        encoder_mean = self.model.get_encoding_from_latent_distribution_params(latent_distribution_params)
        # Keep per-sample latent means for offline analysis.
        z_data = ptu.get_numpy(encoder_mean.cpu())
        for i in range(len(z_data)):
            self.eval_data[prefix + "zs"].append(z_data[i, :])
        self.eval_data[prefix + "last_batch"] = (batch, reconstructions)
        return loss
    def dump_reconstructions(self, epoch):
        """Save a grid comparing env, x_t and their reconstructions for the
        last test batch to r<epoch>.png in the log directory."""
        batch, reconstructions = self.eval_data["test/last_batch"]
        obs = batch["x_t"]
        env = batch["env"]
        n = min(obs.size(0), 8)
        comparison = torch.cat([
            env[:n].narrow(start=0, length=self.imlength, dim=1)
                .contiguous().view(
                -1,
                3,
                self.imsize,
                self.imsize
            ).transpose(2, 3),
            obs[:n].narrow(start=0, length=self.imlength, dim=1)
                .contiguous().view(
                -1,
                3,
                self.imsize,
                self.imsize
            ).transpose(2, 3),
            reconstructions.view(
                self.batch_size,
                3,
                self.imsize,
                self.imsize,
            )[:n].transpose(2, 3),
        ])
        save_dir = osp.join(self.log_dir, 'r%d.png' % epoch)
        save_image(comparison.data.cpu(), save_dir, nrow=n)
    def dump_distances(self, batch, epoch):
        """Plot latent-space distance to the goal along one episode and save
        both the plot and the trajectory frames as PNGs.

        Uses batch['episode_obs'] with the first frame as 'env' conditioning
        and the last frame as the goal; skips trivially short episodes.
        """
        import matplotlib.pyplot as plt
        plt.clf()
        state = batch['episode_obs']
        size = self.model.imsize
        n = min(state.size(0), 8)
        if n <= 2: return
        distances = []
        all_imgs = [state[i].reshape(3, size, size).transpose(1, 2) for i in range(n)]
        env, goal = state[0].reshape(1,-1), state[-1].reshape(1,-1)
        latent_goal = self.model.encode(goal, env, distrib=False)
        for i in range(n):
            latent = self.model.encode(state[i].reshape(1,-1), env, distrib=False)
            # Euclidean distance to the goal encoding.
            distances.append(np.linalg.norm(ptu.get_numpy(latent) - ptu.get_numpy(latent_goal)))
        plt.plot(np.arange(n), np.array(distances))
        save_dir = osp.join(self.log_dir, 'dist_%d_plot.png' % epoch)
        plt.savefig(save_dir)
        all_imgs = torch.stack(all_imgs)
        save_dir = osp.join(self.log_dir, 'dist_%d_traj.png' % epoch)
        save_image(
            all_imgs.data,
            save_dir,
            nrow=n,
        )
    def dump_samples(self, epoch):
        """Save a grid of prior samples decoded under the last test batch's
        env conditioning (first row = env images, next 7 rows = samples),
        and dump the latent-distance diagnostics."""
        self.model.eval()
        batch, reconstructions, = self.eval_data["test/last_batch"]
        self.dump_distances(batch, epoch)
        env = batch["env"]
        n = min(env.size(0), 8)
        all_imgs = [
            env[:n].narrow(start=0, length=self.imlength, dim=1)
                .contiguous().view(
                -1,
                3,
                self.imsize,
                self.imsize
            ).transpose(2, 3)]
        for i in range(7):
            latent = self.model.sample_prior(self.batch_size, env)
            samples = self.model.decode(latent)[0]
            all_imgs.extend([
                samples.view(
                    self.batch_size,
                    3,
                    self.imsize,
                    self.imsize,
                )[:n].transpose(2, 3)])
        comparison = torch.cat(all_imgs)
        save_dir = osp.join(self.log_dir, 's%d.png' % epoch)
        save_image(comparison.data.cpu(), save_dir, nrow=8)
class DeltaCVAETrainer(ConditionalConvVAETrainer):
    """Conditional conv-VAE trainer whose model also reconstructs the
    conditioning ("env") image; the loss adds an env log-prob term, and
    extra dump hooks visualize the model's two-part latent space.
    """
    def __init__(
            self,
            model,
            batch_size=128,
            log_interval=0,
            beta=0.5,
            beta_schedule=None,
            context_schedule=None,
            lr=None,
            do_scatterplot=False,
            normalize=False,
            mse_weight=0.1,
            is_auto_encoder=False,
            background_subtract=False,
            linearity_weight=0.0,
            distance_weight=0.0,
            loss_weights=None,
            use_linear_dynamics=False,
            use_parallel_dataloading=False,
            train_data_workers=2,
            skew_dataset=False,
            skew_config=None,
            priority_function_kwargs=None,
            start_skew_epoch=0,
            weight_decay=0.001,
            num_epochs=500,
    ):
        # NOTE(review): num_epochs is accepted but never stored or used here.
        super().__init__(
            model,
            batch_size,
            log_interval,
            beta,
            beta_schedule,
            lr,
            do_scatterplot,
            normalize,
            mse_weight,
            is_auto_encoder,
            background_subtract,
            linearity_weight,
            distance_weight,
            loss_weights,
            use_linear_dynamics,
            use_parallel_dataloading,
            train_data_workers,
            skew_dataset,
            skew_config,
            priority_function_kwargs,
            start_skew_epoch,
            weight_decay,
        )
        self.context_schedule = context_schedule
        # Rebuild the optimizer over this model's parameters so the given
        # weight_decay takes effect (replaces whatever the parent built).
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self.lr,
                                    weight_decay=weight_decay,
                                    )
    def compute_loss(self, batch, epoch, test=False):
        """Return -(obs log-prob + env log-prob) + beta * KL for one batch.

        Records per-epoch statistics and caches the last batch together with
        both reconstructions for the dump_* methods.
        """
        prefix = "test/" if test else "train/"
        beta = float(self.beta_schedule.get_value(epoch))
        #context_weight = float(self.context_schedule.get_value(epoch))
        # Forward pass returns the obs branch and the env branch outputs.
        x_t, env = self.model(batch["x_t"], batch["env"])
        reconstructions, obs_distribution_params, latent_distribution_params = x_t
        env_reconstructions, env_distribution_params = env
        log_prob = self.model.logprob(batch["x_t"], obs_distribution_params)
        env_log_prob = self.model.logprob(batch["env"], env_distribution_params)
        kle = self.model.kl_divergence(latent_distribution_params)
        loss = -1 * (log_prob + env_log_prob) + beta * kle
        self.eval_statistics['epoch'] = epoch
        self.eval_statistics['beta'] = beta
        self.eval_statistics[prefix + "losses"].append(loss.item())
        self.eval_statistics[prefix + "log_probs"].append(log_prob.item())
        self.eval_statistics[prefix + "env_log_probs"].append(env_log_prob.item())
        self.eval_statistics[prefix + "kles"].append(kle.item())
        encoder_mean = self.model.get_encoding_from_latent_distribution_params(latent_distribution_params)
        z_data = ptu.get_numpy(encoder_mean.cpu())
        for i in range(len(z_data)):
            self.eval_data[prefix + "zs"].append(z_data[i, :])
        self.eval_data[prefix + "last_batch"] = (batch, reconstructions, env_reconstructions)
        return loss
    def dump_mixed_latents(self, epoch):
        """Save an n x n grid mixing latents across samples: row 0 shows x_t
        images, column 0 shows env images, and cell (i, j) decodes the first
        latent of sample j combined with the second latent of sample i.
        (encode() returns them as z_pos / z_obj — presumably position and
        object latents; confirm against the model.)
        """
        n = 8
        batch, reconstructions, env_reconstructions = self.eval_data["test/last_batch"]
        x_t, env = batch["x_t"][:n], batch["env"][:n]
        z_pos, logvar, z_obj = self.model.encode(x_t, env)
        grid = []
        for i in range(n):
            for j in range(n):
                if i + j == 0:
                    # Top-left corner of the grid is a blank image.
                    grid.append(ptu.zeros(1, self.input_channels, self.imsize, self.imsize))
                elif i == 0:
                    grid.append(x_t[j].reshape(1, self.input_channels, self.imsize, self.imsize))
                elif j == 0:
                    grid.append(env[i].reshape(1, self.input_channels, self.imsize, self.imsize))
                else:
                    pos, obj = z_pos[j].reshape(1, -1), z_obj[i].reshape(1, -1)
                    img = self.model.decode(torch.cat([pos, obj], dim=1))[0]
                    grid.append(img.reshape(1, self.input_channels, self.imsize, self.imsize))
        samples = torch.cat(grid)
        save_dir = osp.join(self.log_dir, 'mixed_latents_%d.png' % epoch)
        save_image(samples.data.cpu().transpose(2, 3), save_dir, nrow=n)
    def dump_reconstructions(self, epoch):
        """Save env / obs / obs-reconstruction / env-reconstruction rows
        side by side, plus the mixed-latents grid."""
        self.dump_mixed_latents(epoch)
        batch, reconstructions, env_reconstructions = self.eval_data["test/last_batch"]
        obs = batch["x_t"]
        env = batch["env"]
        n = min(obs.size(0), 8)
        comparison = torch.cat([
            env[:n].narrow(start=0, length=self.imlength, dim=1)
                .contiguous().view(
                -1,
                3,
                self.imsize,
                self.imsize
            ).transpose(2, 3),
            obs[:n].narrow(start=0, length=self.imlength, dim=1)
                .contiguous().view(
                -1,
                3,
                self.imsize,
                self.imsize
            ).transpose(2, 3),
            reconstructions.view(
                self.batch_size,
                3,
                self.imsize,
                self.imsize,
            )[:n].transpose(2, 3),
            env_reconstructions.view(
                self.batch_size,
                3,
                self.imsize,
                self.imsize,
            )[:n].transpose(2, 3)
        ])
        save_dir = osp.join(self.log_dir, 'r%d.png' % epoch)
        save_image(comparison.data.cpu(), save_dir, nrow=n)
    def dump_samples(self, epoch):
        """Save one row of env contexts plus 7 rows of decoded prior samples."""
        self.model.eval()
        batch, reconstructions, env_reconstructions = self.eval_data["test/last_batch"]
        # self.dump_distances(batch, epoch)
        env = batch["env"]
        n = min(env.size(0), 8)
        all_imgs = [
            env[:n].narrow(start=0, length=self.imlength, dim=1)
                .contiguous().view(
                -1,
                3,
                self.imsize,
                self.imsize
            ).transpose(2, 3)]
        for i in range(7):
            latent = self.model.sample_prior(self.batch_size, env)
            samples = self.model.decode(latent)[0]
            all_imgs.extend([
                samples.view(
                    self.batch_size,
                    3,
                    self.imsize,
                    self.imsize,
                )[:n].transpose(2, 3)])
        comparison = torch.cat(all_imgs)
        save_dir = osp.join(self.log_dir, 's%d.png' % epoch)
        save_image(comparison.data.cpu(), save_dir, nrow=8)
class CDVAETrainer(CVAETrainer):
    """CVAE trainer with auxiliary latent-dynamics and latent-distance losses
    and a dynamics-rollout visualization."""
    def state_linearity_loss(self, x_t, x_next, env, actions):
        """Squared error of the latent dynamics model's prediction of x_next,
        normalized by batch size."""
        latent_obs = self.model.encode(x_t, env, distrib=False)
        latent_next_obs = self.model.encode(x_next, env, distrib=False)
        predicted_latent = self.model.process_dynamics(latent_obs, actions)
        return torch.norm(predicted_latent - latent_next_obs) ** 2 / self.batch_size
    def state_distance_loss(self, x_t, x_next, env):
        """Squared latent distance between consecutive observations,
        normalized by batch size."""
        latent_obs = self.model.encode(x_t, env, distrib=False)
        latent_next_obs = self.model.encode(x_next, env, distrib=False)
        return torch.norm(latent_obs - latent_next_obs) ** 2 / self.batch_size
    def compute_loss(self, batch, epoch, test=False):
        """Negative ELBO plus weighted dynamics and latent-distance penalties.

        Records per-epoch statistics and caches the last batch with its
        reconstructions for dump_reconstructions.
        """
        prefix = "test/" if test else "train/"
        beta = float(self.beta_schedule.get_value(epoch))
        reconstructions, obs_distribution_params, latent_distribution_params = self.model(batch["x_t"], batch["env"])
        log_prob = self.model.logprob(batch["x_t"], obs_distribution_params)
        kle = self.model.kl_divergence(latent_distribution_params)
        # Local name intentionally shadows the method of the same name.
        state_distance_loss = self.state_distance_loss(batch["x_t"], batch["x_next"], batch["env"])
        dynamics_loss = self.state_linearity_loss(
            batch["x_t"], batch["x_next"], batch["env"], batch["actions"]
        )
        loss = -1 * log_prob + beta * kle + self.linearity_weight * dynamics_loss + self.distance_weight * state_distance_loss
        self.eval_statistics['epoch'] = epoch
        self.eval_statistics['beta'] = beta
        self.eval_statistics[prefix + "losses"].append(loss.item())
        self.eval_statistics[prefix + "log_probs"].append(log_prob.item())
        self.eval_statistics[prefix + "kles"].append(kle.item())
        self.eval_statistics[prefix + "dynamics_loss"].append(dynamics_loss.item())
        self.eval_statistics[prefix + "distance_loss"].append(state_distance_loss.item())
        encoder_mean = self.model.get_encoding_from_latent_distribution_params(latent_distribution_params)
        z_data = ptu.get_numpy(encoder_mean.cpu())
        for i in range(len(z_data)):
            self.eval_data[prefix + "zs"].append(z_data[i, :])
        self.eval_data[prefix + "last_batch"] = (batch, reconstructions)
        return loss
    def dump_dynamics(self, batch, epoch):
        """Roll the latent dynamics model along a recorded episode and save
        the ground-truth frames followed by the decoded predictions."""
        self.model.eval()
        state = batch['episode_obs']
        act = batch['episode_acts']
        size = self.model.imsize
        n = min(state.size(0), 8)
        all_imgs = [state[i].reshape(3, size, size).transpose(1, 2) for i in range(n)]
        # The first frame serves as both observation and conditioning input.
        latent_state = self.model.encode(state[0].reshape(1, -1), state[0].reshape(1, -1), distrib=False)
        pred_curr = self.model.decode(latent_state)[0]
        all_imgs.append(pred_curr.view(3, size, size).transpose(1, 2))
        for i in range(n - 1):
            # Advance the latent with the recorded action, then decode.
            latent_state = self.model.process_dynamics(latent_state.reshape(1, -1), act[i].reshape(1, -1))
            pred_curr = self.model.decode(latent_state)[0]
            all_imgs.append(pred_curr.view(3, size, size).transpose(1, 2))
        all_imgs = torch.stack(all_imgs)
        save_dir = osp.join(self.log_dir, 'dynamics%d.png' % epoch)
        save_image(
            all_imgs.data,
            save_dir,
            nrow=n,
        )
    def dump_reconstructions(self, epoch):
        """Save env / obs / reconstruction rows side by side, plus the
        dynamics rollout strip."""
        batch, reconstructions = self.eval_data["test/last_batch"]
        self.dump_dynamics(batch, epoch)
        obs = batch["x_t"]
        env = batch["env"]
        n = min(obs.size(0), 8)
        comparison = torch.cat([
            env[:n].narrow(start=0, length=self.imlength, dim=1)
                .contiguous().view(
                -1,
                3,
                self.imsize,
                self.imsize
            ).transpose(2, 3),
            obs[:n].narrow(start=0, length=self.imlength, dim=1)
                .contiguous().view(
                -1,
                3,
                self.imsize,
                self.imsize
            ).transpose(2, 3),
            reconstructions.view(
                self.batch_size,
                3,
                self.imsize,
                self.imsize,
            )[:n].transpose(2, 3)
        ])
        save_dir = osp.join(self.log_dir, 'r%d.png' % epoch)
        save_image(comparison.data.cpu(), save_dir, nrow=n)
class DeltaDynamicsCVAETrainer(CDVAETrainer):
    """Combines the env-reconstruction ("delta") objective with CDVAETrainer's
    dynamics and latent-distance losses."""
    def compute_loss(self, batch, epoch, test=False):
        """-(obs log-prob + env log-prob) + beta * KL + weighted dynamics and
        distance penalties; records statistics and caches the last batch with
        both reconstructions.
        """
        prefix = "test/" if test else "train/"
        beta = float(self.beta_schedule.get_value(epoch))
        # Forward pass returns the obs branch and the env branch outputs.
        x_t, env = self.model(batch["x_t"], batch["env"])
        reconstructions, obs_distribution_params, latent_distribution_params = x_t
        env_reconstructions, env_distribution_params = env
        log_prob = self.model.logprob(batch["x_t"], obs_distribution_params)
        env_log_prob = self.model.logprob(batch["env"], env_distribution_params)
        kle = self.model.kl_divergence(latent_distribution_params)
        state_distance_loss = self.state_distance_loss(batch["x_t"], batch["x_next"], batch["env"])
        dynamics_loss = self.state_linearity_loss(
            batch["x_t"], batch["x_next"], batch["env"], batch["actions"]
        )
        loss = -1 * (log_prob + env_log_prob) + beta * kle + self.linearity_weight * dynamics_loss + self.distance_weight * state_distance_loss
        self.eval_statistics['epoch'] = epoch
        self.eval_statistics['beta'] = beta
        self.eval_statistics[prefix + "losses"].append(loss.item())
        self.eval_statistics[prefix + "log_probs"].append(log_prob.item())
        self.eval_statistics[prefix + "env_log_probs"].append(env_log_prob.item())
        self.eval_statistics[prefix + "kles"].append(kle.item())
        self.eval_statistics[prefix + "dynamics_loss"].append(dynamics_loss.item())
        self.eval_statistics[prefix + "distance_loss"].append(state_distance_loss.item())
        encoder_mean = self.model.get_encoding_from_latent_distribution_params(latent_distribution_params)
        z_data = ptu.get_numpy(encoder_mean.cpu())
        for i in range(len(z_data)):
            self.eval_data[prefix + "zs"].append(z_data[i, :])
        self.eval_data[prefix + "last_batch"] = (batch, reconstructions, env_reconstructions)
        return loss
    def dump_samples(self, epoch):
        """Save one row of env contexts plus 7 rows of decoded prior samples,
        and the latent-distance diagnostics (dump_distances is expected to be
        provided by a parent class — confirm)."""
        self.model.eval()
        batch, reconstructions, env_reconstructions = self.eval_data["test/last_batch"]
        self.dump_distances(batch, epoch)
        env = batch["env"]
        n = min(env.size(0), 8)
        all_imgs = [
            env[:n].narrow(start=0, length=self.imlength, dim=1)
                .contiguous().view(
                -1,
                3,
                self.imsize,
                self.imsize
            ).transpose(2, 3)]
        for i in range(7):
            latent = self.model.sample_prior(self.batch_size, env)
            samples = self.model.decode(latent)[0]
            all_imgs.extend([
                samples.view(
                    self.batch_size,
                    3,
                    self.imsize,
                    self.imsize,
                )[:n].transpose(2, 3)])
        comparison = torch.cat(all_imgs)
        save_dir = osp.join(self.log_dir, 's%d.png' % epoch)
        save_image(comparison.data.cpu(), save_dir, nrow=8)
    def dump_reconstructions(self, epoch):
        """Save env / obs / obs-reconstruction / env-reconstruction rows side
        by side, plus the dynamics rollout strip."""
        batch, reconstructions, env_reconstructions = self.eval_data["test/last_batch"]
        self.dump_dynamics(batch, epoch)
        obs = batch["x_t"]
        env = batch["env"]
        n = min(obs.size(0), 8)
        comparison = torch.cat([
            env[:n].narrow(start=0, length=self.imlength, dim=1)
                .contiguous().view(
                -1,
                3,
                self.imsize,
                self.imsize
            ).transpose(2, 3),
            obs[:n].narrow(start=0, length=self.imlength, dim=1)
                .contiguous().view(
                -1,
                3,
                self.imsize,
                self.imsize
            ).transpose(2, 3),
            reconstructions.view(
                self.batch_size,
                3,
                self.imsize,
                self.imsize,
            )[:n].transpose(2, 3),
            env_reconstructions.view(
                self.batch_size,
                3,
                self.imsize,
                self.imsize,
            )[:n].transpose(2, 3)
        ])
        save_dir = osp.join(self.log_dir, 'r%d.png' % epoch)
        save_image(comparison.data.cpu(), save_dir, nrow=n)
| [
"rlkit.torch.pytorch_util.randn",
"matplotlib.pyplot.savefig",
"numpy.arange",
"matplotlib.pyplot.clf",
"os.path.join",
"torch.stack",
"numpy.array",
"torch.norm",
"rlkit.torch.pytorch_util.get_numpy",
"torchvision.utils.save_image",
"torch.cat",
"rlkit.torch.pytorch_util.zeros"
] | [((2945, 2986), 'os.path.join', 'osp.join', (['self.log_dir', "('r%d.png' % epoch)"], {}), "(self.log_dir, 'r%d.png' % epoch)\n", (2953, 2986), True, 'from os import path as osp\n'), ((3179, 3218), 'rlkit.torch.pytorch_util.randn', 'ptu.randn', (['(64)', 'self.representation_size'], {}), '(64, self.representation_size)\n', (3188, 3218), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((3313, 3354), 'os.path.join', 'osp.join', (['self.log_dir', "('s%d.png' % epoch)"], {}), "(self.log_dir, 's%d.png' % epoch)\n", (3321, 3354), True, 'from os import path as osp\n'), ((3731, 3774), 'os.path.join', 'osp.join', (['self.log_dir', "('x0_%d.png' % epoch)"], {}), "(self.log_dir, 'x0_%d.png' % epoch)\n", (3739, 3774), True, 'from os import path as osp\n'), ((7516, 7557), 'os.path.join', 'osp.join', (['self.log_dir', "('r%d.png' % epoch)"], {}), "(self.log_dir, 'r%d.png' % epoch)\n", (7524, 7557), True, 'from os import path as osp\n'), ((7711, 7720), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7718, 7720), True, 'import matplotlib.pyplot as plt\n'), ((8375, 8425), 'os.path.join', 'osp.join', (['self.log_dir', "('dist_%d_plot.png' % epoch)"], {}), "(self.log_dir, 'dist_%d_plot.png' % epoch)\n", (8383, 8425), True, 'from os import path as osp\n'), ((8434, 8455), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_dir'], {}), '(save_dir)\n', (8445, 8455), True, 'import matplotlib.pyplot as plt\n'), ((8476, 8497), 'torch.stack', 'torch.stack', (['all_imgs'], {}), '(all_imgs)\n', (8487, 8497), False, 'import torch\n'), ((8517, 8567), 'os.path.join', 'osp.join', (['self.log_dir', "('dist_%d_traj.png' % epoch)"], {}), "(self.log_dir, 'dist_%d_traj.png' % epoch)\n", (8525, 8567), True, 'from os import path as osp\n'), ((8576, 8619), 'torchvision.utils.save_image', 'save_image', (['all_imgs.data', 'save_dir'], {'nrow': 'n'}), '(all_imgs.data, save_dir, nrow=n)\n', (8586, 8619), False, 'from torchvision.utils import save_image\n'), ((9541, 9560), 'torch.cat', 
'torch.cat', (['all_imgs'], {}), '(all_imgs)\n', (9550, 9560), False, 'import torch\n'), ((9580, 9621), 'os.path.join', 'osp.join', (['self.log_dir', "('s%d.png' % epoch)"], {}), "(self.log_dir, 's%d.png' % epoch)\n", (9588, 9621), True, 'from os import path as osp\n'), ((13836, 13851), 'torch.cat', 'torch.cat', (['grid'], {}), '(grid)\n', (13845, 13851), False, 'import torch\n'), ((13871, 13925), 'os.path.join', 'osp.join', (['self.log_dir', "('mixed_latents_%d.png' % epoch)"], {}), "(self.log_dir, 'mixed_latents_%d.png' % epoch)\n", (13879, 13925), True, 'from os import path as osp\n'), ((15137, 15178), 'os.path.join', 'osp.join', (['self.log_dir', "('r%d.png' % epoch)"], {}), "(self.log_dir, 'r%d.png' % epoch)\n", (15145, 15178), True, 'from os import path as osp\n'), ((16135, 16154), 'torch.cat', 'torch.cat', (['all_imgs'], {}), '(all_imgs)\n', (16144, 16154), False, 'import torch\n'), ((16174, 16215), 'os.path.join', 'osp.join', (['self.log_dir', "('s%d.png' % epoch)"], {}), "(self.log_dir, 's%d.png' % epoch)\n", (16182, 16215), True, 'from os import path as osp\n'), ((19375, 19396), 'torch.stack', 'torch.stack', (['all_imgs'], {}), '(all_imgs)\n', (19386, 19396), False, 'import torch\n'), ((19416, 19464), 'os.path.join', 'osp.join', (['self.log_dir', "('dynamics%d.png' % epoch)"], {}), "(self.log_dir, 'dynamics%d.png' % epoch)\n", (19424, 19464), True, 'from os import path as osp\n'), ((19473, 19516), 'torchvision.utils.save_image', 'save_image', (['all_imgs.data', 'save_dir'], {'nrow': 'n'}), '(all_imgs.data, save_dir, nrow=n)\n', (19483, 19516), False, 'from torchvision.utils import save_image\n'), ((20499, 20540), 'os.path.join', 'osp.join', (['self.log_dir', "('r%d.png' % epoch)"], {}), "(self.log_dir, 'r%d.png' % epoch)\n", (20507, 20540), True, 'from os import path as osp\n'), ((23437, 23456), 'torch.cat', 'torch.cat', (['all_imgs'], {}), '(all_imgs)\n', (23446, 23456), False, 'import torch\n'), ((23476, 23517), 'os.path.join', 'osp.join', 
(['self.log_dir', "('s%d.png' % epoch)"], {}), "(self.log_dir, 's%d.png' % epoch)\n", (23484, 23517), True, 'from os import path as osp\n'), ((24718, 24759), 'os.path.join', 'osp.join', (['self.log_dir', "('r%d.png' % epoch)"], {}), "(self.log_dir, 'r%d.png' % epoch)\n", (24726, 24759), True, 'from os import path as osp\n'), ((8321, 8333), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (8330, 8333), True, 'import numpy as np\n'), ((8335, 8354), 'numpy.array', 'np.array', (['distances'], {}), '(distances)\n', (8343, 8354), True, 'import numpy as np\n'), ((16602, 16648), 'torch.norm', 'torch.norm', (['(predicted_latent - latent_next_obs)'], {}), '(predicted_latent - latent_next_obs)\n', (16612, 16648), False, 'import torch\n'), ((16877, 16917), 'torch.norm', 'torch.norm', (['(latent_obs - latent_next_obs)'], {}), '(latent_obs - latent_next_obs)\n', (16887, 16917), False, 'import torch\n'), ((8250, 8271), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['latent'], {}), '(latent)\n', (8263, 8271), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((8274, 8300), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['latent_goal'], {}), '(latent_goal)\n', (8287, 8300), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((13229, 13288), 'rlkit.torch.pytorch_util.zeros', 'ptu.zeros', (['(1)', 'self.input_channels', 'self.imsize', 'self.imsize'], {}), '(1, self.input_channels, self.imsize, self.imsize)\n', (13238, 13288), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((13690, 13718), 'torch.cat', 'torch.cat', (['[pos, obj]'], {'dim': '(1)'}), '([pos, obj], dim=1)\n', (13699, 13718), False, 'import torch\n')] |
import numpy as np
import torch.utils.data
def qm9_collate_batch(batch):
    """Collate a list of (drug1, drug2, label1, label2) samples into one
    flat tuple of batched tensors for a DataLoader."""
    drugs_a, drugs_b, labels_a, labels_b = zip(*batch)
    pair_idx_a, pair_idx_b = collate_drug_pairs(drugs_a, drugs_b)
    batched_a = (*collate_drugs(drugs_a), *pair_idx_a)
    batched_b = (*collate_drugs(drugs_b), *pair_idx_b)
    return (*batched_a, *batched_b, collate_labels(labels_a), collate_labels(labels_b))
def collate_drug_pairs(drugs1, drugs2):
    """Build cross-product atom index tensors for each drug pair.

    For the k-th pair, every atom of drug1 is paired with every atom of
    drug2. The ``seg`` indices are offset by the cumulative atom counts so
    they address the concatenated batch tensors built by collate_drugs; the
    ``idx`` tensors hold the partner atom's local index.
    """
    sizes1 = [d['n_atom'] for d in drugs1]
    sizes2 = [d['n_atom'] for d in drugs2]
    # Exclusive prefix sums: each drug's atom offset in the batch tensor.
    offs1 = [sum(sizes1[:k]) for k in range(len(sizes1))]
    offs2 = [sum(sizes2[:k]) for k in range(len(sizes2))]
    quads = [(a1 + o1, a2 + o2, a2, a1)
             for l1, l2, o1, o2 in zip(sizes1, sizes2, offs1, offs2)
             for a1 in range(l1)
             for a2 in range(l2)]
    seg_i1, seg_i2, idx_j1, idx_j2 = (torch.LongTensor(col) for col in zip(*quads))
    return (seg_i1, idx_j1), (seg_i2, idx_j2)
def collate_labels(labels):
    """Stack a sequence of per-sample labels into a single float32 tensor."""
    stacked = np.stack(labels)
    return torch.Tensor(stacked)
def collate_drugs(drugs):
    """Concatenate per-drug graph arrays into flat batch tensors.

    Returns (batch_seg_m, atom_type, atom_feat, bond_type, bond_seg_i,
    bond_idx_j). Bond endpoint indices are shifted by each drug's atom
    offset in the concatenated batch; batch_seg_m maps each atom back to
    its drug index.
    """
    offsets, seg_m, total = [], [], 0
    for k, d in enumerate(drugs):
        offsets.append(total)
        seg_m.extend([k] * d['n_atom'])
        total += d['n_atom']
    atom_feat = torch.FloatTensor(np.vstack([d['atom_feat'] for d in drugs]))
    atom_type = torch.LongTensor(np.hstack([d['atom_type'] for d in drugs]))
    bond_type = torch.LongTensor(np.hstack([d['bond_type'] for d in drugs]))
    bond_seg_i = torch.LongTensor(
        np.hstack([np.array(d['bond_seg_i']) + off for d, off in zip(drugs, offsets)]))
    bond_idx_j = torch.LongTensor(
        np.hstack([np.array(d['bond_idx_j']) + off for d, off in zip(drugs, offsets)]))
    batch_seg_m = torch.LongTensor(np.array(seg_m))
    return batch_seg_m, atom_type, atom_feat, bond_type, bond_seg_i, bond_idx_j
class QM9Dataset(torch.utils.data.Dataset):
    """Dataset of drug-pair instances whose graph structures are resolved
    lazily, per item, from a shared lookup dict keyed by string index."""

    def __init__(self, graph_dict, pairs_dataset=None):
        assert pairs_dataset
        self.graph_dict = graph_dict
        self.graph_idx_list = list(graph_dict.keys())
        self.feeding_insts = pairs_dataset

    def prepare_feeding_insts(self):
        # Hook for future instance re-ordering / optimization; currently a no-op.
        return

    def __len__(self):
        return len(self.feeding_insts)

    def __getitem__(self, idx):
        # Resolve the stored indices into actual graph structures on access.
        return self.drug_structure_lookup(self.feeding_insts[idx])

    def drug_structure_lookup(self, instance):
        """Turn an (idx1, idx2, label1, label2) instance into
        (graph1, graph2, label1, label2)."""
        drug_idx1, drug_idx2, label1, label2 = instance
        return (self.graph_dict[str(drug_idx1)],
                self.graph_dict[str(drug_idx2)],
                label1, label2)
| [
"numpy.stack",
"numpy.array",
"numpy.vstack",
"numpy.hstack"
] | [((1128, 1144), 'numpy.stack', 'np.stack', (['labels'], {}), '(labels)\n', (1136, 1144), True, 'import numpy as np\n'), ((1304, 1346), 'numpy.vstack', 'np.vstack', (["[d['atom_feat'] for d in drugs]"], {}), "([d['atom_feat'] for d in drugs])\n", (1313, 1346), True, 'import numpy as np\n'), ((1378, 1420), 'numpy.hstack', 'np.hstack', (["[d['atom_type'] for d in drugs]"], {}), "([d['atom_type'] for d in drugs])\n", (1387, 1420), True, 'import numpy as np\n'), ((1452, 1494), 'numpy.hstack', 'np.hstack', (["[d['bond_type'] for d in drugs]"], {}), "([d['bond_type'] for d in drugs])\n", (1461, 1494), True, 'import numpy as np\n'), ((1541, 1566), 'numpy.array', 'np.array', (["d['bond_seg_i']"], {}), "(d['bond_seg_i'])\n", (1549, 1566), True, 'import numpy as np\n'), ((1651, 1676), 'numpy.array', 'np.array', (["d['bond_idx_j']"], {}), "(d['bond_idx_j'])\n", (1659, 1676), True, 'import numpy as np\n')] |
import gtimer as gt
from rlkit.core import logger
from ROLL.online_LSTM_replay_buffer import OnlineLSTMRelabelingBuffer
import rlkit.torch.vae.vae_schedules as vae_schedules
import ROLL.LSTM_schedule as lstm_schedules
from rlkit.torch.torch_rl_algorithm import (
TorchBatchRLAlgorithm,
)
import rlkit.torch.pytorch_util as ptu
from torch.multiprocessing import Process, Pipe
from threading import Thread
from test_latent_space.test_LSTM import compare_latent_distance
from test_latent_space.test_LSTM2 import test_lstm_traj
from test_latent_space.test_masked_traj import test_masked_traj_lstm
import os
import os.path as osp
import numpy as np
from multiworld.core.image_env import unormalize_image, normalize_image
class OnlineLSTMAlgorithm(TorchBatchRLAlgorithm):
    """Batch RL algorithm that interleaves policy training with online
    (re)training of an unsegmented VAE and a segmentation LSTM, and
    periodically relabels the replay buffer's latents with the updated
    models.
    """
    def __init__(
            self,
            env_id,
            vae_original,
            lstm_segmented,
            vae_trainer_original,
            lstm_trainer_segmented,
            *base_args,
            vae_save_period=1,
            lstm_save_period=1,
            vae_training_schedule=vae_schedules.never_train,
            lstm_training_schedule=lstm_schedules.never_train,
            lstm_test_N=500,
            lstm_segmentation_method='color',
            oracle_data=False,
            parallel_vae_train=False,
            vae_min_num_steps_before_training=0,
            uniform_dataset=None,
            keep_train_segmentation_lstm=False,
            keep_train_original_vae=True,
            **base_kwargs
    ):
        super().__init__(*base_args, **base_kwargs)
        assert isinstance(self.replay_buffer, OnlineLSTMRelabelingBuffer)
        self.vae_original = vae_original
        self.lstm_segmented = lstm_segmented
        self.vae_trainer_original = vae_trainer_original
        self.lstm_trainer_segmented = lstm_trainer_segmented
        # Keep each trainer's model reference in sync with ours.
        self.vae_trainer_original.model = self.vae_original
        self.lstm_trainer_segmented.model = self.lstm_segmented
        self.vae_save_period = vae_save_period
        self.lstm_save_period = lstm_save_period
        self.vae_training_schedule = vae_training_schedule
        self.lstm_training_schedule = lstm_training_schedule
        self.oracle_data = oracle_data
        self.parallel_vae_train = parallel_vae_train
        self.vae_min_num_steps_before_training = vae_min_num_steps_before_training
        self.uniform_dataset = uniform_dataset
        # Lazily created by init_vae_training_subprocess when
        # parallel_vae_train is enabled.
        self._vae_training_process = None
        self._update_subprocess_vae_thread = None
        self._vae_conn_pipe = None
        self.keep_train_segmentation_lstm = keep_train_segmentation_lstm
        self.keep_train_original_vae = keep_train_original_vae
        # below is just used for testing the segmentation vae.
        self.env_id = env_id
        self.lstm_test_N = lstm_test_N
        self.lstm_segmentation_method = lstm_segmentation_method
    def _train(self):
        # Run the base RL training loop, then tear down any VAE subprocess.
        super()._train()
        self._cleanup()
    def _end_epoch(self, epoch):
        # self.check_replay_buffer()
        # Train/evaluate the generative models before the base end-of-epoch work.
        self._train_vae(epoch)
        gt.stamp('vae training')
        super()._end_epoch(epoch)
    def _log_stats(self, epoch):
        self._log_vae_stats()
        super()._log_stats(epoch)
    def to(self, device):
        """Move both generative models (and the base algorithm) to device."""
        self.vae_original.to(device)
        self.lstm_segmented.to(device)
        super().to(device)
    def _get_snapshot(self):
        snapshot = super()._get_snapshot()
        assert 'vae' not in snapshot
        snapshot['vae_original'] = self.vae_original
        snapshot['lstm_segmented'] = self.lstm_segmented
        return snapshot
    """
    debug code
    """
    def check_replay_buffer(self):
        """Debug helper: decode and display a random batch from the replay
        buffer (obs/next_obs via the original VAE, goals via the LSTM)."""
        batch = self.replay_buffer.random_batch(
            self.batch_size)
        rewards = batch['rewards']
        terminals = batch['terminals']
        obs = batch['observations']
        actions = batch['actions']
        next_obs = batch['next_observations']
        goals = batch['resampled_goals']
        print("obs: ", type(obs))
        print("obs shape: ", obs.shape)
        decoded_obs = self.eval_env._decode(obs, self.eval_env.vae_original)
        for idx in range(10):
            self.eval_env.show_obs(decoded_obs[idx], "sac policy obs")
        print("next_obs: ", type(next_obs))
        print("next obs shape: ", next_obs.shape)
        decoded_next_obs = self.eval_env._decode(next_obs, self.eval_env.vae_original)
        for idx in range(10):
            self.eval_env.show_obs(decoded_next_obs[idx], "sac policy next_obs")
        decoded_goal = self.eval_env._decode(goals, self.eval_env.lstm_segmented)
        for idx in range(10):
            self.eval_env.show_obs(decoded_goal[idx], "sac policy goal")
    """
    VAE-specific Code
    """
    def _train_vae(self, epoch):
        """Train/evaluate the VAE and LSTM per their schedules, then refresh
        the replay buffer's latents."""
        if self.parallel_vae_train and self._vae_training_process is None:
            self.init_vae_training_subprocess()
        should_train, amount_to_train = self.vae_training_schedule(epoch)
        _, lstm_amount_to_train = self.lstm_training_schedule(epoch)
        # Epoch before which RL has not started; generative models are still
        # trained during this warm-up period.
        rl_start_epoch = int(self.min_num_steps_before_training / (
            self.num_expl_steps_per_train_loop * self.num_train_loops_per_epoch
        ))
        print(" _train_vae called, should_train, amount_to_train", should_train, amount_to_train)
        if should_train or epoch <= (rl_start_epoch - 1):
            if self.parallel_vae_train:
                # NOTE(review): this parallel branch references
                # OnlineVaeAlgorithmSegmented, which is neither defined nor
                # imported here, and update_vae_in_training_subprocess /
                # init_vae_training_subprocess use self.vae / self.vae_trainer,
                # which this class never sets — enabling parallel_vae_train
                # would presumably fail. Confirm before use.
                assert self._vae_training_process.is_alive()
                # Make sure the last vae update has finished before starting
                # another one
                if self._update_subprocess_vae_thread is not None:
                    self._update_subprocess_vae_thread.join()
                self._update_subprocess_vae_thread = Thread(
                    target=OnlineVaeAlgorithmSegmented.update_vae_in_training_subprocess,
                    args=(self, epoch, ptu.device)
                )
                self._update_subprocess_vae_thread.start()
                self._vae_conn_pipe.send((amount_to_train, epoch))
            else:
                if self.keep_train_original_vae:
                    _train_vae(
                        self.vae_trainer_original,
                        self.replay_buffer,
                        epoch,
                        amount_to_train,
                        key='image_observation'
                    )
                    _test_vae(
                        self.vae_trainer_original,
                        epoch,
                        self.replay_buffer,
                        vae_save_period=self.vae_save_period,
                        uniform_dataset=self.uniform_dataset,
                        save_prefix='r_original_'
                    )
                if self.keep_train_segmentation_lstm:
                    _train_lstm(
                        lstm_trainer=self.lstm_trainer_segmented,
                        replay_buffer=self.replay_buffer,
                        epoch=epoch,
                        batches=lstm_amount_to_train,
                        oracle_data=False,
                        key='image_observation_segmented'
                    )
                    _test_lstm(
                        lstm_trainer=self.lstm_trainer_segmented,
                        epoch=epoch,
                        replay_buffer=self.replay_buffer,
                        env_id=self.env_id,
                        lstm_save_period=self.lstm_save_period,
                        uniform_dataset=None,
                        save_prefix='r_lstm_' ,
                        lstm_test_N=self.lstm_test_N,
                        lstm_segmentation_method=self.lstm_segmentation_method
                    )
            # we only refresh goals if the segmentation lstm (used for goal sampling) has changed
            self.replay_buffer.refresh_latents(epoch, refresh_goals=self.keep_train_segmentation_lstm)
    def _log_vae_stats(self):
        logger.record_dict(
            self.vae_trainer_original.get_diagnostics(),
            prefix='vae_trainer_original/',
        )
        logger.record_dict(
            self.lstm_trainer_segmented.get_diagnostics(),
            prefix='lstm_trainer_segmented/',
        )
    def _cleanup(self):
        # Close the IPC pipe and kill the training subprocess, if any.
        if self.parallel_vae_train:
            self._vae_conn_pipe.close()
            self._vae_training_process.terminate()
    def init_vae_training_subprocess(self):
        """Spawn the background VAE-training process and send it the trainer.

        NOTE(review): relies on self.vae / self.vae_trainer, which are not
        set anywhere in this class — see the note in _train_vae.
        """
        self._vae_conn_pipe, process_pipe = Pipe()
        self._vae_training_process = Process(
            target=subprocess_train_vae_loop,
            args=(
                process_pipe,
                self.vae,
                self.vae.state_dict(),
                self.replay_buffer,
                self.replay_buffer.get_mp_info(),
                ptu.device,
            )
        )
        self._vae_training_process.start()
        self._vae_conn_pipe.send(self.vae_trainer)
    def update_vae_in_training_subprocess(self, epoch, device):
        """Receive updated VAE weights from the subprocess and evaluate.

        NOTE(review): also relies on self.vae / self.vae_trainer — see the
        note in _train_vae.
        """
        self.vae.__setstate__(self._vae_conn_pipe.recv())
        self.vae.to(device)
        _test_vae(
            self.vae_trainer,
            epoch,
            self.replay_buffer,
            vae_save_period=self.vae_save_period,
            uniform_dataset=self.uniform_dataset,
        )
def _train_vae(vae_trainer, replay_buffer, epoch, batches=50, oracle_data=False, key='image_observation'):
batch_sampler = replay_buffer.random_vae_training_data
if oracle_data:
batch_sampler = None
vae_trainer.train_epoch(
epoch,
sample_batch=batch_sampler,
batches=batches,
from_rl=True,
key=key,
)
def _train_lstm(lstm_trainer, replay_buffer, epoch, batches=50, oracle_data=False, key='image_observation_segmented'):
batch_sampler = replay_buffer.random_lstm_training_data
if oracle_data:
batch_sampler = None
lstm_trainer.train_epoch(
epoch,
sample_batch=batch_sampler,
batches=batches,
from_rl=True,
key=key,
)
def _test_vae(vae_trainer, epoch, replay_buffer, vae_save_period=1, uniform_dataset=None, save_prefix='r'):
save_imgs = epoch % vae_save_period == 0
log_fit_skew_stats = replay_buffer._prioritize_vae_samples and uniform_dataset is not None
if uniform_dataset is not None:
replay_buffer.log_loss_under_uniform(uniform_dataset, vae_trainer.batch_size, rl_logger=vae_trainer.vae_logger_stats_for_rl)
vae_trainer.test_epoch(
epoch,
from_rl=True,
save_reconstruction=save_imgs,
save_prefix=save_prefix
)
if save_imgs:
sample_save_prefix = save_prefix.replace('r', 's')
vae_trainer.dump_samples(epoch, save_prefix=sample_save_prefix)
if log_fit_skew_stats:
replay_buffer.dump_best_reconstruction(epoch)
replay_buffer.dump_worst_reconstruction(epoch)
replay_buffer.dump_sampling_histogram(epoch, batch_size=vae_trainer.batch_size)
if uniform_dataset is not None:
replay_buffer.dump_uniform_imgs_and_reconstructions(dataset=uniform_dataset, epoch=epoch)
def _test_lstm(lstm_trainer, epoch, replay_buffer, env_id, lstm_save_period=1, uniform_dataset=None,
               save_prefix='r', lstm_segmentation_method='color', lstm_test_N=500, key='image_observation_segmented'):
    """Evaluate the segmentation LSTM for one epoch; every lstm_save_period
    epochs also dump samples/diagnostics and, for known environments, compare
    latent distances against pre-collected ground-truth data from $PJHOME.

    NOTE(review): lstm_segmentation_method and lstm_test_N are accepted but
    never used below — seg_name and N are hard-coded per environment.
    """
    batch_sampler = replay_buffer.random_lstm_training_data
    save_imgs = epoch % lstm_save_period == 0
    log_fit_skew_stats = replay_buffer._prioritize_vae_samples and uniform_dataset is not None
    if uniform_dataset is not None:
        replay_buffer.log_loss_under_uniform(uniform_dataset, lstm_trainer.batch_size, rl_logger=lstm_trainer.vae_logger_stats_for_rl)
    lstm_trainer.test_epoch(
        epoch,
        from_rl=True,
        key=key,
        sample_batch=batch_sampler,
        save_reconstruction=save_imgs,
        save_prefix=save_prefix
    )
    if save_imgs:
        # Sample dumps use an 's' prefix in place of the reconstruction 'r'.
        sample_save_prefix = save_prefix.replace('r', 's')
        lstm_trainer.dump_samples(epoch, save_prefix=sample_save_prefix)
        if log_fit_skew_stats:
            replay_buffer.dump_best_reconstruction(epoch)
            replay_buffer.dump_worst_reconstruction(epoch)
            replay_buffer.dump_sampling_histogram(epoch, batch_size=lstm_trainer.batch_size)
        if uniform_dataset is not None:
            replay_buffer.dump_uniform_imgs_and_reconstructions(dataset=uniform_dataset, epoch=epoch)
    # Environment-specific latent-space sanity checks against pre-collected
    # datasets (skipped silently when the files are absent).
    m = lstm_trainer.model
    pjhome = os.environ['PJHOME']
    seg_name = 'seg-' + 'color'
    if env_id in ['SawyerPushNIPSEasy-v0', 'SawyerPushHurdle-v0', 'SawyerPushHurdleMiddle-v0']:
        N = 500
        data_file_path = osp.join(pjhome, 'data/local/pre-train-lstm', '{}-{}-{}-0.3-0.5.npy'.format(env_id, seg_name, N))
        puck_pos_path = osp.join(pjhome, 'data/local/pre-train-lstm', '{}-{}-{}-0.3-0.5-puck-pos.npy'.format(env_id, seg_name, N))
        if osp.exists(data_file_path):
            all_data = np.load(data_file_path)
            puck_pos = np.load(puck_pos_path)
            all_data = normalize_image(all_data.copy())
            compare_latent_distance(m, all_data, puck_pos, save_dir=logger.get_snapshot_dir(), obj_name='puck',
                                    save_name='online_lstm_latent_distance_{}.png'.format(epoch))
    elif env_id == 'SawyerDoorHookResetFreeEnv-v1':
        N = 1000
        seg_name = 'seg-' + 'unet'
        data_file_path = osp.join(pjhome, 'data/local/pre-train-lstm', 'vae-only-{}-{}-{}-0-0.npy'.format(env_id, seg_name, N))
        door_angle_path = osp.join(pjhome, 'data/local/pre-train-lstm', 'vae-only-{}-{}-{}-0-0-door-angle.npy'.format(env_id, seg_name, N))
        if osp.exists(data_file_path):
            all_data = np.load(data_file_path)
            door_angle = np.load(door_angle_path)
            all_data = normalize_image(all_data.copy())
            compare_latent_distance(m, all_data, door_angle, save_dir=logger.get_snapshot_dir(), obj_name='door',
                                    save_name='online_lstm_latent_distance_{}.png'.format(epoch))
    elif env_id == 'SawyerPushHurdleResetFreeEnv-v0':
        N = 2000
        data_file_path = osp.join(pjhome, 'data/local/pre-train-lstm', 'vae-only-{}-{}-{}-0.3-0.5.npy'.format(env_id, seg_name, N))
        puck_pos_path = osp.join(pjhome, 'data/local/pre-train-lstm', 'vae-only-{}-{}-{}-0.3-0.5-puck-pos.npy'.format(env_id, seg_name, N))
        if osp.exists(data_file_path):
            all_data = np.load(data_file_path)
            puck_pos = np.load(puck_pos_path)
            all_data = normalize_image(all_data.copy())
            compare_latent_distance(m, all_data, puck_pos, save_dir=logger.get_snapshot_dir(), obj_name='puck',
                                    save_name='online_lstm_latent_distance_{}.png'.format(epoch))
    test_lstm_traj(env_id, m, save_path=logger.get_snapshot_dir(),
                   save_name='online_lstm_test_traj_{}.png'.format(epoch))
    test_masked_traj_lstm(env_id, m, save_dir=logger.get_snapshot_dir(),
                          save_name='online_masked_test_{}.png'.format(epoch))
def subprocess_train_vae_loop(
        conn_pipe,
        vae,
        vae_params,
        replay_buffer,
        mp_info,
        device,
):
    """
    The observations and next_observations of the replay buffer are stored in
    shared memory. This loop waits until the parent signals to start vae
    training, trains and sends the vae back, and then refreshes the latents.
    Refreshing latents in the subprocess reflects in the main process as well
    since the latents are in shared memory. Since this is done asynchronously,
    it is possible for the main process to see half the latents updated and half
    not.
    """
    ptu.device = device  # point the torch helper module at this worker's device
    vae_trainer = conn_pipe.recv()  # blocking: parent ships the trainer object first
    vae.load_state_dict(vae_params)
    vae.to(device)
    vae_trainer.set_vae(vae)
    replay_buffer.init_from_mp_info(mp_info)  # attach to the shared-memory buffers
    replay_buffer.env.vae = vae
    while True:
        # Parent signals one training round as (number of batches, current epoch).
        amount_to_train, epoch = conn_pipe.recv()
        _train_vae(vae_trainer, replay_buffer, epoch, amount_to_train)
        # Ship the trained weights back to the parent process.
        conn_pipe.send(vae_trainer.model.__getstate__())
        # Latents live in shared memory, so this update is visible to the parent.
        replay_buffer.refresh_latents(epoch)
| [
"os.path.exists",
"torch.multiprocessing.Pipe",
"threading.Thread",
"numpy.load",
"gtimer.stamp",
"rlkit.core.logger.get_snapshot_dir"
] | [((3062, 3086), 'gtimer.stamp', 'gt.stamp', (['"""vae training"""'], {}), "('vae training')\n", (3070, 3086), True, 'import gtimer as gt\n'), ((8488, 8494), 'torch.multiprocessing.Pipe', 'Pipe', ([], {}), '()\n', (8492, 8494), False, 'from torch.multiprocessing import Process, Pipe\n'), ((12963, 12989), 'os.path.exists', 'osp.exists', (['data_file_path'], {}), '(data_file_path)\n', (12973, 12989), True, 'import os.path as osp\n'), ((5763, 5875), 'threading.Thread', 'Thread', ([], {'target': 'OnlineVaeAlgorithmSegmented.update_vae_in_training_subprocess', 'args': '(self, epoch, ptu.device)'}), '(target=OnlineVaeAlgorithmSegmented.update_vae_in_training_subprocess,\n args=(self, epoch, ptu.device))\n', (5769, 5875), False, 'from threading import Thread\n'), ((13018, 13041), 'numpy.load', 'np.load', (['data_file_path'], {}), '(data_file_path)\n', (13025, 13041), True, 'import numpy as np\n'), ((13069, 13091), 'numpy.load', 'np.load', (['puck_pos_path'], {}), '(puck_pos_path)\n', (13076, 13091), True, 'import numpy as np\n'), ((13758, 13784), 'os.path.exists', 'osp.exists', (['data_file_path'], {}), '(data_file_path)\n', (13768, 13784), True, 'import os.path as osp\n'), ((14957, 14982), 'rlkit.core.logger.get_snapshot_dir', 'logger.get_snapshot_dir', ([], {}), '()\n', (14980, 14982), False, 'from rlkit.core import logger\n'), ((15103, 15128), 'rlkit.core.logger.get_snapshot_dir', 'logger.get_snapshot_dir', ([], {}), '()\n', (15126, 15128), False, 'from rlkit.core import logger\n'), ((13813, 13836), 'numpy.load', 'np.load', (['data_file_path'], {}), '(data_file_path)\n', (13820, 13836), True, 'import numpy as np\n'), ((13866, 13890), 'numpy.load', 'np.load', (['door_angle_path'], {}), '(door_angle_path)\n', (13873, 13890), True, 'import numpy as np\n'), ((14525, 14551), 'os.path.exists', 'osp.exists', (['data_file_path'], {}), '(data_file_path)\n', (14535, 14551), True, 'import os.path as osp\n'), ((13224, 13249), 'rlkit.core.logger.get_snapshot_dir', 
'logger.get_snapshot_dir', ([], {}), '()\n', (13247, 13249), False, 'from rlkit.core import logger\n'), ((14580, 14603), 'numpy.load', 'np.load', (['data_file_path'], {}), '(data_file_path)\n', (14587, 14603), True, 'import numpy as np\n'), ((14631, 14653), 'numpy.load', 'np.load', (['puck_pos_path'], {}), '(puck_pos_path)\n', (14638, 14653), True, 'import numpy as np\n'), ((14025, 14050), 'rlkit.core.logger.get_snapshot_dir', 'logger.get_snapshot_dir', ([], {}), '()\n', (14048, 14050), False, 'from rlkit.core import logger\n'), ((14786, 14811), 'rlkit.core.logger.get_snapshot_dir', 'logger.get_snapshot_dir', ([], {}), '()\n', (14809, 14811), False, 'from rlkit.core import logger\n')] |
#!/home/hiroya/Documents/Git-Repos/Lets_Play_Your_Waveform/.venv/bin/python
# -*- coding: utf-8 -*-
import cv2
import sys
import struct
import pyaudio
import pygame
import numpy as np
from matplotlib import pyplot
import matplotlib.gridspec as gridspec
from pygame.locals import K_s, K_d, K_f, K_g, K_h, K_j, K_k, K_l
from pygame.locals import KEYDOWN
from pygame.locals import K_ESCAPE
from pygame.locals import QUIT
from pygame.locals import K_q
import nlbcfg as cfg
DISPLAY_SIZE = (525, 210)
# NOTE(review): "DISPNAY" is likely a typo for "DISPLAY"; renaming requires updating all uses.
DISPNAY_CAPTION = "KEY BOARD"
WINDOW_NAME_LOADED_IMAGE = "LOADED IMAGE"
# Playback length in seconds
PLAY_LENGTH = 1.0
# Frequencies (Hz) of do-re-mi-fa-sol-la-si-do (C4..C5)
FREQUENCY_LIST = [262, 294, 330, 349, 392, 440, 494, 523]
# Keyboard keys S D F G H J K L, mapped one-to-one onto FREQUENCY_LIST
KEY_LIST = [K_s, K_d, K_f, K_g, K_h, K_j, K_k, K_l]
def main():
    """Entry point: start the keyboard player on image number 1."""
    selected_image_count = 1
    startup_player(selected_image_count)
def startup_player(SELECTED_IMAGE_COUNT):
    """
    Launch the player that turns a waveform image into playable notes.
    :param SELECTED_IMAGE_COUNT: int
        Number of the image to load into the player
        ex) IMG_TRIM_10.png --> SELECTED_IMAGE_COUNT = 10
    """

    def load_image():
        """
        Load the waveform image from disk.
        :return loaded_image: np.ndarray
            The loaded image data (exits the process on failure).
        """
        loaded_image = cv2.imread(
            cfg.SAVE_TARGET_PATH
            + cfg.SAVE_IMAGE_NAME
            + str(SELECTED_IMAGE_COUNT)
            + ".png",
            cv2.IMREAD_UNCHANGED,
        )
        # Exit with an error when the image file could not be read.
        if loaded_image is None:
            print("Failed to load image file.")
            sys.exit(1)
        else:
            print("Success load image file.")
        return loaded_image

    def detect_waveform():
        """
        Extract waveform data from the loaded image.
        :return waveform_data: list
            One sample per image column, normalized to [-1.0, 1.0].
        """
        waveform_data = []
        for col in range(LOADED_IMAGE_WIDTH - 1):
            one_column_data = loaded_image[:, col]
            index_having_zero_data = np.where(one_column_data == 0)
            # No zero-valued pixel found in this column.
            if index_having_zero_data[0].size == 0:
                waveform_data.append(0)
            else:
                # Use only the first zero pixel found.
                value = index_having_zero_data[0].item(0)
                # Normalize to max 1 / min -1.
                normalized_data = 2 * value / LOADED_IMAGE_HEIGHT - 1
                # Clamp anything that fell outside the range.
                if normalized_data > 1.0:
                    normalized_data = 1.0
                elif normalized_data < -1.0:
                    normalized_data = -1.0
                # Image rows grow downwards, so flip the sign.
                data_inverted_up_and_down = normalized_data * -1
                waveform_data.append(data_inverted_up_and_down)
        return waveform_data

    def calculate_fft(waveform_data, fs):
        """
        Run a windowed FFT over the waveform data.
        :param waveform_data: list
            Data the FFT is computed from.
        :param fs: int
            Sampling frequency used for the FFT.
        :return fft_freq_list: np.array
            Frequencies obtained from the FFT (x axis).
        :return windowed_amp: list
            Amplitudes obtained from the FFT (y axis).
        """
        # Index at which sampling starts.
        SAMPLING_START_INDEX = 0
        N_USE_IN_FFT = 2048
        hamming_window = np.hamming(N_USE_IN_FFT)  # Hamming window
        # Apply the window function to the extracted slice.
        windowed_data = (
            hamming_window
            * waveform_data[SAMPLING_START_INDEX : SAMPLING_START_INDEX + N_USE_IN_FFT]
        )
        windowed_dft = np.fft.fft(windowed_data)
        fft_freq_list = np.fft.fftfreq(N_USE_IN_FFT, d=1.0 / fs)
        windowed_amp = [np.sqrt(c.real ** 2 + c.imag ** 2) for c in windowed_dft]
        return fft_freq_list, windowed_amp

    def show_waveform_information():
        """
        Display the loaded image, the extracted waveform data and the
        FFT result in one figure.
        """
        pyplot.figure(figsize=(10, 8))
        grid_spec = gridspec.GridSpec(2, 2)
        plt_loaded_image = pyplot.subplot(grid_spec[0, :])
        plt_waveform = pyplot.subplot(grid_spec[1, 0])
        plt_fft = pyplot.subplot(grid_spec[1, 1])
        plt_loaded_image.set_title("LOADED IMAGE")
        plt_loaded_image.imshow(loaded_image, pyplot.cm.gray)
        plt_waveform.set_title("WAVEFORM_DATA")
        plt_waveform.plot(waveform_data)
        plt_waveform.set_xlabel("time [sample]")
        plt_waveform.set_ylabel("amplitude")
        plt_fft.set_title("FFT_RESULT")
        plt_fft.plot(fft_freq_list, windowed_amp, linestyle="-")
        plt_fft.set_xlim(0, 5000)
        plt_fft.set_ylim(0, 200)
        plt_fft.set_xlabel("frequency [Hz]")
        plt_fft.set_ylabel("amplitude spectrum")
        pyplot.pause(1)

    def get_index_input_key():
        """
        Look up the currently pressed key in the registered key list.
        :return i: int
            Index in KEY_LIST, or -1 when no registered key is pressed.
        """
        pygame.event.pump()
        pressed_keys = pygame.key.get_pressed()
        for i, key in enumerate(KEY_LIST):
            if pressed_keys[key]:
                return i
        # No key input: return -1.
        return -1

    def input_event_handling():
        """
        Handle the window close (x) button and the Q / ESC keys.
        Any of them sets the exit flag.
        """
        nonlocal isEnd
        for event in pygame.event.get():
            # Window close (x) button.
            # BUGFIX: the previous `isEnd = event.type == QUIT` reset the flag
            # back to False whenever a non-QUIT event followed QUIT in the
            # same event batch, so the quit request could be lost.
            if event.type == QUIT:
                isEnd = True
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE or event.key == K_q:
                    isEnd = True

    def highlight_input_keyboard(index):
        """
        Paint the key matching the given index in blue; all other white
        keys stay white, then the black keys are drawn on top.
        :param index: index of the pressed key
        """
        KEY_WIDTH = 60
        KEY_HEIGHT = 200
        KEY_OFFSET = 5
        for i, key in enumerate(KEY_LIST):
            if index == i:
                # Highlight the white key in blue.
                pygame.draw.rect(
                    screen,
                    cfg.COLER_BLUE,
                    pygame.Rect(
                        (KEY_OFFSET + KEY_WIDTH) * i + KEY_OFFSET,
                        KEY_OFFSET,
                        KEY_WIDTH,
                        KEY_HEIGHT,
                    ),
                )
            else:
                pygame.draw.rect(
                    screen,
                    cfg.COLER_WHITE,
                    pygame.Rect(
                        (KEY_OFFSET + KEY_WIDTH) * i + KEY_OFFSET,
                        KEY_OFFSET,
                        KEY_WIDTH,
                        KEY_HEIGHT,
                    ),
                )
        # Black keys.
        for i in range(8):
            # Positions where no black key is drawn.
            if i == 2 or i == 6:
                continue
            pygame.draw.rect(
                screen,
                cfg.COLER_BLACK,
                pygame.Rect(
                    (KEY_OFFSET + KEY_WIDTH) * i + KEY_OFFSET + KEY_WIDTH / 2,
                    KEY_OFFSET,
                    KEY_WIDTH,
                    KEY_HEIGHT / 2,
                ),
            )
        # Refresh the screen.
        pygame.display.update()

    def play_waveform(original_waveform_data, FREQUENCY):
        """
        Play the waveform data at the requested frequency.
        :param original_waveform_data: waveform data to play
        :param FREQUENCY: playback frequency in Hz
        """
        waveform_data_for_calculation = []
        SAMPLING_FREQUENCY = LOADED_IMAGE_WIDTH * FREQUENCY
        COPYING_TIMES = FREQUENCY * PLAY_LENGTH
        for i in range(int(COPYING_TIMES)):
            waveform_data_for_calculation.extend(original_waveform_data)
        waveform_data_for_calculation = [
            int(x * 32767.0) for x in waveform_data_for_calculation
        ]
        audio_data = struct.pack(
            "h" * len(waveform_data_for_calculation), *waveform_data_for_calculation
        )
        p = pyaudio.PyAudio()
        stream = p.open(
            format=pyaudio.paInt16,
            channels=1,
            rate=int(SAMPLING_FREQUENCY),
            output=True,
        )
        # Write the stream chunk by chunk to play the sound.
        chunk = 1024
        start_pointer = 0  # playback position pointer
        buffer = audio_data[start_pointer : start_pointer + chunk]
        while buffer != b"":
            stream.write(buffer)
            start_pointer = start_pointer + chunk
            buffer = audio_data[start_pointer : start_pointer + chunk]
        stream.close()
        p.terminate()

    isEnd = False
    # Prevents the same note from being played twice in a row.
    previous_key_input_index = -1
    loaded_image = load_image()
    LOADED_IMAGE_HEIGHT, LOADED_IMAGE_WIDTH = loaded_image.shape[:2]
    waveform_data = detect_waveform()
    # Compute the FFT treating the waveform as a 500 Hz tone.
    # sampling_frequency == width would correspond to 1 Hz.
    SAMPLING_FREQUENCY = LOADED_IMAGE_WIDTH * 500
    # NOTE(review): this replicates the waveform SAMPLING_FREQUENCY * PLAY_LENGTH
    # times even though calculate_fft only consumes 2048 samples — possibly
    # intended to be FREQUENCY * PLAY_LENGTH as in play_waveform; confirm.
    COPYING_TIMES = SAMPLING_FREQUENCY * PLAY_LENGTH
    waveform_data_for_calculation = []
    for i in range(int(COPYING_TIMES)):
        waveform_data_for_calculation.extend(waveform_data)
    fft_freq_list, windowed_amp = calculate_fft(
        waveform_data_for_calculation, SAMPLING_FREQUENCY
    )
    show_waveform_information()
    pygame.init()
    screen = pygame.display.set_mode(DISPLAY_SIZE)
    pygame.display.set_caption(DISPNAY_CAPTION)
    while not isEnd:
        index_input_key = get_index_input_key()
        input_event_handling()
        highlight_input_keyboard(index_input_key)
        # Do not play anything without a key input.
        if index_input_key == -1 or previous_key_input_index == index_input_key:
            previous_key_input_index = index_input_key
            continue
        previous_key_input_index = index_input_key
        PLAY_FREQUENCY = FREQUENCY_LIST[index_input_key]
        play_waveform(waveform_data, PLAY_FREQUENCY)
    pygame.quit()
    pyplot.close()
    cv2.destroyAllWindows()
# Run the player only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| [
"numpy.sqrt",
"pygame.init",
"pygame.quit",
"cv2.destroyAllWindows",
"sys.exit",
"pygame.event.pump",
"numpy.where",
"pygame.display.set_mode",
"numpy.fft.fft",
"matplotlib.pyplot.close",
"matplotlib.gridspec.GridSpec",
"pygame.display.update",
"pygame.Rect",
"numpy.hamming",
"matplotlib... | [((8863, 8876), 'pygame.init', 'pygame.init', ([], {}), '()\n', (8874, 8876), False, 'import pygame\n'), ((8890, 8927), 'pygame.display.set_mode', 'pygame.display.set_mode', (['DISPLAY_SIZE'], {}), '(DISPLAY_SIZE)\n', (8913, 8927), False, 'import pygame\n'), ((8932, 8975), 'pygame.display.set_caption', 'pygame.display.set_caption', (['DISPNAY_CAPTION'], {}), '(DISPNAY_CAPTION)\n', (8958, 8975), False, 'import pygame\n'), ((9477, 9490), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (9488, 9490), False, 'import pygame\n'), ((9495, 9509), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (9507, 9509), False, 'from matplotlib import pyplot\n'), ((9514, 9537), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (9535, 9537), False, 'import cv2\n'), ((3174, 3198), 'numpy.hamming', 'np.hamming', (['N_USE_IN_FFT'], {}), '(N_USE_IN_FFT)\n', (3184, 3198), True, 'import numpy as np\n'), ((3411, 3436), 'numpy.fft.fft', 'np.fft.fft', (['windowed_data'], {}), '(windowed_data)\n', (3421, 3436), True, 'import numpy as np\n'), ((3461, 3501), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['N_USE_IN_FFT'], {'d': '(1.0 / fs)'}), '(N_USE_IN_FFT, d=1.0 / fs)\n', (3475, 3501), True, 'import numpy as np\n'), ((3766, 3796), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (3779, 3796), False, 'from matplotlib import pyplot\n'), ((3817, 3840), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(2)'], {}), '(2, 2)\n', (3834, 3840), True, 'import matplotlib.gridspec as gridspec\n'), ((3868, 3899), 'matplotlib.pyplot.subplot', 'pyplot.subplot', (['grid_spec[0, :]'], {}), '(grid_spec[0, :])\n', (3882, 3899), False, 'from matplotlib import pyplot\n'), ((3923, 3954), 'matplotlib.pyplot.subplot', 'pyplot.subplot', (['grid_spec[1, 0]'], {}), '(grid_spec[1, 0])\n', (3937, 3954), False, 'from matplotlib import pyplot\n'), ((3973, 4004), 'matplotlib.pyplot.subplot', 'pyplot.subplot', (['grid_spec[1, 
1]'], {}), '(grid_spec[1, 1])\n', (3987, 4004), False, 'from matplotlib import pyplot\n'), ((4578, 4593), 'matplotlib.pyplot.pause', 'pyplot.pause', (['(1)'], {}), '(1)\n', (4590, 4593), False, 'from matplotlib import pyplot\n'), ((4783, 4802), 'pygame.event.pump', 'pygame.event.pump', ([], {}), '()\n', (4800, 4802), False, 'import pygame\n'), ((4826, 4850), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (4848, 4850), False, 'import pygame\n'), ((5158, 5176), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (5174, 5176), False, 'import pygame\n'), ((6867, 6890), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (6888, 6890), False, 'import pygame\n'), ((7623, 7640), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (7638, 7640), False, 'import pyaudio\n'), ((1534, 1545), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1542, 1545), False, 'import sys\n'), ((1946, 1976), 'numpy.where', 'np.where', (['(one_column_data == 0)'], {}), '(one_column_data == 0)\n', (1954, 1976), True, 'import numpy as np\n'), ((3526, 3560), 'numpy.sqrt', 'np.sqrt', (['(c.real ** 2 + c.imag ** 2)'], {}), '(c.real ** 2 + c.imag ** 2)\n', (3533, 3560), True, 'import numpy as np\n'), ((6619, 6732), 'pygame.Rect', 'pygame.Rect', (['((KEY_OFFSET + KEY_WIDTH) * i + KEY_OFFSET + KEY_WIDTH / 2)', 'KEY_OFFSET', 'KEY_WIDTH', '(KEY_HEIGHT / 2)'], {}), '((KEY_OFFSET + KEY_WIDTH) * i + KEY_OFFSET + KEY_WIDTH / 2,\n KEY_OFFSET, KEY_WIDTH, KEY_HEIGHT / 2)\n', (6630, 6732), False, 'import pygame\n'), ((5801, 5894), 'pygame.Rect', 'pygame.Rect', (['((KEY_OFFSET + KEY_WIDTH) * i + KEY_OFFSET)', 'KEY_OFFSET', 'KEY_WIDTH', 'KEY_HEIGHT'], {}), '((KEY_OFFSET + KEY_WIDTH) * i + KEY_OFFSET, KEY_OFFSET,\n KEY_WIDTH, KEY_HEIGHT)\n', (5812, 5894), False, 'import pygame\n'), ((6166, 6259), 'pygame.Rect', 'pygame.Rect', (['((KEY_OFFSET + KEY_WIDTH) * i + KEY_OFFSET)', 'KEY_OFFSET', 'KEY_WIDTH', 'KEY_HEIGHT'], {}), '((KEY_OFFSET + KEY_WIDTH) * i + KEY_OFFSET, 
KEY_OFFSET,\n KEY_WIDTH, KEY_HEIGHT)\n', (6177, 6259), False, 'import pygame\n')] |
import numpy as np
from predictions.utils.future import set_future_series
def random_forecast(series, steps_ahead=3, freq='D', series_name='random'):
    """
    Produce a naive forecast by sampling uniformly from the one-standard-deviation
    band around the series mean.

    INPUT:
    :param series: pandas Series of data,
    :param steps_ahead: number of steps into the future to predict, default is 3,
    :param freq: (str) representation of a time frequency.
    OUTPUT:
    :return: series with extrapolated values
    """
    mean, spread = series.mean(), series.std()
    sampled = np.random.uniform(low=mean - spread, high=mean + spread, size=steps_ahead)
    return set_future_series(
        forecasted_values=sampled,
        series_name=series_name,
        last_date=series.index[-1],
        steps_ahead=steps_ahead,
        frequency=freq,
    )
| [
"predictions.utils.future.set_future_series",
"numpy.random.uniform"
] | [((649, 708), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '_bottom', 'high': '_top', 'size': 'steps_ahead'}), '(low=_bottom, high=_top, size=steps_ahead)\n', (666, 708), True, 'import numpy as np\n'), ((722, 862), 'predictions.utils.future.set_future_series', 'set_future_series', ([], {'forecasted_values': 'preds', 'series_name': 'series_name', 'last_date': 'series.index[-1]', 'steps_ahead': 'steps_ahead', 'frequency': 'freq'}), '(forecasted_values=preds, series_name=series_name,\n last_date=series.index[-1], steps_ahead=steps_ahead, frequency=freq)\n', (739, 862), False, 'from predictions.utils.future import set_future_series\n')] |
from kdaHDFE.legacy.DemeanDataframe import demean_dataframe
from kdaHDFE.formula_transform import formula_transform
from kdaHDFE.legacy.OLSFixed import OLSFixed
from kdaHDFE.robust_error import robust_err
from kdaHDFE.clustering import *
from kdaHDFE.calculate_df import cal_df
from kdaHDFE.legacy.CalFullModel import cal_fullmodel
import statsmodels.api as sm
from scipy.stats import t
from scipy.stats import f
import numpy as np
import pandas as pd
import time
def ols_high_d_category(data_df, formula=None, robust=False, c_method='cgm', psdef=True, epsilon=1e-8, max_iter=1e6,
                        debug=False):
    """
    :param data_df: Dataframe of relevant data
    :type data_df: pd.DataFrame
    :param formula: Formula takes the form of dependent_variable~continuous_variable|fixed_effect|clusters
    :type formula: str
    :param robust: bool value of whether to get a robust variance
    :type robust: bool
    :param c_method: method used to calculate multi-way clusters variance. Possible choices are:
            - 'cgm'
            - 'cgm2'
    :type c_method: str
    :param psdef:if True, replace negative eigenvalue of variance matrix with 0 (only in multi-way clusters variance)
    :type psdef: bool
    :param epsilon: tolerance of the demean process
    :type epsilon: float
    :param max_iter: max iteration of the demean process
    :type max_iter: float
    :param debug: If true then print all individual stage prints, defaults to false.
    :type debug: bool
    :return:params,df,bse,tvalues,pvalues,rsquared,rsquared_adj,fvalue,f_pvalue,variance_matrix,fittedvalues,resid,summary

    Example
    -------
    y~x+x2|id+firm|id'

    """
    total_start = time.time()
    # Split formula into dependent var, regressors, fixed effects and cluster vars.
    out_col, consist_col, category_col, cluster_col = formula_transform(formula)
    consist_var = []
    if len(category_col) == 0 or len(consist_col) == 0:
        # No fixed effects (or no regressors): plain OLS with an intercept.
        demeaned_df = data_df.copy()
        const_consist = sm.add_constant(demeaned_df[consist_col])
        consist_col = ['const'] + consist_col
        demeaned_df['const'] = const_consist['const']
        rank = 0  # no degrees of freedom absorbed by fixed effects
    else:
        for i in consist_col:
            consist_var.append(i)
        consist_var.append(out_col[0])
        # NOTE(review): the start/end assignments below are timing leftovers
        # that are never reported anywhere.
        start = time.time()
        # Iteratively demean outcome and regressors within the fixed-effect groups.
        demeaned_df = demean_dataframe(data_df, consist_var, category_col, epsilon, max_iter)
        end = time.time()
        start = time.process_time()
        rank = cal_df(data_df, category_col)  # df consumed by the fixed effects
        end = time.process_time()
    model = sm.OLS(demeaned_df[out_col], demeaned_df[consist_col])
    result = model.fit()
    demeaned_df['resid'] = result.resid
    n = demeaned_df.shape[0]
    k = len(consist_col)
    f_result = OLSFixed()
    f_result.out_col = out_col
    f_result.consist_col = consist_col
    f_result.category_col = category_col
    f_result.data_df = data_df.copy()
    f_result.demeaned_df = demeaned_df
    f_result.params = result.params
    # Residual df adjusted for the rank absorbed by the fixed effects.
    f_result.df = result.df_resid - rank
    # Now we need to update the standard errors of the OLS based on robust and clustering
    if (len(cluster_col) == 0) & (robust is False):
        # Classical standard errors, rescaled for the absorbed degrees of freedom.
        std_error = result.bse * np.sqrt((n - k) / (n - k - rank))
        covariance_matrix = result.normalized_cov_params * result.scale * result.df_resid / f_result.df
    elif (len(cluster_col) == 0) & (robust is True):
        # Heteroskedasticity-robust covariance.
        covariance_matrix = robust_err(demeaned_df, consist_col, n, k, rank)
        std_error = np.sqrt(np.diag(covariance_matrix))
    else:
        # Clustered covariance; check whether fixed effects are nested in clusters.
        if category_col[0] == '0':
            nested = False
        else:
            nested = is_nested(demeaned_df, category_col, cluster_col, consist_col)
        covariance_matrix = clustered_error(demeaned_df, consist_col, cluster_col, n, k, rank, nested=nested,
                                            c_method=c_method, psdef=psdef)
        std_error = np.sqrt(np.diag(covariance_matrix))
    f_result.bse = std_error
    # print(f_result.bse)
    f_result.variance_matrix = covariance_matrix
    f_result.tvalues = f_result.params / f_result.bse
    # Two-sided p-values from the t distribution with the adjusted df.
    f_result.pvalues = pd.Series(2 * t.sf(np.abs(f_result.tvalues), f_result.df), index=list(result.params.index))
    f_result.rsquared = result.rsquared
    f_result.rsquared_adj = 1 - (len(data_df) - 1) / (result.df_resid - rank) * (1 - result.rsquared)
    # Wald F statistic: params' V^{-1} params / df_model.
    tmp1 = np.linalg.solve(f_result.variance_matrix, np.mat(f_result.params).T)
    tmp2 = np.dot(np.mat(f_result.params), tmp1)
    f_result.fvalue = tmp2[0, 0] / result.df_model
    if len(cluster_col) > 0 and c_method == 'cgm':
        # With CGM clustering, cap the denominator df at (min cluster count - 1).
        f_result.f_pvalue = f.sf(f_result.fvalue, result.df_model,
                                 min(min_clust(data_df, cluster_col) - 1, f_result.df))
        f_result.f_df_proj = [result.df_model, (min(min_clust(data_df, cluster_col) - 1, f_result.df))]
    else:
        f_result.f_pvalue = f.sf(f_result.fvalue, result.df_model, f_result.df)
        f_result.f_df_proj = [result.df_model, f_result.df]
    # std err=diag( np.sqrt(result.normalized_cov_params*result.scale*result.df_resid/f_result.df) )
    f_result.fittedvalues = result.fittedvalues
    f_result.resid = result.resid
    # Full-model (non-demeaned) fit statistics computed from the residual sum of squares.
    f_result.full_rsquared, f_result.full_rsquared_adj, f_result.full_fvalue, f_result.full_f_pvalue, f_result.f_df_full\
        = cal_fullmodel(data_df, out_col, consist_col, rank, RSS=sum(result.resid ** 2))
    f_result.nobs = result.nobs
    f_result.yname = out_col
    f_result.xname = consist_col
    f_result.resid_std_err = np.sqrt(sum(result.resid ** 2) / (result.df_resid - rank))
    if len(cluster_col) == 0:
        f_result.cluster_method = 'no_cluster'
        if robust:
            f_result.Covariance_Type = 'robust'
        else:
            f_result.Covariance_Type = 'nonrobust'
    else:
        f_result.cluster_method = c_method
        f_result.Covariance_Type = 'clustered'
    end = time.time()
    if debug:
        print(f"Total {end - total_start}")
    return f_result  # , demeaned_df
| [
"kdaHDFE.calculate_df.cal_df",
"kdaHDFE.legacy.DemeanDataframe.demean_dataframe",
"numpy.mat",
"numpy.abs",
"numpy.sqrt",
"kdaHDFE.legacy.OLSFixed.OLSFixed",
"kdaHDFE.robust_error.robust_err",
"numpy.diag",
"statsmodels.api.add_constant",
"kdaHDFE.formula_transform.formula_transform",
"time.proc... | [((1704, 1715), 'time.time', 'time.time', ([], {}), '()\n', (1713, 1715), False, 'import time\n'), ((1771, 1797), 'kdaHDFE.formula_transform.formula_transform', 'formula_transform', (['formula'], {}), '(formula)\n', (1788, 1797), False, 'from kdaHDFE.formula_transform import formula_transform\n'), ((2485, 2539), 'statsmodels.api.OLS', 'sm.OLS', (['demeaned_df[out_col]', 'demeaned_df[consist_col]'], {}), '(demeaned_df[out_col], demeaned_df[consist_col])\n', (2491, 2539), True, 'import statsmodels.api as sm\n'), ((2674, 2684), 'kdaHDFE.legacy.OLSFixed.OLSFixed', 'OLSFixed', ([], {}), '()\n', (2682, 2684), False, 'from kdaHDFE.legacy.OLSFixed import OLSFixed\n'), ((5817, 5828), 'time.time', 'time.time', ([], {}), '()\n', (5826, 5828), False, 'import time\n'), ((1937, 1978), 'statsmodels.api.add_constant', 'sm.add_constant', (['demeaned_df[consist_col]'], {}), '(demeaned_df[consist_col])\n', (1952, 1978), True, 'import statsmodels.api as sm\n'), ((2225, 2236), 'time.time', 'time.time', ([], {}), '()\n', (2234, 2236), False, 'import time\n'), ((2259, 2330), 'kdaHDFE.legacy.DemeanDataframe.demean_dataframe', 'demean_dataframe', (['data_df', 'consist_var', 'category_col', 'epsilon', 'max_iter'], {}), '(data_df, consist_var, category_col, epsilon, max_iter)\n', (2275, 2330), False, 'from kdaHDFE.legacy.DemeanDataframe import demean_dataframe\n'), ((2345, 2356), 'time.time', 'time.time', ([], {}), '()\n', (2354, 2356), False, 'import time\n'), ((2373, 2392), 'time.process_time', 'time.process_time', ([], {}), '()\n', (2390, 2392), False, 'import time\n'), ((2408, 2437), 'kdaHDFE.calculate_df.cal_df', 'cal_df', (['data_df', 'category_col'], {}), '(data_df, category_col)\n', (2414, 2437), False, 'from kdaHDFE.calculate_df import cal_df\n'), ((2452, 2471), 'time.process_time', 'time.process_time', ([], {}), '()\n', (2469, 2471), False, 'import time\n'), ((4378, 4401), 'numpy.mat', 'np.mat', (['f_result.params'], {}), '(f_result.params)\n', (4384, 4401), True, 
'import numpy as np\n'), ((4808, 4859), 'scipy.stats.f.sf', 'f.sf', (['f_result.fvalue', 'result.df_model', 'f_result.df'], {}), '(f_result.fvalue, result.df_model, f_result.df)\n', (4812, 4859), False, 'from scipy.stats import f\n'), ((3126, 3159), 'numpy.sqrt', 'np.sqrt', (['((n - k) / (n - k - rank))'], {}), '((n - k) / (n - k - rank))\n', (3133, 3159), True, 'import numpy as np\n'), ((3345, 3393), 'kdaHDFE.robust_error.robust_err', 'robust_err', (['demeaned_df', 'consist_col', 'n', 'k', 'rank'], {}), '(demeaned_df, consist_col, n, k, rank)\n', (3355, 3393), False, 'from kdaHDFE.robust_error import robust_err\n'), ((4333, 4356), 'numpy.mat', 'np.mat', (['f_result.params'], {}), '(f_result.params)\n', (4339, 4356), True, 'import numpy as np\n'), ((3422, 3448), 'numpy.diag', 'np.diag', (['covariance_matrix'], {}), '(covariance_matrix)\n', (3429, 3448), True, 'import numpy as np\n'), ((3835, 3861), 'numpy.diag', 'np.diag', (['covariance_matrix'], {}), '(covariance_matrix)\n', (3842, 3861), True, 'import numpy as np\n'), ((4064, 4088), 'numpy.abs', 'np.abs', (['f_result.tvalues'], {}), '(f_result.tvalues)\n', (4070, 4088), True, 'import numpy as np\n')] |
import numpy as np
class TrendLine(object):
    """A linear trend line that can be drawn onto a matplotlib-style axis."""

    def __init__(self, name, data):
        """
        :param name: label of the series the trend line belongs to.
        :param data: sequence of numeric values, one per x position.
        """
        self.name = name
        self.values = data

    def plot(self, ax):
        """
        Fit a degree-1 polynomial to the values and draw it on *ax* as a
        dotted line. All spines except the bottom one are hidden and every
        tick/label is removed.

        :param ax: matplotlib Axes-like object to draw on.
        """
        xs = range(0, len(self.values))
        # Least-squares linear fit: value ~ index.
        z = np.polyfit(xs, self.values, 1)
        p = np.poly1d(z)
        for k, v in ax.spines.items():
            v.set_edgecolor('#D3D3D3')
            if k != 'bottom':
                v.set_visible(False)
        ax.set_xticklabels([], visible=False)
        ax.set_yticklabels([], visible=False)
        ax.get_xaxis().set_ticks([])
        ax.get_yaxis().set_ticks([])
        # BUGFIX: evaluate the fitted polynomial at the x positions, not at the
        # y values themselves (the old p(self.values) plotted a wrong line).
        ax.plot(xs, p(xs), ':', linewidth=0.5)
| [
"numpy.poly1d"
] | [((238, 250), 'numpy.poly1d', 'np.poly1d', (['z'], {}), '(z)\n', (247, 250), True, 'import numpy as np\n')] |
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import logging
import glob
import math
import json
import argparse
from tqdm import tqdm, trange
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler
from torch.utils.data.distributed import DistributedSampler
import random
import copy
import time
from pytorch_pretrained_bert.tokenization import BertTokenizer, WhitespaceTokenizer
from pytorch_pretrained_bert.modeling import BertForPreTrainingLossMask
from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear
from vdbert.loader_utils import batch_list_to_batch_tensors_rank_loss
from vdbert.seq2seq_loader import VisdialDatasetRelRankLoss, Preprocess4TrainVisdialRankLoss
from vdbert.data_parallel import DataParallelImbalance
def _get_max_epoch_model(output_dir):
fn_model_list = glob.glob(os.path.join(output_dir, "model.*.bin"))
fn_optim_list = glob.glob(os.path.join(output_dir, "optim.*.bin"))
if (not fn_model_list) or (not fn_optim_list):
return None
both_set = set([int(Path(fn).stem.split('.')[-1]) for fn in fn_model_list]
) & set([int(Path(fn).stem.split('.')[-1]) for fn in fn_optim_list])
if both_set:
return max(both_set)
else:
return None
def process_args():
    """Build and parse the command-line arguments for VisDial BERT training,
    echo the raw command line, and return the parsed namespace."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--train_src_file", type=str, help="The input data file name.")
    parser.add_argument("--val_src_file", type=str, help="The input data file name.")
    parser.add_argument("--train_rel_file", type=str, help="The input data file name.")
    parser.add_argument("--val_rel_file", type=str, help="The input data file name.")
    parser.add_argument("--float_nsp_label", type=int, help="")
    parser.add_argument("--neg_num", default=0, type=int)
    parser.add_argument("--rank_loss", default='', choices=['softmax', 'listmle', 'listnet', 'approxndcg'], type=str)
    parser.add_argument("--add_val", default=0, type=int)
    # NOTE(review): this flag name contains ".py" — likely a copy/paste slip;
    # argparse exposes it as args.train_visdial_disc_with_relevance_py. Confirm.
    parser.add_argument("--train_visdial_disc_with_relevance.py", default=0, type=int)
    parser.add_argument("--multiple_neg", default=0, type=int)
    parser.add_argument("--inc_gt_rel", default=0, type=int)
    parser.add_argument("--inc_full_hist", default=0, type=int)
    parser.add_argument("--pad_hist", default=0, type=int)
    parser.add_argument("--only_mask_ans", default=0, type=int)
    parser.add_argument("--visdial_v", default='1.0', choices=['1.0', '0.9'], type=str)
    parser.add_argument("--loss_type", default='mlm', choices=['mlm', 'nsp', 'mlm_nsp'], type=str)
    parser.add_argument("--image_features_hdfpath", default='/export/home/vlp_data/visdial/img_feats1.0/train.h5',
                        type=str)
    parser.add_argument('--len_vis_input', type=int, default=36)
    parser.add_argument('--max_len_ans', type=int, default=10)
    parser.add_argument('--max_len_hist_ques', type=int, default=40)
    parser.add_argument("--finetune", default=0, type=int)
    # parser.add_argument("--mask_all_ans", default=0, type=int)
    # parser.add_argument("--include_mask_lm", default=0, type=int)
    # parser.add_argument("--single_label", default=0, type=int)
    parser.add_argument('--tasks', default='img2txt',
                        help='img2txt | vqa2| ctrl2 | visdial | visdial_short_hist | visdial_nsp')
    # General
    parser.add_argument("--bert_model", default="bert-base-cased", type=str,
                        help="Bert pre-trained model selected in the list: bert-base-cased, bert-large-cased.")
    parser.add_argument("--config_path", default=None, type=str,
                        help="Bert config file path.")
    parser.add_argument("--output_dir",
                        default='tmp',
                        type=str,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument("--log_file",
                        default="training.log",
                        type=str,
                        help="The output directory where the log will be written.")
    parser.add_argument("--model_recover_path",
                        default=None,
                        type=str,
                        help="The file of fine-tuned pretraining model.")
    parser.add_argument("--do_train",
                        action='store_true',
                        help="Whether to run training. This should ALWAYS be set to True.")
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=64,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--learning_rate", default=3e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--label_smoothing", default=0, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay",
                        default=0.01,
                        type=float,
                        help="The weight decay rate for Adam.")
    parser.add_argument("--finetune_decay",
                        action='store_true',
                        help="Weight decay to the original weights.")
    parser.add_argument("--num_train_epochs",
                        default=30,
                        type=int,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument("--global_rank",
                        type=int,
                        default=-1,
                        help="global_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--fp32_embedding', action='store_true',
                        help="Whether to use 32-bit float precision instead of 32-bit for embeddings")
    parser.add_argument('--loss_scale', type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    parser.add_argument('--amp', action='store_true',
                        help="Whether to use amp for fp16")
    parser.add_argument('--from_scratch', action='store_true',
                        help="Initialize parameters with random values (i.e., training from scratch).")
    parser.add_argument('--new_segment_ids', action='store_true',
                        help="Use new segment ids for bi-uni-directional LM.")
    parser.add_argument('--tokenized_input', action='store_true',
                        help="Whether the input is tokenized.")
    # parser.add_argument('--max_len_a', type=int, default=0,
    #                     help="Truncate_config: maximum length of segment A. 0 means none.")
    # parser.add_argument('--max_b_len', type=int, default=20,
    #                     help="Truncate_config: maximum length of segment B.")
    parser.add_argument('--trunc_seg', default='b',
                        help="Truncate_config: first truncate segment A/B (option: a, b).")
    parser.add_argument('--always_truncate_tail', action='store_true',
                        help="Truncate_config: Whether we should always truncate tail.")
    parser.add_argument("--mask_prob", default=0.15, type=float,
                        help="Number of prediction is sometimes less than max_pred when sequence is short.")
    parser.add_argument('--max_pred', type=int, default=3,
                        help="Max tokens of prediction.")
    parser.add_argument("--num_workers", default=4, type=int,  # yue should be 4
                        help="Number of workers for the data loader.")
    parser.add_argument('--max_position_embeddings', type=int, default=None,
                        help="max position embeddings")
    # Others for VLP
    parser.add_argument('--enable_visdom', action='store_true')
    parser.add_argument('--visdom_port', type=int, default=8888)
    # parser.add_argument('--resnet_model', type=str, default='imagenet_weights/resnet101.pth')
    parser.add_argument('--image_root', type=str, default='/mnt/dat/COCO/images')
    parser.add_argument('--dataset', default='coco', type=str,
                        help='coco | flickr30k | cc')
    parser.add_argument('--split', type=str, nargs='+', default=['train', 'restval'])
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist_url', default='file://[PT_OUTPUT_DIR]/nonexistent_file', type=str,
                        help='url used to set up distributed training')
    parser.add_argument('--file_valid_jpgs', default='/mnt/dat/COCO/annotations/coco_valid_jpgs.json', type=str)
    parser.add_argument('--sche_mode', default='warmup_linear', type=str,
                        help="warmup_linear | warmup_constant | warmup_cosine")
    parser.add_argument('--drop_prob', default=0.1, type=float)
    parser.add_argument('--use_num_imgs', default=500, type=int)
    parser.add_argument('--vis_mask_prob', default=0, type=float)
    parser.add_argument('--max_drop_worst_ratio', default=0, type=float)
    parser.add_argument('--drop_after', default=6, type=int)
    parser.add_argument('--s2s_prob', default=1, type=float,
                        help="Percentage of examples that are bi-uni-directional LM (seq2seq).")
    parser.add_argument('--bi_prob', default=0, type=float,
                        help="Percentage of examples that are bidirectional LM.")
    parser.add_argument('--l2r_prob', default=0, type=float,
                        help="Percentage of examples that are unidirectional (left-to-right) LM.")
    parser.add_argument('--enable_butd', action='store_true',
                        help='set to take in region features')
    parser.add_argument('--region_bbox_file', default='coco_detection_vg_thresh0.2_feat_gvd_checkpoint_trainvaltest.h5',
                        type=str)
    parser.add_argument('--region_det_file_prefix',
                        default='feat_cls_1000/coco_detection_vg_100dets_gvd_checkpoint_trainval', type=str)
    parser.add_argument('--relax_projection',
                        action='store_true',
                        help="Use different projection layers for tasks.")
    parser.add_argument('--scst', action='store_true',
                        help='Self-critical sequence training')
    args = parser.parse_args()
    # os.environ["CUDA_VISIBLE_DEVICES"] = ','.join([str(d) for d in range(args.world_size)])
    print('Arguments: %s' % (' '.join(sys.argv[:])))
    return args
def main():
    """End-to-end fine-tuning driver for the VisDial rank-loss model.

    Parses command-line arguments, sets up logging and (optionally)
    distributed training, builds the dataset/dataloader pipeline and the
    BERT-based model (optionally recovering from a checkpoint), then runs
    the training loop and saves a model checkpoint after every epoch
    (on the first rank only when distributed).
    """
    args = process_args()
    # Sanity-check argument combinations per loss type.
    if args.loss_type == 'mlm':
        assert args.neg_num == 0 and args.multiple_neg == 0
    elif args.loss_type == 'nsp':
        assert int(args.bi_prob) == 1 and args.max_pred == 0 and args.neg_num > 0
    print('global_rank: {}, local rank: {}'.format(args.global_rank, args.local_rank))
    # Input format: [CLS] img [SEP] hist [SEP_0] ques [SEP_1] ans [SEP]
    args.max_seq_length = args.len_vis_input + 2 + args.max_len_hist_ques + 2 + args.max_len_ans + 1
    args.mask_image_regions = (args.vis_mask_prob > 0)  # whether to mask out image regions
    args.dist_url = args.dist_url.replace('[PT_OUTPUT_DIR]', args.output_dir)
    # arguments inspection
    assert args.enable_butd, 'only support region attn! featmap attn deprecated'
    if args.enable_butd:
        # Expected number of region features depends on the VisDial version.
        if args.visdial_v == '1.0':
            assert (args.len_vis_input == 36)
        elif args.visdial_v == '0.9':
            assert (args.len_vis_input == 100)
        args.region_bbox_file = os.path.join(args.image_root, args.region_bbox_file)
        args.region_det_file_prefix = os.path.join(args.image_root,
            args.region_det_file_prefix) if args.dataset in (
            'cc', 'coco') and args.region_det_file_prefix != '' else ''
    # output config
    os.makedirs(args.output_dir, exist_ok=True)
    # Persist the full argument set for reproducibility.
    json.dump(args.__dict__, open(os.path.join(
        args.output_dir, 'opt.json'), 'w'), sort_keys=True, indent=2)
    logging.basicConfig(
        filename=os.path.join(args.output_dir, args.log_file),
        filemode='w',
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO)
    logger = logging.getLogger(__name__)
    stdout = True
    if stdout:
        # Mirror log records to stdout in addition to the log file.
        ch = logging.StreamHandler(sys.stdout)
        ch.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s'))
        ch.setLevel(logging.INFO)
        logger.addHandler(ch)
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl', init_method=args.dist_url,
                                             world_size=args.world_size, rank=args.global_rank)
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    # Per-step batch size is the effective batch divided by accumulation steps.
    args.train_batch_size = int(
        args.train_batch_size / args.gradient_accumulation_steps)
    # fix random seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    # plotting loss, optional
    if args.enable_visdom:
        import visdom
        vis = visdom.Visdom(port=args.visdom_port, env=args.output_dir)
        vis_window = {'iter': None, 'score': None}
    tokenizer = BertTokenizer.from_pretrained(
        args.bert_model, do_lower_case=args.do_lower_case,
        cache_dir=args.output_dir + '/.pretrained_model_{}'.format(args.global_rank))
    if args.max_position_embeddings:
        tokenizer.max_len = args.max_position_embeddings
    data_tokenizer = WhitespaceTokenizer() if args.tokenized_input else tokenizer
    assert args.do_train
    # One preprocessing pipeline per LM mode: seq2seq ("s2s") and bidirectional ("bi").
    bi_uni_pipeline = [Preprocess4TrainVisdialRankLoss(args.max_pred, args.mask_prob,
                                                       list(tokenizer.vocab.keys()),
                                                       tokenizer.convert_tokens_to_ids, args.max_seq_length,
                                                       new_segment_ids=args.new_segment_ids,
                                                       truncate_config={'len_vis_input': args.len_vis_input,
                                                                        'max_len_hist_ques': args.max_len_hist_ques,
                                                                        'max_len_ans': args.max_len_ans},
                                                       mask_image_regions=args.mask_image_regions,
                                                       mode="s2s", vis_mask_prob=args.vis_mask_prob,
                                                       region_bbox_file=args.region_bbox_file,
                                                       region_det_file_prefix=args.region_det_file_prefix,
                                                       image_features_hdfpath=args.image_features_hdfpath,
                                                       visdial_v=args.visdial_v, pad_hist=args.pad_hist,
                                                       finetune=args.finetune,
                                                       only_mask_ans=args.only_mask_ans,
                                                       float_nsp_label=args.float_nsp_label),
                       Preprocess4TrainVisdialRankLoss(args.max_pred, args.mask_prob,
                                                       list(tokenizer.vocab.keys()),
                                                       tokenizer.convert_tokens_to_ids, args.max_seq_length,
                                                       new_segment_ids=args.new_segment_ids,
                                                       truncate_config={'len_vis_input': args.len_vis_input,
                                                                        'max_len_hist_ques': args.max_len_hist_ques,
                                                                        'max_len_ans': args.max_len_ans},
                                                       mask_image_regions=args.mask_image_regions,
                                                       mode="bi", vis_mask_prob=args.vis_mask_prob,
                                                       region_bbox_file=args.region_bbox_file,
                                                       region_det_file_prefix=args.region_det_file_prefix,
                                                       image_features_hdfpath=args.image_features_hdfpath,
                                                       visdial_v=args.visdial_v, pad_hist=args.pad_hist,
                                                       finetune=args.finetune,
                                                       only_mask_ans=args.only_mask_ans,
                                                       float_nsp_label=args.float_nsp_label)]
    train_dataset = VisdialDatasetRelRankLoss(
        args.train_src_file, args.val_src_file, args.train_rel_file, args.val_rel_file, args.train_batch_size, data_tokenizer,
        use_num_imgs=args.use_num_imgs,
        bi_uni_pipeline=bi_uni_pipeline, s2s_prob=args.s2s_prob, bi_prob=args.bi_prob,
        is_train=args.do_train, neg_num=args.neg_num, inc_gt_rel=args.inc_gt_rel, inc_full_hist=args.inc_full_hist)
    if args.world_size == 1:
        train_sampler = RandomSampler(train_dataset, replacement=False)
    else:
        train_sampler = DistributedSampler(train_dataset)
    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=args.train_batch_size, sampler=train_sampler,
                                                   num_workers=args.num_workers,
                                                   collate_fn=batch_list_to_batch_tensors_rank_loss, pin_memory=True)
    # note: args.train_batch_size has been changed to (/= args.gradient_accumulation_steps)
    t_total = int(len(train_dataloader) * args.num_train_epochs * 1. /
                  args.gradient_accumulation_steps)
    amp_handle = None
    if args.fp16 and args.amp:
        from apex import amp
        amp_handle = amp.init(enable_caching=True)
        logger.info("enable fp16 with amp")
    # Prepare model
    recover_step = _get_max_epoch_model(args.output_dir)
    cls_num_labels = 2
    type_vocab_size = 6 if args.new_segment_ids else 2
    relax_projection = 4 if args.relax_projection else 0
    task_idx_proj = 3 if args.tasks == 'img2txt' else 0
    mask_word_id, eos_word_ids, pad_word_ids = tokenizer.convert_tokens_to_ids(
        ["[MASK]", "[SEP]", "[PAD]"])  # index in BERT vocab: 103, 102, 0
    if (recover_step is None) and (args.model_recover_path is None):
        # if _state_dict == {}, the parameters are randomly initialized
        # if _state_dict == None, the parameters are initialized with bert-init
        assert args.scst == False, 'must init from maximum likelihood training'
        _state_dict = {} if args.from_scratch else None
        model = BertForPreTrainingLossMask.from_pretrained(
            args.bert_model, state_dict=_state_dict, num_labels=cls_num_labels,
            type_vocab_size=type_vocab_size, relax_projection=relax_projection,
            config_path=args.config_path, task_idx=task_idx_proj,
            max_position_embeddings=args.max_position_embeddings, label_smoothing=args.label_smoothing,
            fp32_embedding=args.fp32_embedding,
            cache_dir=args.output_dir + '/.pretrained_model_{}'.format(args.global_rank),
            drop_prob=args.drop_prob, enable_butd=args.enable_butd,
            len_vis_input=args.len_vis_input, visdial_v=args.visdial_v, loss_type=args.loss_type,
            float_nsp_label=args.float_nsp_label, rank_loss=args.rank_loss)
        global_step = 0
    else:
        # Resume either from the latest epoch checkpoint in output_dir or
        # from an explicitly supplied model path.
        if recover_step:
            logger.info("***** Recover model: %d *****", recover_step)
            model_recover = torch.load(os.path.join(
                args.output_dir, "model.{0}.bin".format(recover_step)))
            # recover_step == number of epochs
            global_step = math.floor(
                recover_step * t_total * 1. / args.num_train_epochs)
        elif args.model_recover_path:
            logger.info("***** Recover model: %s *****",
                        args.model_recover_path)
            model_recover = torch.load(args.model_recover_path)
            global_step = 0
        model = BertForPreTrainingLossMask.from_pretrained(
            args.bert_model, state_dict=model_recover, num_labels=cls_num_labels,
            type_vocab_size=type_vocab_size, relax_projection=relax_projection,
            config_path=args.config_path, task_idx=task_idx_proj,
            max_position_embeddings=args.max_position_embeddings, label_smoothing=args.label_smoothing,
            fp32_embedding=args.fp32_embedding,
            cache_dir=args.output_dir + '/.pretrained_model_{}'.format(args.global_rank),
            drop_prob=args.drop_prob, enable_butd=args.enable_butd,
            len_vis_input=args.len_vis_input, visdial_v=args.visdial_v, loss_type=args.loss_type,
            float_nsp_label=args.float_nsp_label, rank_loss=args.rank_loss)
        del model_recover
        torch.cuda.empty_cache()
    if args.fp16:
        model.half()
        # cnn.half()
        if args.fp32_embedding:
            # Keep the embedding tables in fp32 even under fp16 training.
            model.bert.embeddings.word_embeddings.float()
            model.bert.embeddings.position_embeddings.float()
            model.bert.embeddings.token_type_embeddings.float()
    model.to(device)
    # cnn.to(device)
    if args.local_rank != -1:
        try:
            # from apex.parallel import DistributedDataParallel as DDP
            from torch.nn.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model = DDP(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
        # cnn = DDP(cnn)
    elif n_gpu > 1:
        # model = torch.nn.DataParallel(model)
        model = DataParallelImbalance(model)
        # cnn = DataParallelImbalance(cnn)
    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    # No weight decay for biases and LayerNorm parameters.
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(
            nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(
            nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    if args.fp16:
        try:
            # from apex.optimizers import FP16_Optimizer
            from pytorch_pretrained_bert.optimization_fp16 import FP16_Optimizer_State
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        # loss_scale == 0 means dynamic loss scaling; otherwise static.
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer_State(
                optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer_State(
                optimizer, static_loss_scale=args.loss_scale)
    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             schedule=args.sche_mode,
                             t_total=t_total)
    if recover_step:
        logger.info("***** Recover optimizer: %d *****", recover_step)
        optim_recover = torch.load(os.path.join(
            args.output_dir, "optim.{0}.bin".format(recover_step)))
        if hasattr(optim_recover, 'state_dict'):
            optim_recover = optim_recover.state_dict()
        optimizer.load_state_dict(optim_recover)
        if args.loss_scale == 0:
            logger.info("***** Recover optimizer: dynamic_loss_scale *****")
            optimizer.dynamic_loss_scale = True
    logger.info("***** CUDA.empty_cache() *****")
    torch.cuda.empty_cache()
    if args.do_train:
        logger.info("***** Running training *****")
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", t_total)
        logger.info("  Loader length = %d", len(train_dataloader))
        model.train()
        if recover_step:
            start_epoch = recover_step + 1
        else:
            start_epoch = 1
        logger.info("Begin training from epoch = %d", start_epoch)
        t0 = time.time()
        for i_epoch in trange(start_epoch, args.num_train_epochs + 1, desc="Epoch"):
            if args.multiple_neg and i_epoch != 1:
                # Resample negatives every epoch: rebuild dataset, sampler and loader.
                train_dataset = VisdialDatasetRelRankLoss(
                    args.train_src_file, args.val_src_file, args.train_rel_file, args.val_rel_file,
                    args.train_batch_size, data_tokenizer,
                    use_num_imgs=args.use_num_imgs,
                    bi_uni_pipeline=bi_uni_pipeline, s2s_prob=args.s2s_prob, bi_prob=args.bi_prob,
                    is_train=args.do_train, neg_num=args.neg_num, inc_gt_rel=args.inc_gt_rel,
                    inc_full_hist=args.inc_full_hist, add_val=args.add_val)
                if args.world_size == 1:
                    train_sampler = RandomSampler(train_dataset, replacement=False)
                else:
                    train_sampler = DistributedSampler(train_dataset)
                train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                               batch_size=args.train_batch_size, sampler=train_sampler,
                                                               num_workers=args.num_workers,
                                                               collate_fn=batch_list_to_batch_tensors_rank_loss, pin_memory=True)
            if args.local_rank >= 0:
                # Reshuffle the distributed sampler deterministically per epoch.
                train_sampler.set_epoch(i_epoch - 1)
            iter_bar = tqdm(train_dataloader, desc='Iter (loss=X.XXX)')
            nbatches = len(train_dataloader)
            losses = []
            pretext_loss = []
            mlm_losses = []
            nsp_losses = []
            zero_batch_cnt = 0
            for step, batch in enumerate(iter_bar):
                batch = [t.to(device) for t in batch]
                input_ids, segment_ids, input_mask, lm_label_ids, masked_pos, masked_weights, is_next, \
                    task_idx, vis_masked_pos, img, vis_pe = batch
                if args.fp16:
                    img = img.half()
                    vis_pe = vis_pe.half()
                if args.enable_butd:
                    conv_feats = img.data  # Bx100x2048
                    vis_pe = vis_pe.data
                loss_tuple = model(conv_feats, vis_pe, input_ids, segment_ids,
                                   input_mask, lm_label_ids, is_next, masked_pos=masked_pos,
                                   masked_weights=masked_weights, task_idx=task_idx,
                                   vis_masked_pos=vis_masked_pos, mask_image_regions=args.mask_image_regions,
                                   drop_worst_ratio=args.max_drop_worst_ratio if i_epoch > args.drop_after else 0)
                # disable pretext_loss_deprecated for now
                masked_lm_loss, pretext_loss_deprecated, nsp_loss = loss_tuple
                if n_gpu > 1:  # mean() to average on multi-gpu. For dist, this is done through gradient addition.
                    masked_lm_loss = masked_lm_loss.mean()
                    pretext_loss_deprecated = pretext_loss_deprecated.mean()
                    nsp_loss = nsp_loss.mean()
                loss = masked_lm_loss + pretext_loss_deprecated + nsp_loss
                # if loss.item() == 0:
                #     zero_batch_cnt += 1
                #     continue
                # logging for each step (i.e., before normalization by args.gradient_accumulation_steps)
                iter_bar.set_description('Iter (loss=%5.3f)' % loss.item())
                losses.append(loss.item())
                mlm_losses.append(masked_lm_loss.item())
                pretext_loss.append(pretext_loss_deprecated.item())
                nsp_losses.append(nsp_loss.item())
                # Log running averages roughly ten times per epoch.
                if step % max(1, nbatches // 10) == 0:
                    logger.info(
                        "Epoch {}, Iter {}, Loss {:.2f}, MLM {:.2f}, NSP {:.2f}, Elapse time {:.2f}\n".format(
                            i_epoch, step, np.mean(losses), np.mean(mlm_losses), np.mean(nsp_losses), time.time() - t0))
                if args.enable_visdom:
                    if vis_window['iter'] is None:
                        vis_window['iter'] = vis.line(
                            X=np.tile(np.arange((i_epoch - 1) * nbatches + step,
                                                (i_epoch - 1) * nbatches + step + 1), (1, 1)).T,
                            Y=np.column_stack((np.asarray([np.mean(losses)]),)),
                            opts=dict(title='Training Loss',
                                      xlabel='Training Iteration',
                                      ylabel='Loss',
                                      legend=['total'])
                        )
                    else:
                        vis.line(
                            X=np.tile(np.arange((i_epoch - 1) * nbatches + step,
                                                (i_epoch - 1) * nbatches + step + 1), (1, 1)).T,
                            Y=np.column_stack((np.asarray([np.mean(losses)]),)),
                            opts=dict(title='Training Loss',
                                      xlabel='Training Iteration',
                                      ylabel='Loss',
                                      legend=['total']),
                            win=vis_window['iter'],
                            update='append'
                        )
                # ensure that accumlated gradients are normalized
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    optimizer.backward(loss)
                    if amp_handle:
                        amp_handle._clear_cache()
                else:
                    loss.backward()
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    lr_this_step = args.learning_rate * \
                        warmup_linear(global_step / t_total,
                                      args.warmup_proportion)
                    if args.fp16:
                        # modify learning rate with special warm up BERT uses
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
            print("\nFinish one epoch, %d/%d is zero loss batch" % (zero_batch_cnt, nbatches))
            # Save a trained model
            logger.info(
                "** ** * Saving fine-tuned model and optimizer ** ** * ")
            model_to_save = model.module if hasattr(
                model, 'module') else model  # Only save the model it-self
            output_model_file = os.path.join(
                args.output_dir, "model.%d.%.3f.bin" % (i_epoch, np.mean(losses)))
            output_optim_file = os.path.join(
                args.output_dir, "optim.{0}.bin".format(i_epoch))
            if args.global_rank in (-1, 0):  # save model if the first device or no dist
                torch.save(copy.deepcopy(model_to_save).cpu().state_dict(), output_model_file)
                logger.info("Save model to %s", output_model_file)
                # torch.save(optimizer.state_dict(), output_optim_file)  # disable for now, need to sanitize state and ship everthing back to cpu
            logger.info("Finish training epoch %d, avg loss: %.2f and takes %.2f seconds" % (
                i_epoch, np.mean(losses), time.time() - t0))
            logger.info("***** CUDA.empty_cache() *****")
            torch.cuda.empty_cache()
            if args.world_size > 1:
                # Keep ranks in lockstep at epoch boundaries.
                torch.distributed.barrier()
# Script entry point: launch training when executed directly.
if __name__ == "__main__":
    main()
| [
"logging.getLogger",
"logging.StreamHandler",
"math.floor",
"torch.cuda.device_count",
"torch.utils.data.distributed.DistributedSampler",
"torch.cuda.is_available",
"copy.deepcopy",
"pytorch_pretrained_bert.optimization.warmup_linear",
"visdom.Visdom",
"vdbert.data_parallel.DataParallelImbalance",... | [((1449, 1474), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1472, 1474), False, 'import argparse\n'), ((13492, 13535), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {'exist_ok': '(True)'}), '(args.output_dir, exist_ok=True)\n', (13503, 13535), False, 'import os\n'), ((13916, 13943), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (13933, 13943), False, 'import logging\n'), ((15278, 15300), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (15289, 15300), False, 'import random\n'), ((15305, 15330), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (15319, 15330), True, 'import numpy as np\n'), ((15335, 15363), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (15352, 15363), False, 'import torch\n'), ((18967, 19353), 'vdbert.seq2seq_loader.VisdialDatasetRelRankLoss', 'VisdialDatasetRelRankLoss', (['args.train_src_file', 'args.val_src_file', 'args.train_rel_file', 'args.val_rel_file', 'args.train_batch_size', 'data_tokenizer'], {'use_num_imgs': 'args.use_num_imgs', 'bi_uni_pipeline': 'bi_uni_pipeline', 's2s_prob': 'args.s2s_prob', 'bi_prob': 'args.bi_prob', 'is_train': 'args.do_train', 'neg_num': 'args.neg_num', 'inc_gt_rel': 'args.inc_gt_rel', 'inc_full_hist': 'args.inc_full_hist'}), '(args.train_src_file, args.val_src_file, args.\n train_rel_file, args.val_rel_file, args.train_batch_size,\n data_tokenizer, use_num_imgs=args.use_num_imgs, bi_uni_pipeline=\n bi_uni_pipeline, s2s_prob=args.s2s_prob, bi_prob=args.bi_prob, is_train\n =args.do_train, neg_num=args.neg_num, inc_gt_rel=args.inc_gt_rel,\n inc_full_hist=args.inc_full_hist)\n', (18992, 19353), False, 'from vdbert.seq2seq_loader import VisdialDatasetRelRankLoss, Preprocess4TrainVisdialRankLoss\n'), ((19558, 19763), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 
'args.train_batch_size', 'sampler': 'train_sampler', 'num_workers': 'args.num_workers', 'collate_fn': 'batch_list_to_batch_tensors_rank_loss', 'pin_memory': '(True)'}), '(train_dataset, batch_size=args.train_batch_size,\n sampler=train_sampler, num_workers=args.num_workers, collate_fn=\n batch_list_to_batch_tensors_rank_loss, pin_memory=True)\n', (19585, 19763), False, 'import torch\n'), ((26468, 26492), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (26490, 26492), False, 'import torch\n'), ((988, 1027), 'os.path.join', 'os.path.join', (['output_dir', '"""model.*.bin"""'], {}), "(output_dir, 'model.*.bin')\n", (1000, 1027), False, 'import os\n'), ((1059, 1098), 'os.path.join', 'os.path.join', (['output_dir', '"""optim.*.bin"""'], {}), "(output_dir, 'optim.*.bin')\n", (1071, 1098), False, 'import os\n'), ((13990, 14023), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (14011, 14023), False, 'import logging\n'), ((14364, 14389), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (14387, 14389), False, 'import torch\n'), ((14408, 14446), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.local_rank'], {}), '(args.local_rank)\n', (14429, 14446), False, 'import torch\n'), ((14464, 14501), 'torch.device', 'torch.device', (['"""cuda"""', 'args.local_rank'], {}), "('cuda', args.local_rank)\n", (14476, 14501), False, 'import torch\n'), ((14622, 14757), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""', 'init_method': 'args.dist_url', 'world_size': 'args.world_size', 'rank': 'args.global_rank'}), "(backend='nccl', init_method=args.\n dist_url, world_size=args.world_size, rank=args.global_rank)\n", (14658, 14757), False, 'import torch\n'), ((15390, 15427), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (15416, 15427), False, 'import torch\n'), ((15522, 15579), 
'visdom.Visdom', 'visdom.Visdom', ([], {'port': 'args.visdom_port', 'env': 'args.output_dir'}), '(port=args.visdom_port, env=args.output_dir)\n', (15535, 15579), False, 'import visdom\n'), ((15939, 15960), 'pytorch_pretrained_bert.tokenization.WhitespaceTokenizer', 'WhitespaceTokenizer', ([], {}), '()\n', (15958, 15960), False, 'from pytorch_pretrained_bert.tokenization import BertTokenizer, WhitespaceTokenizer\n'), ((19418, 19465), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_dataset'], {'replacement': '(False)'}), '(train_dataset, replacement=False)\n', (19431, 19465), False, 'from torch.utils.data import DataLoader, RandomSampler\n'), ((19500, 19533), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['train_dataset'], {}), '(train_dataset)\n', (19518, 19533), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((20228, 20257), 'apex.amp.init', 'amp.init', ([], {'enable_caching': '(True)'}), '(enable_caching=True)\n', (20236, 20257), False, 'from apex import amp\n'), ((23306, 23330), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (23328, 23330), False, 'import torch\n'), ((24029, 24133), 'torch.nn.parallel.DistributedDataParallel', 'DDP', (['model'], {'device_ids': '[args.local_rank]', 'output_device': 'args.local_rank', 'find_unused_parameters': '(True)'}), '(model, device_ids=[args.local_rank], output_device=args.local_rank,\n find_unused_parameters=True)\n', (24032, 24133), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((25163, 25271), 'apex.optimizers.FusedAdam', 'FusedAdam', (['optimizer_grouped_parameters'], {'lr': 'args.learning_rate', 'bias_correction': '(False)', 'max_grad_norm': '(1.0)'}), '(optimizer_grouped_parameters, lr=args.learning_rate,\n bias_correction=False, max_grad_norm=1.0)\n', (25172, 25271), False, 'from apex.optimizers import FusedAdam\n'), ((25641, 25780), 'pytorch_pretrained_bert.optimization.BertAdam', 'BertAdam', 
(['optimizer_grouped_parameters'], {'lr': 'args.learning_rate', 'warmup': 'args.warmup_proportion', 'schedule': 'args.sche_mode', 't_total': 't_total'}), '(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.\n warmup_proportion, schedule=args.sche_mode, t_total=t_total)\n', (25649, 25780), False, 'from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear\n'), ((26961, 26972), 'time.time', 'time.time', ([], {}), '()\n', (26970, 26972), False, 'import time\n'), ((26996, 27056), 'tqdm.trange', 'trange', (['start_epoch', '(args.num_train_epochs + 1)'], {'desc': '"""Epoch"""'}), "(start_epoch, args.num_train_epochs + 1, desc='Epoch')\n", (27002, 27056), False, 'from tqdm import tqdm, trange\n'), ((13570, 13611), 'os.path.join', 'os.path.join', (['args.output_dir', '"""opt.json"""'], {}), "(args.output_dir, 'opt.json')\n", (13582, 13611), False, 'import os\n'), ((13697, 13741), 'os.path.join', 'os.path.join', (['args.output_dir', 'args.log_file'], {}), '(args.output_dir, args.log_file)\n', (13709, 13741), False, 'import os\n'), ((14048, 14123), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""'], {}), "('%(asctime)s - %(levelname)s - %(name)s - %(message)s')\n", (14065, 14123), False, 'import logging\n'), ((22181, 22245), 'math.floor', 'math.floor', (['(recover_step * t_total * 1.0 / args.num_train_epochs)'], {}), '(recover_step * t_total * 1.0 / args.num_train_epochs)\n', (22191, 22245), False, 'import math\n'), ((24238, 24266), 'vdbert.data_parallel.DataParallelImbalance', 'DataParallelImbalance', (['model'], {}), '(model)\n', (24259, 24266), False, 'from vdbert.data_parallel import DataParallelImbalance\n'), ((25415, 25471), 'pytorch_pretrained_bert.optimization_fp16.FP16_Optimizer_State', 'FP16_Optimizer_State', (['optimizer'], {'dynamic_loss_scale': '(True)'}), '(optimizer, dynamic_loss_scale=True)\n', (25435, 25471), False, 'from pytorch_pretrained_bert.optimization_fp16 import 
FP16_Optimizer_State\n'), ((25527, 25593), 'pytorch_pretrained_bert.optimization_fp16.FP16_Optimizer_State', 'FP16_Optimizer_State', (['optimizer'], {'static_loss_scale': 'args.loss_scale'}), '(optimizer, static_loss_scale=args.loss_scale)\n', (25547, 25593), False, 'from pytorch_pretrained_bert.optimization_fp16 import FP16_Optimizer_State\n'), ((28401, 28449), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {'desc': '"""Iter (loss=X.XXX)"""'}), "(train_dataloader, desc='Iter (loss=X.XXX)')\n", (28405, 28449), False, 'from tqdm import tqdm, trange\n'), ((34590, 34614), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (34612, 34614), False, 'import torch\n'), ((13161, 13213), 'os.path.join', 'os.path.join', (['args.image_root', 'args.region_bbox_file'], {}), '(args.image_root, args.region_bbox_file)\n', (13173, 13213), False, 'import os\n'), ((22434, 22469), 'torch.load', 'torch.load', (['args.model_recover_path'], {}), '(args.model_recover_path)\n', (22444, 22469), False, 'import torch\n'), ((27141, 27549), 'vdbert.seq2seq_loader.VisdialDatasetRelRankLoss', 'VisdialDatasetRelRankLoss', (['args.train_src_file', 'args.val_src_file', 'args.train_rel_file', 'args.val_rel_file', 'args.train_batch_size', 'data_tokenizer'], {'use_num_imgs': 'args.use_num_imgs', 'bi_uni_pipeline': 'bi_uni_pipeline', 's2s_prob': 'args.s2s_prob', 'bi_prob': 'args.bi_prob', 'is_train': 'args.do_train', 'neg_num': 'args.neg_num', 'inc_gt_rel': 'args.inc_gt_rel', 'inc_full_hist': 'args.inc_full_hist', 'add_val': 'args.add_val'}), '(args.train_src_file, args.val_src_file, args.\n train_rel_file, args.val_rel_file, args.train_batch_size,\n data_tokenizer, use_num_imgs=args.use_num_imgs, bi_uni_pipeline=\n bi_uni_pipeline, s2s_prob=args.s2s_prob, bi_prob=args.bi_prob, is_train\n =args.do_train, neg_num=args.neg_num, inc_gt_rel=args.inc_gt_rel,\n inc_full_hist=args.inc_full_hist, add_val=args.add_val)\n', (27166, 27549), False, 'from vdbert.seq2seq_loader import 
VisdialDatasetRelRankLoss, Preprocess4TrainVisdialRankLoss\n'), ((27902, 28107), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'args.train_batch_size', 'sampler': 'train_sampler', 'num_workers': 'args.num_workers', 'collate_fn': 'batch_list_to_batch_tensors_rank_loss', 'pin_memory': '(True)'}), '(train_dataset, batch_size=args.train_batch_size,\n sampler=train_sampler, num_workers=args.num_workers, collate_fn=\n batch_list_to_batch_tensors_rank_loss, pin_memory=True)\n', (27929, 28107), False, 'import torch\n'), ((34668, 34695), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (34693, 34695), False, 'import torch\n'), ((13256, 13314), 'os.path.join', 'os.path.join', (['args.image_root', 'args.region_det_file_prefix'], {}), '(args.image_root, args.region_det_file_prefix)\n', (13268, 13314), False, 'import os\n'), ((14289, 14314), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (14312, 14314), False, 'import torch\n'), ((27726, 27773), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_dataset'], {'replacement': '(False)'}), '(train_dataset, replacement=False)\n', (27739, 27773), False, 'from torch.utils.data import DataLoader, RandomSampler\n'), ((27832, 27865), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['train_dataset'], {}), '(train_dataset)\n', (27850, 27865), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((32904, 32964), 'pytorch_pretrained_bert.optimization.warmup_linear', 'warmup_linear', (['(global_step / t_total)', 'args.warmup_proportion'], {}), '(global_step / t_total, args.warmup_proportion)\n', (32917, 32964), False, 'from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear\n'), ((33839, 33854), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (33846, 33854), True, 'import numpy as np\n'), ((34484, 34499), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', 
(34491, 34499), True, 'import numpy as np\n'), ((30905, 30920), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (30912, 30920), True, 'import numpy as np\n'), ((30922, 30941), 'numpy.mean', 'np.mean', (['mlm_losses'], {}), '(mlm_losses)\n', (30929, 30941), True, 'import numpy as np\n'), ((30943, 30962), 'numpy.mean', 'np.mean', (['nsp_losses'], {}), '(nsp_losses)\n', (30950, 30962), True, 'import numpy as np\n'), ((34501, 34512), 'time.time', 'time.time', ([], {}), '()\n', (34510, 34512), False, 'import time\n'), ((30964, 30975), 'time.time', 'time.time', ([], {}), '()\n', (30973, 30975), False, 'import time\n'), ((1195, 1203), 'pathlib.Path', 'Path', (['fn'], {}), '(fn)\n', (1199, 1203), False, 'from pathlib import Path\n'), ((1282, 1290), 'pathlib.Path', 'Path', (['fn'], {}), '(fn)\n', (1286, 1290), False, 'from pathlib import Path\n'), ((34085, 34113), 'copy.deepcopy', 'copy.deepcopy', (['model_to_save'], {}), '(model_to_save)\n', (34098, 34113), False, 'import copy\n'), ((31167, 31246), 'numpy.arange', 'np.arange', (['((i_epoch - 1) * nbatches + step)', '((i_epoch - 1) * nbatches + step + 1)'], {}), '((i_epoch - 1) * nbatches + step, (i_epoch - 1) * nbatches + step + 1)\n', (31176, 31246), True, 'import numpy as np\n'), ((31749, 31828), 'numpy.arange', 'np.arange', (['((i_epoch - 1) * nbatches + step)', '((i_epoch - 1) * nbatches + step + 1)'], {}), '((i_epoch - 1) * nbatches + step, (i_epoch - 1) * nbatches + step + 1)\n', (31758, 31828), True, 'import numpy as np\n'), ((31366, 31381), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (31373, 31381), True, 'import numpy as np\n'), ((31948, 31963), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (31955, 31963), True, 'import numpy as np\n')] |
"""Use EDIA to assess quality of model fitness to electron density."""
import numpy as np
from . import Structure, XMap, ElectronDensityRadiusTable
from . import ResolutionBins, BondLengthTable
import argparse
import logging
import os
import time
logger = logging.getLogger(__name__)
class ediaOptions:
    """Options controlling an EDIA run.

    Holds default settings that can be overridden from parsed
    command-line arguments via :meth:`apply_command_args`.
    """

    def __init__(self):
        # General options
        self.directory = '.'
        self.debug = False
        # Density creation options
        self.map_type = None
        self.resolution = None
        self.resolution_min = None
        self.scattering = 'xray'

    def apply_command_args(self, args):
        """Overwrite any matching option with its parsed argument value.

        Argument names that do not correspond to an existing option are
        ignored.  Returns ``self`` so calls can be chained.
        """
        for name, val in vars(args).items():
            if not hasattr(self, name):
                continue
            setattr(self, name, val)
        return self
class Weight():
    """Piecewise-parabolic radial weighting function.

    The weight equals 1 at distance 0, dips to a minimum of -0.4, climbs
    back to 0 at twice the atom radius, and is 0 beyond that.  It is
    assembled from three parabolas (P1, P2, P3) joined continuously at
    the precomputed breakpoints r0 and r1.
    """

    def __init__(self, radius):
        # Quantities fixed by definition:
        self.b1 = 1.0    # maximum of the first parabola
        self.b2 = -0.4   # minimum of the second parabola
        self.b3 = 0.0    # maximum of the third parabola
        self.c1 = 0.0    # first parabola peaks at x = 0
        self.m1 = -1.0 / (radius ** 2)  # density becomes superfluous if p is in d(a)
        self.c3 = 2 * radius            # third parabola peaks at x = 2*r
        self.r0 = 1.0822 * radius       # intersection of P1 and P2 (pre-defined)
        self.r2 = 2 * radius            # point where P3 drops to 0
        # Solve for the remaining unknowns: r1, m2, m3, c2.
        self.c2 = -(self.b1 - self.b2) / (self.m1 * self.r0)
        self.m2 = (self.r0 ** 2) * (self.m1 ** 2) / ((self.r0 ** 2) * self.m1 - self.b2 + self.b1)
        self.r1 = (self.m2 * self.c2 * self.c3 - self.m2 * self.c2 * self.c2 - self.b2) / (self.m2 * self.c3 - self.m2 * self.c2)
        self.m3 = self.m2 * (self.r1 - self.c2) / (self.r1 - self.c3)

    def P(self, x, m, c, b):
        """Evaluate the parabola m*(x - c)**2 + b at x."""
        return m * (x - c) ** 2 + b

    def __call__(self, dist):
        # Select the parabola whose interval contains dist, testing the
        # intervals from the outside in.
        if dist >= self.r2:
            return 0.0
        if dist >= self.r1:
            return self.P(dist, self.m3, self.c3, self.b3)
        if dist >= self.r0:
            return self.P(dist, self.m2, self.c2, self.b2)
        return self.P(dist, self.m1, self.c1, self.b1)
class Point():
    """A sample point on the density grid.

    Carries the point's coordinate together with two accumulator lists,
    S and D, which are filled in elsewhere during grid population.
    """

    def __init__(self, coor):
        self.coor = coor
        self.S = []
        self.D = []

    def set_Point(self, new_point):
        """Adopt another point's coordinate and its S/D lists.

        Note: the lists are aliased, not copied — mutations through
        either point remain visible on both (matches original behavior).
        """
        self.coor, self.S, self.D = new_point.coor, new_point.S, new_point.D
class _BaseEDIA():
    """Shared machinery for the EDIA score (electron density for individual atoms).

    Builds a per-voxel ownership grid over the experimental density map and
    scores how well each atom of a conformer is supported by that density.
    Subclasses drive the calculation for a single residue or a whole protein.
    """

    def __init__(self, conformer, structure, xmap, options):
        self.structure = structure
        self.conformer = conformer
        self.residue = conformer
        self.xmap = xmap
        self.options = options
        self._coor_set = [self.conformer.coor]
        # Volume of a single voxel of the map.
        self._voxel_volume = self.xmap.unit_cell.calc_volume() / self.xmap.array.size
        # Placeholder weighter; re-built per atom once the density radius is known.
        self.weighter = Weight(1.0)
        # Calculate space diagonal and the partitioning factor p
        self.d = np.linalg.norm(xmap.voxelspacing)
        self.p = np.ceil(self.d/0.7)
        abc = np.asarray([self.xmap.unit_cell.a, self.xmap.unit_cell.b, self.xmap.unit_cell.c])
        # 3x3 transforms between grid indices and cartesian coordinates.
        self.grid_to_cartesian = np.transpose( ( self.xmap.unit_cell.frac_to_orth / abc ) * self.xmap.voxelspacing )
        self.cartesian_to_grid = np.linalg.inv(self.grid_to_cartesian)
        # Lazily-populated grid of Point objects, parallel to the density array.
        self.Grid = np.zeros_like(xmap.array, dtype=object)
        # Density statistics used for the z-score in calc_edia.
        self.mean = xmap.array.mean()
        self.sigma = xmap.array.std()
        self.Populate_Grid(self.residue)

    def Populate_Grid(self,target_residue=None):
        """Assign grid points near every atom to its ownership sets S and D.

        S holds atoms whose density radius contains the point; D holds atoms
        within twice that radius.  When *target_residue* is given, only atoms
        close to it are processed.
        """
        for chain in self.structure:
            for residue in chain:
                for ind in range(len(residue.name)):
                    atom,element,charge,coor,icode,record,occ,resi = residue.name[ind],residue.e[ind],residue.charge[ind],residue.coor[ind],residue.icode[ind],residue.record[ind],residue.q[ind],residue.resi[ind]
                    if target_residue!=None:
                        flag=0
                        for idx in range(len(target_residue.name)):
                            # Skip atoms too far from the target residue to matter
                            # (cutoff presumably twice the largest density radius
                            # plus a margin -- TODO confirm the 2.16 constant).
                            if np.linalg.norm(coor-target_residue.coor[idx])<2.16*2+0.2:
                                flag=1
                                break
                        if flag == 0:
                            continue
                    grid = np.dot(coor,self.cartesian_to_grid).astype(int) - np.asarray(self.xmap.offset) # (i,j,k)
                    # Hydrogens are excluded from EDIA by definition.
                    if element == "H":
                        continue
                    if charge == '':
                        ed_radius = self.calculate_density_radius(element, self.options.resolution,int(residue.b[ind]))
                    else:
                        ed_radius = self.calculate_density_radius(element, self.options.resolution,int(residue.b[ind]),charge)
                    box = (np.ceil(ed_radius*2/self.xmap.voxelspacing)).astype(int)
                    # Iterate over all grid points in the box and calculate their ownership
                    for i in range(grid[2]-box[2],grid[2]+box[2]):
                        for j in range(grid[1]-box[1],grid[1]+box[1]):
                            for k in range(grid[0]-box[0],grid[0]+box[0]):
                                try:
                                    dist = np.linalg.norm(coor-self.Grid[i][j][k].coor)
                                except:
                                    # First touch of this voxel: create its Point lazily.
                                    self.Grid[i][j][k]=Point(np.dot(np.asarray([k,j,i])+np.asarray(self.xmap.offset),self.grid_to_cartesian))
                                    dist = np.linalg.norm(coor-self.Grid[i][j][k].coor)
                                if(dist<ed_radius):
                                    self.Grid[i][j][k].S.append([coor,atom,element,occ,resi])
                                elif(dist<ed_radius*2):
                                    self.Grid[i][j][k].D.append([coor,atom,element,occ,resi])

    # Calculates the atomic radius based on the table
    def calculate_density_radius(self,atom, resolution,bfactor,charge="0"):
        """Look up (and linearly interpolate) the electron density radius.

        The lookup table is binned by resolution in 0.5 A steps; values
        between two bins are linearly interpolated.  Note *bfactor* is
        accepted but not used in the current implementation.
        """
        a = int(np.floor(resolution/0.5)-1)
        b = int(np.ceil(resolution/0.5)-1)
        if atom not in ElectronDensityRadiusTable.keys():
            # Normalise the element spelling (e.g. "FE" -> "Fe").
            atom = atom[0]+atom[1:].lower()
        if charge not in ElectronDensityRadiusTable[atom].keys():
            # Try the reversed spelling of the charge string.
            charge = charge[::-1]
        if a == b:
            radius = ElectronDensityRadiusTable[atom][charge][a]
        else:
            # Linear interpolation between the two neighbouring resolution bins.
            radius = ElectronDensityRadiusTable[atom][charge][a]+(ElectronDensityRadiusTable[atom][charge][b]-ElectronDensityRadiusTable[atom][charge][a])*(resolution - ResolutionBins[a])/(ResolutionBins[b] - ResolutionBins[a])
        return np.asarray(radius)

    def ownership(self, p, dist, ed_radius,S,D,I):
        """Return the fraction of grid point *p* owned by the target atom.

        *S*/*D* are the atoms within one/two density radii of the point and
        *I* is the subset of non-bonded atoms that also own it.
        """
        if (dist/ed_radius >= 2.0): # Grid point p is too far from the atom...
            o=0.0
        elif (dist/ed_radius >= 1.0): # Grid point p is in d(atom)
            if len(S)> 0: # Another atom owns the grid point
                o=0.0
            else:
                if len(D)==1: # No other atom owns the grid point, target atom is the only atom in D.
                    o=1.0
                else: # Ownership of the atom is adjusted by the contribution of all atoms in D.
                    o = 1 - dist/sum([ np.linalg.norm(p-atom[0]) for atom in D ])
        else:
            if len(I)==1: # Target atom is the only atom that owns the grid point.
                o=1.0
            else: # Ownership of the atom is adjusted by the contribution of other atoms that own the point.
                o = 1 - dist/sum([ np.linalg.norm(p-atom[0]) for atom in I ])
        return o

    def print_density(self,contour=1.0):
        """Debug aid: dump voxels above *contour* sigma as fake PDB water lines."""
        for i in range(0,len(self.xmap.array)):
            for j in range(0,len(self.xmap.array[i])):
                for k in range(0,len(self.xmap.array[i][j])):
                    if(self.xmap.array[i][j][k] - self.mean >contour*self.sigma):
                        coor = np.dot(np.asarray([k,j,i])+np.asarray(self.xmap.offset),self.grid_to_cartesian)
                        print("HETATM {0:4d} H HOH A {0:3d} {1:8.3f}{2:8.3f}{3:8.3f} 1.00 37.00 H".format(1,coor[0],coor[1],coor[2]))

    def print_stats(self):
        """Debug aid: print the map's unit-cell geometry and array layout."""
        # Note that values of the offset are based on C,R,S - these are not always ordered like x,y,z
        offset=self.xmap.offset
        voxelspacing=self.xmap.voxelspacing # These ARE ordered (x,y,z)
        print("Unit cell shape:", self.xmap.unit_cell.shape) # These are ordered (z,y,x)
        print("Unit cell a,b,c: {0:.2f} {1:.2f} {2:.2f}".format(self.xmap.unit_cell.a, self.xmap.unit_cell.b, self.xmap.unit_cell.c)) # These ARE ordered (x,y,z)
        print("Unit cell alpha,beta,gamma: {0:.2f} {1:.2f} {2:.2f}".format(self.xmap.unit_cell.alpha,self.xmap.unit_cell.beta,self.xmap.unit_cell.gamma))
        print("XMap array dimentions: ", [len(self.xmap.array),len(self.xmap.array[0]),len(self.xmap.array[0][0])]) # These are ordered (z,y,x)
        abc = np.asarray([self.xmap.unit_cell.a, self.xmap.unit_cell.b, self.xmap.unit_cell.c])
        print("abc/voxelspacing:",abc/self.xmap.voxelspacing)
        print("Offset: ",offset)

    # Returns 1 if atom_a is covalently bonded to atom_b, 0 otherwise.
    def covalently_bonded(self,atom_a,atom_b):
        """Return 1 when the two atoms are within bond length (plus error), else 0."""
        error = 2*0.06 # two standard deviations from the largest observed standard deviation for protein bond lengths
        try:
            # Lookup can fail for unknown element pairs; treat that as "not bonded".
            if np.linalg.norm(np.asarray(atom_a[0])-np.asarray(atom_b[0])) < float(BondLengthTable[atom_a[2]][atom_b[2]]) + error:
                return 1
        except:
            return 0
        return 0

    # Find all atoms in 'set' that are not covalently bonded to 'atom_a'
    def calculate_non_bonded(self,atom_a,set):
        """Return the atoms of *set* competing with *atom_a* for a grid point."""
        I = []
        for atom_b in set:
            # Skip alternate conformations of the same residue at partial occupancy.
            if atom_a[4] == atom_b[4] and atom_a[3]!=atom_b[3] and atom_a[3]<1.0:
                continue
            if not self.covalently_bonded(atom_a,atom_b) or np.linalg.norm(np.asarray(atom_a[0])-np.asarray(atom_b[0])) <0.01:
                I.append(atom_b)
        return I

    def calc_edia(self,atom,element,charge,coor,occ,resi,bfactor):
        """Score one atom against the density.

        Returns the (positive, negative, combined) EDIA components, each a
        weighted sum of ownership-adjusted density z-scores over the grid
        points within twice the atom's density radius.
        """
        # Identify the closest grid point to the cartesian coordinates of the atom
        grid = np.dot(coor,self.cartesian_to_grid).astype(int) - np.asarray(self.xmap.offset) # (x,y,z)
        # Look up the electron density radius on the lookup table:
        if charge == '':
            ed_radius = self.calculate_density_radius(element, self.options.resolution,bfactor)
        else:
            ed_radius = self.calculate_density_radius(element, self.options.resolution,bfactor,charge)
        # Update the parabolas used for Weighing
        self.weighter = Weight(ed_radius)
        # Define a box of grid points that inscribes the sphere of interest
        box = (np.ceil(ed_radius*2/self.xmap.voxelspacing)).astype(int) # (x,y,z)
        sum_pos_weights = sum_neg_weights = sum_product = sum_pos_product = sum_neg_product = 0.0
        # Iterate over all grid points in the box and calculate their contribution to the EDIA score.
        for i in range(grid[2]-box[2],grid[2]+box[2]): # z
            for j in range(grid[1]-box[1],grid[1]+box[1]): # y
                for k in range(grid[0]-box[0],grid[0]+box[0]): # x
                    # Identify the coordinates of grid point (k,j,i) of density self.xmap.array[i][j][k]
                    p = self.Grid[i][j][k].coor
                    #if(self.xmap.array[i][j][k] - self.mean > 1.2*self.sigma):
                    #    print("HETATM {0:4d} H HOH A {0:3d} {1:8.3f}{2:8.3f}{3:8.3f} 1.00 37.00 H".format(1,p[0],p[1],p[2]))
                    #continue
                    dist = np.linalg.norm(coor-p)
                    # Calculate the distance-dependent weighting factor w
                    weight = self.weighter(dist)
                    # Calculate the ownership value o
                    I = self.calculate_non_bonded([coor,atom,element,occ,resi],self.Grid[i][j][k].S)
                    o = self.ownership(p, dist, ed_radius,self.Grid[i][j][k].S,self.Grid[i][j][k].D,I)
                    # Calculate the density score z(p) truncated at 1.2σs
                    z=min(max((self.xmap.array[i][j][k]-self.mean)/self.sigma,0.0),1.2)
                    #print(atom,dist,weight,o,z)
                    # Calculate the sums for EDIA
                    if weight > 0.0:
                        sum_pos_weights += weight
                        sum_pos_product += weight*o*z
                    else:
                        sum_neg_weights += weight
                        sum_neg_product += weight*o*z
                    sum_product += weight*o*z
        return sum_pos_product/sum_pos_weights,sum_neg_product/sum_neg_weights,sum_product/sum_pos_weights

    def calc_edia_residue(self,residue):
        """Aggregate per-atom EDIA into per-altloc EDIAm and OPIA for *residue*.

        Prints one line per alternate location (and a combined line) and
        returns the last (EDIAm, OPIA) pair computed.
        """
        length={}
        ediasum={}
        occupancy={}
        # Create arrays to store the EDIA components of the
        edia = np.zeros(len(residue.name))
        edia_plus = np.zeros(len(residue.name))
        edia_minus = np.zeros(len(residue.name))
        prev_altloc=residue.altloc[0]
        # For each atom in the residue:
        for ind in range(len(residue.name)):
            atom,element,charge,coor,icode,record,occ = residue.name[ind],residue.e[ind],residue.charge[ind],residue.coor[ind],residue.icode[ind],residue.record[ind],residue.q[ind]
            # By default, Hydrogens are excluded from the calculation!
            if element == "H":
                continue
            # Store the values of the negative, positive, and full component in the atomic arrays:
            edia_plus[ind],edia_minus[ind],edia[ind] = self.calc_edia(atom,element,charge,coor,occ,residue.resi[ind],residue.b[ind])
            # Currently, we are truncating the negative values of EDIA at 0.
            if edia[ind] < 0.0:
                edia[ind] = 0.0
            if residue.altloc[ind] not in ediasum:
                ediasum[residue.altloc[ind]]=0.0
                length[residue.altloc[ind]]=0.0
                occupancy[residue.altloc[ind]]=residue.q[ind]
            # EDIAm uses a power-mean with exponent -2 of (edia + 0.1).
            ediasum[residue.altloc[ind]]+=(edia[ind]+0.1)**(-2)
            length[residue.altloc[ind]]+=1
        EDIAm_Comb=0.0
        for key in ediasum:
            if length[key] > 0:
                # Atoms with blank altloc are shared by every conformation.
                if key != "" and "" in ediasum:
                    flag=1
                    ediasum[key] += ediasum[""]
                    length[key] += length[""]
                EDIAm = ( ediasum[key] / length[key] ) ** (-0.5) - 0.1
                OPIA = self.calc_opia_residue(residue,edia,key)
                if key != "":
                    EDIAm_Comb+=occupancy[key]*EDIAm
                print("{0} {1} {2:.2f} {3:.2f} {4:.2f}".format(residue.resi[0],key,occupancy[key],EDIAm,OPIA))
        try:
            print("{0} Comb {1:.2f} {2:.2f} {3:.2f}".format(residue.resi[0],sum(occupancy.values())-occupancy[""],EDIAm_Comb,OPIA))
        except:
            # No blank-altloc entry: report the full summed occupancy instead.
            print("{0} Comb {1:.2f} {2:.2f} {3:.2f}".format(residue.resi[0],sum(occupancy.values()),EDIAm_Comb,OPIA))
        if "" in ediasum and len(list(set(ediasum.keys()))) == 1:
            # Only a single (blank) conformation exists; report it as "A".
            if length[""] > 0:
                key=""
                EDIAm = ( ediasum[key] / length[key] ) ** (-0.5) - 0.1
                OPIA = self.calc_opia_residue(residue,edia,"")
                print("{0} A 1.0 {2:.2f} {3:.2f}".format(residue.resi[0],EDIAm,OPIA))
        return EDIAm,OPIA

    def calc_opia_residue(self,residue,edia,key):
        """Compute OPIA: the fraction of *key*-altloc atoms in connected
        components (size >= 2) of the well-supported (edia >= 0.8) bond graph.
        """
        altloc = [ x for i,x in enumerate(residue.altloc) if x==key or x==""]
        index_altloc = [ i for i,x in enumerate(residue.altloc) if x==key or x==""]
        self.adj_matrix = np.zeros( ( len(altloc),len(altloc) ),dtype=int)
        # Calculate adjacency matrix
        for x,i in enumerate(index_altloc):
            atom_a = [residue.coor[i],residue.name[i],residue.e[i]]
            if edia[i] >= 0.8:
                for y,j in enumerate(index_altloc):
                    atom_b = [residue.coor[j],residue.name[j],residue.e[j]]
                    if self.covalently_bonded(atom_a,atom_b):
                        self.adj_matrix[x][y]=1
                        self.adj_matrix[y][x]=1
        # Initialize all vertices as not visited
        self.visited = np.zeros(len(altloc),dtype=int)
        # Perform DFS search to identify the connected components
        label = 1
        for i in range(len(altloc)):
            if self.visited[i]==0:
                self.DFS(i,label)
                label+=1
        # Calculate OPIA
        coverage=0
        for i in range(len(np.bincount(self.visited))):
            if np.bincount(self.visited)[i] >= 2:
                coverage += np.bincount(self.visited)[i]
        return coverage/len(altloc)

    def DFS(self,residue,label):
        """Depth-first search: tag every vertex reachable from *residue* with *label*."""
        if self.visited[residue] != 0:
            return
        else:
            self.visited[residue] = label
        for i in range(len(self.adj_matrix)):
            if self.adj_matrix[residue][i]:
                self.DFS(i,label)
class ediaResidue(_BaseEDIA):
    """EDIA evaluation restricted to one selected residue."""

    def __init__(self, residue, structure, xmap, options):
        super().__init__(residue, structure, xmap, options)

    def __call__(self):
        # Score the selected residue: per-altloc EDIAm and OPIA are printed
        # by calc_edia_residue itself.
        EDIAm, OPIA = self.calc_edia_residue(self.residue)
class ediaProtein(_BaseEDIA):
    """EDIA evaluation over every residue of a structure."""

    def __init__(self, structure, xmap, options):
        super().__init__(structure, structure, xmap, options)
        n_residues = len(list(self.structure.residues))
        # Per-residue score arrays, filled in __call__.
        self.EDIAm = np.zeros(n_residues)
        self.OPIA = np.zeros(n_residues)

    def __call__(self):
        # NOTE(review): the index restarts at 0 for every chain, so scores of
        # later chains overwrite earlier ones in the arrays (kept as-is).
        for chain in self.structure:
            for idx, residue in enumerate(chain):
                self.EDIAm[idx], self.OPIA[idx] = self.calc_edia_residue(residue)
                # Report the residue-level scores.
                print("{0} {1:.2f} {2:.2f}".format(residue.id[0], self.EDIAm[idx], self.OPIA[idx]))
def parse_args():
    """Build the EDIA command line interface and parse ``sys.argv``."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("xmap", type=str,
                        help="X-ray density map in CCP4 format.")
    parser.add_argument("resolution", type=float,
                        help="Map resolution in angstrom.")
    parser.add_argument("structure", type=str,
                        help="PDB-file containing structure.")
    parser.add_argument('--selection', default=None, type=str,
                        help="Chain, residue id, and optionally insertion code for residue in structure, e.g. A,105, or A,105:A.")
    parser.add_argument("-d", "--directory", type=os.path.abspath, default='.', metavar="<dir>",
                        help="Directory to store results.")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Be verbose.")
    return parser.parse_args()
""" Main function """
def main():
args = parse_args()
""" Create the output directory provided by the user: """
try:
os.makedirs(args.directory)
except OSError: # If directory already exists...
pass
time0 = time.time() # Useful variable for profiling run times.
""" Processing input structure and map """
# Read structure in:
structure = Structure.fromfile(args.structure)
# This line would ensure that we only select the '' altlocs or the 'A' altlocs.
structure = structure.extract('altloc', ('', 'A','B','C','D','E'))
if args.selection is not None:
chainid, resi = args.selection.split(',')
# Select all residue conformers
chain = structure[chainid]
for res in chain:
if res.resi[0] == int(resi):
residue = res
break
# Prepare X-ray map
xmap = XMap.fromfile(args.xmap)
options = ediaOptions()
options.apply_command_args(args)
if args.selection is None:
edia = ediaProtein(structure, xmap, options)
else:
edia = ediaResidue(residue,structure,xmap,options)
edia()
""" Profiling run time: """
passed = time.time() - time0
# print(f"Time passed: {passed}s")
| [
"logging.getLogger",
"numpy.ceil",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.asarray",
"numpy.zeros_like",
"numpy.floor",
"numpy.dot",
"numpy.linalg.inv",
"numpy.bincount",
"numpy.linalg.norm",
"numpy.transpose",
"time.time"
] | [((257, 284), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (274, 284), False, 'import logging\n'), ((17936, 17980), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (17959, 17980), False, 'import argparse\n'), ((18940, 18951), 'time.time', 'time.time', ([], {}), '()\n', (18949, 18951), False, 'import time\n'), ((2937, 2970), 'numpy.linalg.norm', 'np.linalg.norm', (['xmap.voxelspacing'], {}), '(xmap.voxelspacing)\n', (2951, 2970), True, 'import numpy as np\n'), ((2988, 3009), 'numpy.ceil', 'np.ceil', (['(self.d / 0.7)'], {}), '(self.d / 0.7)\n', (2995, 3009), True, 'import numpy as np\n'), ((3022, 3108), 'numpy.asarray', 'np.asarray', (['[self.xmap.unit_cell.a, self.xmap.unit_cell.b, self.xmap.unit_cell.c]'], {}), '([self.xmap.unit_cell.a, self.xmap.unit_cell.b, self.xmap.\n unit_cell.c])\n', (3032, 3108), True, 'import numpy as np\n'), ((3137, 3214), 'numpy.transpose', 'np.transpose', (['(self.xmap.unit_cell.frac_to_orth / abc * self.xmap.voxelspacing)'], {}), '(self.xmap.unit_cell.frac_to_orth / abc * self.xmap.voxelspacing)\n', (3149, 3214), True, 'import numpy as np\n'), ((3254, 3291), 'numpy.linalg.inv', 'np.linalg.inv', (['self.grid_to_cartesian'], {}), '(self.grid_to_cartesian)\n', (3267, 3291), True, 'import numpy as np\n'), ((3312, 3351), 'numpy.zeros_like', 'np.zeros_like', (['xmap.array'], {'dtype': 'object'}), '(xmap.array, dtype=object)\n', (3325, 3351), True, 'import numpy as np\n'), ((6509, 6527), 'numpy.asarray', 'np.asarray', (['radius'], {}), '(radius)\n', (6519, 6527), True, 'import numpy as np\n'), ((8822, 8908), 'numpy.asarray', 'np.asarray', (['[self.xmap.unit_cell.a, self.xmap.unit_cell.b, self.xmap.unit_cell.c]'], {}), '([self.xmap.unit_cell.a, self.xmap.unit_cell.b, self.xmap.\n unit_cell.c])\n', (8832, 8908), True, 'import numpy as np\n'), ((18833, 18860), 'os.makedirs', 'os.makedirs', (['args.directory'], {}), '(args.directory)\n', 
(18844, 18860), False, 'import os\n'), ((19888, 19899), 'time.time', 'time.time', ([], {}), '()\n', (19897, 19899), False, 'import time\n'), ((10123, 10151), 'numpy.asarray', 'np.asarray', (['self.xmap.offset'], {}), '(self.xmap.offset)\n', (10133, 10151), True, 'import numpy as np\n'), ((5894, 5920), 'numpy.floor', 'np.floor', (['(resolution / 0.5)'], {}), '(resolution / 0.5)\n', (5902, 5920), True, 'import numpy as np\n'), ((5938, 5963), 'numpy.ceil', 'np.ceil', (['(resolution / 0.5)'], {}), '(resolution / 0.5)\n', (5945, 5963), True, 'import numpy as np\n'), ((10651, 10698), 'numpy.ceil', 'np.ceil', (['(ed_radius * 2 / self.xmap.voxelspacing)'], {}), '(ed_radius * 2 / self.xmap.voxelspacing)\n', (10658, 10698), True, 'import numpy as np\n'), ((16463, 16488), 'numpy.bincount', 'np.bincount', (['self.visited'], {}), '(self.visited)\n', (16474, 16488), True, 'import numpy as np\n'), ((10073, 10109), 'numpy.dot', 'np.dot', (['coor', 'self.cartesian_to_grid'], {}), '(coor, self.cartesian_to_grid)\n', (10079, 10109), True, 'import numpy as np\n'), ((11541, 11565), 'numpy.linalg.norm', 'np.linalg.norm', (['(coor - p)'], {}), '(coor - p)\n', (11555, 11565), True, 'import numpy as np\n'), ((16507, 16532), 'numpy.bincount', 'np.bincount', (['self.visited'], {}), '(self.visited)\n', (16518, 16532), True, 'import numpy as np\n'), ((16570, 16595), 'numpy.bincount', 'np.bincount', (['self.visited'], {}), '(self.visited)\n', (16581, 16595), True, 'import numpy as np\n'), ((4282, 4310), 'numpy.asarray', 'np.asarray', (['self.xmap.offset'], {}), '(self.xmap.offset)\n', (4292, 4310), True, 'import numpy as np\n'), ((9280, 9301), 'numpy.asarray', 'np.asarray', (['atom_a[0]'], {}), '(atom_a[0])\n', (9290, 9301), True, 'import numpy as np\n'), ((9302, 9323), 'numpy.asarray', 'np.asarray', (['atom_b[0]'], {}), '(atom_b[0])\n', (9312, 9323), True, 'import numpy as np\n'), ((4718, 4765), 'numpy.ceil', 'np.ceil', (['(ed_radius * 2 / self.xmap.voxelspacing)'], {}), '(ed_radius * 2 / 
self.xmap.voxelspacing)\n', (4725, 4765), True, 'import numpy as np\n'), ((9805, 9826), 'numpy.asarray', 'np.asarray', (['atom_a[0]'], {}), '(atom_a[0])\n', (9815, 9826), True, 'import numpy as np\n'), ((9827, 9848), 'numpy.asarray', 'np.asarray', (['atom_b[0]'], {}), '(atom_b[0])\n', (9837, 9848), True, 'import numpy as np\n'), ((4012, 4059), 'numpy.linalg.norm', 'np.linalg.norm', (['(coor - target_residue.coor[idx])'], {}), '(coor - target_residue.coor[idx])\n', (4026, 4059), True, 'import numpy as np\n'), ((4232, 4268), 'numpy.dot', 'np.dot', (['coor', 'self.cartesian_to_grid'], {}), '(coor, self.cartesian_to_grid)\n', (4238, 4268), True, 'import numpy as np\n'), ((7801, 7822), 'numpy.asarray', 'np.asarray', (['[k, j, i]'], {}), '([k, j, i])\n', (7811, 7822), True, 'import numpy as np\n'), ((7821, 7849), 'numpy.asarray', 'np.asarray', (['self.xmap.offset'], {}), '(self.xmap.offset)\n', (7831, 7849), True, 'import numpy as np\n'), ((5149, 5195), 'numpy.linalg.norm', 'np.linalg.norm', (['(coor - self.Grid[i][j][k].coor)'], {}), '(coor - self.Grid[i][j][k].coor)\n', (5163, 5195), True, 'import numpy as np\n'), ((7414, 7441), 'numpy.linalg.norm', 'np.linalg.norm', (['(p - atom[0])'], {}), '(p - atom[0])\n', (7428, 7441), True, 'import numpy as np\n'), ((5413, 5459), 'numpy.linalg.norm', 'np.linalg.norm', (['(coor - self.Grid[i][j][k].coor)'], {}), '(coor - self.Grid[i][j][k].coor)\n', (5427, 5459), True, 'import numpy as np\n'), ((7104, 7131), 'numpy.linalg.norm', 'np.linalg.norm', (['(p - atom[0])'], {}), '(p - atom[0])\n', (7118, 7131), True, 'import numpy as np\n'), ((5298, 5319), 'numpy.asarray', 'np.asarray', (['[k, j, i]'], {}), '([k, j, i])\n', (5308, 5319), True, 'import numpy as np\n'), ((5318, 5346), 'numpy.asarray', 'np.asarray', (['self.xmap.offset'], {}), '(self.xmap.offset)\n', (5328, 5346), True, 'import numpy as np\n')] |
# Post-processing of an emcee MCMC run for an epidemic model: load the
# sampled chain, rescale it to physical parameter bounds, filter it, and
# plot per-parameter histograms.
import numpy as np
import matplotlib.pyplot as plt
import emcee
# Human-readable names of the 13 sampled parameters, in chain column order.
paramnames = ["Offset days", "Init patients", "Infection rate", "Confirmed prob",
              "Recovery rate", "Infect delay mean", "Infect delay std",
              "Confirmed delay mean", "Confirmed delay std",
              "Days to recover mean", "Days to recover std",
              "Days to deceased mean", "Days to deceased std"]
# Column indices of the parameters to plot.
idx_to_show = [1, 2, 3, 4, 5, 7, 9, 11]
# Named boolean filters over the (already rescaled) chain.
filter_dict = {
    "high_recovery_rate": lambda chain: chain[:,4] > 0.94,
    "low_recovery_rate": lambda chain: chain[:,4] < 0.90,
    "low_confirmation": lambda chain: chain[:,3] < 0.65,
    "high_confirmation": lambda chain: chain[:,3] > 0.8,
}
# choosing which filter to use
filters = [
    # "high_recovery_rate",
    # "low_recovery_rate",
    # "low_confirmation",
    "high_confirmation",
]
# Physical [min, max] bounds per parameter; the sampler works on [0, 1]
# and the chain is rescaled onto these ranges below.
bounds = np.array([
    [0.0, 10.0], # offset_days: offset days to the back
    [2.0, 100.0], # n0: the initial number of patient at day 0
    [1.0, 2.5], # r0: the infection rate
    [0.0, 1.0], # confirmed_prob1: the proportion of infected people that is confirmed
    [0.0, 1.0], # recovery_rate
    # day-related variables
    [1.0, 14.0], # infectious_delay_mean: incubation period, where the patient is not infectious
    [1.0, 10.0], # infectious_delay_std
    [1.0, 10.0], # confirmed_delay_mean: how many days since infected is confirmed (if it will be)
    [1.0, 10.0], # confirmed_delay_std
    [1.0, 20.0], # days_to_recover_mean: how many days since infected to be recovered (if will be confirmed)
    [1.0, 10.0], # days_to_recover_std
    [1.0, 10.0], # days_to_deceased_mean: how many days since infected to be deceased (if will be confirmed)
    [1.0, 10.0], # days_to_deceased_std
]) # (nfeat, 2)
reader = emcee.backends.HDFBackend("emcee_samples.h5", read_only=True)
flatchain = reader.get_chain(flat=True)
# NOTE(review): this keeps the FIRST half of the chain; burn-in removal
# usually keeps the second half -- confirm this is intentional.
flatchain = flatchain[:flatchain.shape[0]//2,:] # (nsamples, nfeat)
# Rescale the unit-interval samples onto the physical bounds.
flatchain = flatchain * (bounds[:,1] - bounds[:,0]) + bounds[:,0]
# filter the data
idx = flatchain[:,0] < np.inf
for filter_k in filters:
    newidx = filter_dict[filter_k](flatchain)
    idx = np.logical_and(idx, newidx)
fchain = flatchain[idx,:]
# print the summary
print("Data collected: %d" % flatchain.shape[0])
print("Filtered data: %d" % fchain.shape[0])
for i in range(len(paramnames)):
    print("%25s: (median) %.3e (std) %.3e" % (paramnames[i], np.median(fchain[:,i]), np.std(fchain[:,i])))
# Lay the histograms out on a near-square grid.
nrows = int(np.sqrt(len(idx_to_show)))
ncols = int(np.ceil(len(idx_to_show)*1.0 / nrows))
for i in range(len(idx_to_show)):
    idx = idx_to_show[i]
    plt.subplot(nrows, ncols, i+1)
    plt.hist(fchain[:,idx])
    plt.xlabel(paramnames[idx])
plt.show()
| [
"numpy.median",
"matplotlib.pyplot.hist",
"numpy.logical_and",
"matplotlib.pyplot.xlabel",
"numpy.array",
"emcee.backends.HDFBackend",
"numpy.std",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((813, 1000), 'numpy.array', 'np.array', (['[[0.0, 10.0], [2.0, 100.0], [1.0, 2.5], [0.0, 1.0], [0.0, 1.0], [1.0, 14.0],\n [1.0, 10.0], [1.0, 10.0], [1.0, 10.0], [1.0, 20.0], [1.0, 10.0], [1.0, \n 10.0], [1.0, 10.0]]'], {}), '([[0.0, 10.0], [2.0, 100.0], [1.0, 2.5], [0.0, 1.0], [0.0, 1.0], [\n 1.0, 14.0], [1.0, 10.0], [1.0, 10.0], [1.0, 10.0], [1.0, 20.0], [1.0, \n 10.0], [1.0, 10.0], [1.0, 10.0]])\n', (821, 1000), True, 'import numpy as np\n'), ((1729, 1790), 'emcee.backends.HDFBackend', 'emcee.backends.HDFBackend', (['"""emcee_samples.h5"""'], {'read_only': '(True)'}), "('emcee_samples.h5', read_only=True)\n", (1754, 1790), False, 'import emcee\n'), ((2650, 2660), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2658, 2660), True, 'import matplotlib.pyplot as plt\n'), ((2095, 2122), 'numpy.logical_and', 'np.logical_and', (['idx', 'newidx'], {}), '(idx, newidx)\n', (2109, 2122), True, 'import numpy as np\n'), ((2559, 2591), 'matplotlib.pyplot.subplot', 'plt.subplot', (['nrows', 'ncols', '(i + 1)'], {}), '(nrows, ncols, i + 1)\n', (2570, 2591), True, 'import matplotlib.pyplot as plt\n'), ((2594, 2618), 'matplotlib.pyplot.hist', 'plt.hist', (['fchain[:, idx]'], {}), '(fchain[:, idx])\n', (2602, 2618), True, 'import matplotlib.pyplot as plt\n'), ((2622, 2649), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['paramnames[idx]'], {}), '(paramnames[idx])\n', (2632, 2649), True, 'import matplotlib.pyplot as plt\n'), ((2359, 2382), 'numpy.median', 'np.median', (['fchain[:, i]'], {}), '(fchain[:, i])\n', (2368, 2382), True, 'import numpy as np\n'), ((2383, 2403), 'numpy.std', 'np.std', (['fchain[:, i]'], {}), '(fchain[:, i])\n', (2389, 2403), True, 'import numpy as np\n')] |
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from random import randint
import numpy as np
import cv2
from PIL import Image
import random
###################################################################
# random mask generation
###################################################################
def random_regular_mask(img):
    """Punch 1-5 random rectangular holes (zeros) into a mask of ones.

    The returned tensor has the same shape as *img*: 1 where the image is
    kept and 0 inside the holes.
    """
    mask = torch.ones_like(img)
    shape = img.size()
    n_holes = random.randint(1, 5)
    # Keep the hole origin away from the far edge so every hole fits.
    max_x = shape[1] - shape[1] / (n_holes + 1)
    max_y = shape[2] - shape[2] / (n_holes + 1)
    for _ in range(n_holes):
        x = random.randint(0, int(max_x))
        y = random.randint(0, int(max_y))
        end_x = x + random.randint(int(shape[1] / (n_holes + 7)), int(shape[1] - x))
        end_y = y + random.randint(int(shape[2] / (n_holes + 7)), int(shape[2] - y))
        mask[:, int(x):int(end_x), int(y):int(end_y)] = 0
    return mask
def center_mask(img):
    """Zero out the central half of the mask (a W/2 x H/2 hole).

    The hole spans from one quarter to three quarters of each spatial
    dimension of *img* (shape C x H x W).
    """
    mask = torch.ones_like(img)
    size = img.size()
    top = int(size[1] / 4)
    left = int(size[2] / 4)
    bottom = int(size[1] * 3 / 4)
    right = int(size[2] * 3 / 4)
    mask[:, top:bottom, left:right] = 0
    return mask
def random_irregular_mask(img):
    """Generate a random irregular hole mask from lines, circles and ellipses.

    Shapes are rasterised with OpenCV onto a scratch canvas; the returned
    tensor (same shape as *img*) is 1 where the image is kept and 0 inside
    the drawn hole.
    """
    to_tensor = transforms.Compose([transforms.ToTensor()])
    mask = torch.ones_like(img)
    size = img.size()
    canvas = np.zeros((size[1], size[2], 1), np.uint8)

    # Maximum stroke width of the random shapes.
    max_width = 20
    if size[1] < 64 or size[2] < 64:
        raise Exception("Width and Height of mask must be at least 64!")

    shape_count = random.randint(16, 64)
    for _ in range(shape_count):
        model = random.random()
        if model < 0.6:
            # Random line segment.
            x1, x2 = randint(1, size[1]), randint(1, size[1])
            y1, y2 = randint(1, size[2]), randint(1, size[2])
            thickness = randint(4, max_width)
            cv2.line(canvas, (x1, y1), (x2, y2), (1, 1, 1), thickness)
        elif model > 0.6 and model < 0.8:
            # Random filled circle.
            x1, y1 = randint(1, size[1]), randint(1, size[2])
            radius = randint(4, max_width)
            cv2.circle(canvas, (x1, y1), radius, (1, 1, 1), -1)
        elif model > 0.8:
            # Random ellipse outline.
            x1, y1 = randint(1, size[1]), randint(1, size[2])
            s1, s2 = randint(1, size[1]), randint(1, size[2])
            a1, a2, a3 = randint(3, 180), randint(3, 180), randint(3, 180)
            thickness = randint(4, max_width)
            cv2.ellipse(canvas, (x1, y1), (s1, s2), a1, a2, a3, (1, 1, 1), thickness)

    canvas = canvas.reshape(size[2], size[1])
    pil_img = Image.fromarray(canvas * 255)
    img_mask = to_tensor(pil_img)
    # 1 where nothing was drawn, 0 inside the shapes, copied per channel.
    for channel in range(size[0]):
        mask[channel, :, :] = img_mask < 1
    return mask
# determine if pixel has color
def has_color(pixel):
    """Return True when the r, g and b channels of *pixel* are not all equal.

    A pixel is considered grayscale (no color) only when every pairwise
    channel difference is zero.
    """
    r, g, b = pixel[0], pixel[1], pixel[2]
    # Any non-zero pairwise channel difference means the pixel is colored.
    return max(abs(r - g), abs(g - b), abs(b - r)) > 0
# mask all pixels with color
def custom_helper(o, i, s):
    """Mark every colored pixel of *o* as white ([1,1,1]) in canvas *i*.

    *s* gives the (height, width) extent to scan.  Prints whether any
    colored pixel was found, then returns the canvas.
    """
    found_color = False
    for row in range(s[0]):
        for col in range(s[1]):
            if has_color(o[row][col]):
                found_color = True
                i[row][col] = [1, 1, 1]
    # assert found_color
    print(found_color)
    return i
# mask annotations on the CXR
def annotation_mask(img):
    """Build a mask hiding the colored annotation pixels of a CXR image.

    Colored pixels (as detected by ``custom_helper``/``has_color``) become 0
    in the returned mask; grayscale pixels stay 1.  *img* is expected to be
    a C x H x W tensor (assumes C == 3 -- TODO confirm with callers).
    """
    original = img
    transform = transforms.Compose([transforms.ToTensor()])
    mask = torch.ones_like(img)
    size = img.size()
    img = np.zeros((size[1], size[2], 3), np.uint8)
    # Rearrange from C x H x W to H x W x C for per-pixel scanning.
    original = torch.moveaxis(original, 1, 0)
    original = torch.moveaxis(original, 2, 1)
    img = custom_helper(original, img, original.size())
    img = Image.fromarray((255 * img).astype(np.uint8))
    img_mask = transform(img)
    # PERF FIX: one vectorized comparison replaces the former per-element
    # triple Python loop; semantics are unchanged (bools cast to 0.0/1.0).
    mask[:size[0]] = img_mask[:size[0]] < 1
    return mask
###################################################################
# multi scale for image generation
###################################################################
def scale_img(img, size):
    """Bilinearly resize a 4-D (N, C, H, W) tensor to spatial *size*.

    Args:
        img: input batch tensor.
        size: target (height, width).

    Returns:
        The resized tensor (corners aligned, per ``align_corners=True``).
    """
    # FIX: removed a leftover debug print of img.size() that spammed stdout
    # on every call.
    scaled_img = F.interpolate(img, size=size, mode='bilinear', align_corners=True)
    return scaled_img
def scale_pyramid(img, num_scales):
    """Return *num_scales* versions of *img*, ordered coarsest first.

    Level i is downsampled by a factor of 2**i; the full-resolution image
    is the last element of the returned list.
    """
    pyramid = [img]
    s = img.size()
    h = s[2]
    w = s[3]
    for level in range(1, num_scales):
        factor = 2 ** level
        pyramid.append(scale_img(img, size=[h // factor, w // factor]))
    pyramid.reverse()
    return pyramid
| [
"torch.ones_like",
"PIL.Image.fromarray",
"cv2.line",
"cv2.ellipse",
"numpy.zeros",
"cv2.circle",
"torch.nn.functional.interpolate",
"torch.moveaxis",
"random.random",
"torchvision.transforms.ToTensor",
"random.randint"
] | [((429, 449), 'torch.ones_like', 'torch.ones_like', (['img'], {}), '(img)\n', (444, 449), False, 'import torch\n'), ((482, 502), 'random.randint', 'random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (496, 502), False, 'import random\n'), ((1013, 1033), 'torch.ones_like', 'torch.ones_like', (['img'], {}), '(img)\n', (1028, 1033), False, 'import torch\n'), ((1412, 1432), 'torch.ones_like', 'torch.ones_like', (['img'], {}), '(img)\n', (1427, 1432), False, 'import torch\n'), ((1465, 1506), 'numpy.zeros', 'np.zeros', (['(size[1], size[2], 1)', 'np.uint8'], {}), '((size[1], size[2], 1), np.uint8)\n', (1473, 1506), True, 'import numpy as np\n'), ((1672, 1694), 'random.randint', 'random.randint', (['(16)', '(64)'], {}), '(16, 64)\n', (1686, 1694), False, 'import random\n'), ((2733, 2759), 'PIL.Image.fromarray', 'Image.fromarray', (['(img * 255)'], {}), '(img * 255)\n', (2748, 2759), False, 'from PIL import Image\n'), ((3672, 3692), 'torch.ones_like', 'torch.ones_like', (['img'], {}), '(img)\n', (3687, 3692), False, 'import torch\n'), ((3725, 3766), 'numpy.zeros', 'np.zeros', (['(size[1], size[2], 3)', 'np.uint8'], {}), '((size[1], size[2], 3), np.uint8)\n', (3733, 3766), True, 'import numpy as np\n'), ((3783, 3813), 'torch.moveaxis', 'torch.moveaxis', (['original', '(1)', '(0)'], {}), '(original, 1, 0)\n', (3797, 3813), False, 'import torch\n'), ((3829, 3859), 'torch.moveaxis', 'torch.moveaxis', (['original', '(2)', '(1)'], {}), '(original, 2, 1)\n', (3843, 3859), False, 'import torch\n'), ((4416, 4482), 'torch.nn.functional.interpolate', 'F.interpolate', (['img'], {'size': 'size', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(img, size=size, mode='bilinear', align_corners=True)\n", (4429, 4482), True, 'import torch.nn.functional as F\n'), ((1739, 1754), 'random.random', 'random.random', ([], {}), '()\n', (1752, 1754), False, 'import random\n'), ((1377, 1398), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1396, 1398), True, 
'import torchvision.transforms as transforms\n'), ((1959, 1980), 'random.randint', 'randint', (['(4)', 'max_width'], {}), '(4, max_width)\n', (1966, 1980), False, 'from random import randint\n'), ((1993, 2048), 'cv2.line', 'cv2.line', (['img', '(x1, y1)', '(x2, y2)', '(1, 1, 1)', 'thickness'], {}), '(img, (x1, y1), (x2, y2), (1, 1, 1), thickness)\n', (2001, 2048), False, 'import cv2\n'), ((3637, 3658), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3656, 3658), True, 'import torchvision.transforms as transforms\n'), ((1832, 1851), 'random.randint', 'randint', (['(1)', 'size[1]'], {}), '(1, size[1])\n', (1839, 1851), False, 'from random import randint\n'), ((1853, 1872), 'random.randint', 'randint', (['(1)', 'size[1]'], {}), '(1, size[1])\n', (1860, 1872), False, 'from random import randint\n'), ((1894, 1913), 'random.randint', 'randint', (['(1)', 'size[2]'], {}), '(1, size[2])\n', (1901, 1913), False, 'from random import randint\n'), ((1915, 1934), 'random.randint', 'randint', (['(1)', 'size[2]'], {}), '(1, size[2])\n', (1922, 1934), False, 'from random import randint\n'), ((2209, 2230), 'random.randint', 'randint', (['(4)', 'max_width'], {}), '(4, max_width)\n', (2216, 2230), False, 'from random import randint\n'), ((2243, 2291), 'cv2.circle', 'cv2.circle', (['img', '(x1, y1)', 'radius', '(1, 1, 1)', '(-1)'], {}), '(img, (x1, y1), radius, (1, 1, 1), -1)\n', (2253, 2291), False, 'import cv2\n'), ((2147, 2166), 'random.randint', 'randint', (['(1)', 'size[1]'], {}), '(1, size[1])\n', (2154, 2166), False, 'from random import randint\n'), ((2168, 2187), 'random.randint', 'randint', (['(1)', 'size[2]'], {}), '(1, size[2])\n', (2175, 2187), False, 'from random import randint\n'), ((2577, 2598), 'random.randint', 'randint', (['(4)', 'max_width'], {}), '(4, max_width)\n', (2584, 2598), False, 'from random import randint\n'), ((2611, 2681), 'cv2.ellipse', 'cv2.ellipse', (['img', '(x1, y1)', '(s1, s2)', 'a1', 'a2', 'a3', '(1, 1, 1)', 
'thickness'], {}), '(img, (x1, y1), (s1, s2), a1, a2, a3, (1, 1, 1), thickness)\n', (2622, 2681), False, 'import cv2\n'), ((2375, 2394), 'random.randint', 'randint', (['(1)', 'size[1]'], {}), '(1, size[1])\n', (2382, 2394), False, 'from random import randint\n'), ((2396, 2415), 'random.randint', 'randint', (['(1)', 'size[2]'], {}), '(1, size[2])\n', (2403, 2415), False, 'from random import randint\n'), ((2437, 2456), 'random.randint', 'randint', (['(1)', 'size[1]'], {}), '(1, size[1])\n', (2444, 2456), False, 'from random import randint\n'), ((2458, 2477), 'random.randint', 'randint', (['(1)', 'size[2]'], {}), '(1, size[2])\n', (2465, 2477), False, 'from random import randint\n'), ((2503, 2518), 'random.randint', 'randint', (['(3)', '(180)'], {}), '(3, 180)\n', (2510, 2518), False, 'from random import randint\n'), ((2520, 2535), 'random.randint', 'randint', (['(3)', '(180)'], {}), '(3, 180)\n', (2527, 2535), False, 'from random import randint\n'), ((2537, 2552), 'random.randint', 'randint', (['(3)', '(180)'], {}), '(3, 180)\n', (2544, 2552), False, 'from random import randint\n')] |
import cv2
import numpy as np

# Morphological erosion demo: erosion removes small foreground noise,
# since an output pixel is kept only where the whole (all-ones) kernel
# fits inside the foreground.
img = cv2.imread('j.png', 0)  # flag 0 -> load as single-channel grayscale
kernel = np.ones((5, 5), np.uint8)
# BUG FIX: the keyword was misspelled 'viterations', which makes
# cv2.erode raise a TypeError; the correct parameter is 'iterations'.
erosion = cv2.erode(img, kernel, iterations=1)
cv2.imshow('img', img)
cv2.imshow('erode', erosion)
# NOTE(review): without a cv2.waitKey() call the windows close
# immediately when the script exits — confirm intended usage.
| [
"cv2.erode",
"cv2.imread",
"numpy.ones",
"cv2.imshow"
] | [((131, 153), 'cv2.imread', 'cv2.imread', (['"""j.png"""', '(0)'], {}), "('j.png', 0)\n", (141, 153), False, 'import cv2\n'), ((163, 188), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (170, 188), True, 'import numpy as np\n'), ((198, 235), 'cv2.erode', 'cv2.erode', (['img', 'kernel'], {'viterations': '(1)'}), '(img, kernel, viterations=1)\n', (207, 235), False, 'import cv2\n'), ((238, 260), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (248, 260), False, 'import cv2\n'), ((262, 290), 'cv2.imshow', 'cv2.imshow', (['"""erode"""', 'erosion'], {}), "('erode', erosion)\n", (272, 290), False, 'import cv2\n')] |
#########
#
# Copyright (c) 2005 <NAME>
#
# This file is part of the vignette-removal library.
#
# Vignette-removal is free software; you can redistribute it and/or modify
# it under the terms of the X11 Software License (see the LICENSE file
# for details).
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the X11
# License for more details.
#
#########
from types import *
from math import *
# from numarray import *
# from numarray.linear_algebra import *
import numpy as np
import numpy.linalg as linalg
from functools import reduce
class CylCam:
def __init__(self, size, thetarange, yrange):
self.size = size
offset = np.array([[1,0,-thetarange[0]],
[0,1,-yrange[0]],
[0,0,1]])
scale = np.array([[size[0]/(thetarange[1]-thetarange[0]),0,0],
[0,size[1]/(yrange[1]-yrange[0]),0],
[0,0,1]])
self.proj = np.dot(scale,offset)
self.invproj = linalg.inv(self.proj)
def worldToCamera(self,p):
theta = np.arctan2(p[1],p[2])
y = p[0]/np.sqrt(p[1]*p[1]+p[2]*p[2])
c = np.dot(self.proj,[theta,y,1])
return np.array([c[0],c[1]])
def cameraToWorld(self,p):
if len(p) == 2:
tp0 = type(p[0])
if isinstance(p[0],(float,int)):
p = np.array([p[0],p[1],1.0])
elif isinstance(p[0],np.ndarray):
#print p.shape, p[0].shape, p[1].shape
p = np.array([p[0],p[1],np.ones(p[0].shape)])
else:
p = np.array(p)
ty = np.dot(self.invproj,p)
theta = ty[0]/ty[2]
y = ty[1]/ty[2]
return np.array([y,np.sin(theta),np.cos(theta)])
def cameraToCamera(self,ocam,p):
if isinstance(p,list):
return list(map(lambda x,ocam=ocam:self.cameraToCamera(ocam,x),p))
else:
return ocam.worldToCamera(self.cameraToWorld(p))
def yExtent(cyl, p0, p1):
    """Return (ymin, ymax) of the cylindrical y coordinate along p0-p1.

    Samples the segment at its endpoints and at an interior candidate
    extremum pc, projects all three through ``cyl.worldToCamera`` and
    takes the min/max of the resulting y values.
    """
    # Swap the first two components to match the extremum formula below.
    p0s = np.array([p0[1], p0[0], p0[2]])
    p1s = np.array([p1[1], p1[0], p1[2]])
    d = p1s - p0s
    denom = (-d[0] * d[1] * p0s[0] + d[0] * d[0] * p0s[1]
             + d[2] * (d[2] * p0s[1] - d[1] * p0s[2]))
    if denom == 0:
        # Degenerate segment/direction: fall back to the start point.
        pc = p0
    else:
        num = (-p0s[1] * (d[0] * p0s[0] + d[2] * p0s[2])
               + d[1] * (p0s[0] * p0s[0] * p0s[1] * p0s[1]))
        t = num / denom
        t = min(max(0, t), 1)  # clamp the parameter to the segment
        pc = p0 + (p1 - p0) * t
    samples = [cyl.worldToCamera(q) for q in (p0, pc, p1)]
    heights = [s[1] for s in samples]
    return min(heights), max(heights)
def cylExtent(cameras):
    """Compute the (theta, y) bounding box of all camera images.

    Projects every image corner onto a reference cylinder to bound theta,
    then bounds y by scanning the image-boundary edges via ``yExtent``.
    Returns (tmin, tmax, ymin, ymax).
    """
    cyl = CylCam((2 * pi, 1), (0, 2 * pi), (0, 1))
    w, h = cameras[0].size
    corners = [(0, 0), (0, h), (w, h), (w, 0)]
    # Collect every image corner re-projected onto the reference cylinder.
    cyl_corners = []
    for camera in cameras:
        cyl_corners.extend(camera.cameraToCamera(cyl, corners))
    thetas = [c[0] for c in cyl_corners]
    tmin = min(thetas)
    tmax = max(thetas)
    ymin = 10e6
    ymax = -10e6
    for camera in cameras:
        world_corners = [camera.cameraToWorld(c) for c in corners]
        # Walk the four boundary edges of the image.
        for i in range(len(world_corners)):
            a = np.array(world_corners[i])
            b = np.array(world_corners[(i + 1) % 4])
            lo, hi = yExtent(cyl, a, b)
            ymin = min(lo, ymin)
            ymax = max(hi, ymax)
    return (tmin, tmax, ymin, ymax)
def makeCylCam(cameras, scale=1):
    """Construct a CylCam sized to cover every input camera's image.

    Pixel density is the first camera's focal length times ``scale``.
    """
    tmin, tmax, ymin, ymax = cylExtent(cameras)
    print (tmin, tmax, ymin, ymax)
    scale *= cameras[0].focal
    width = int(scale * (tmax - tmin))
    height = int(scale * (ymax - ymin))
    size = (width, height)
    print ('making cylcam of size', size)
    return CylCam(size, (tmin, tmax), (ymin, ymax))
| [
"numpy.sqrt",
"numpy.ones",
"functools.reduce",
"numpy.array",
"numpy.dot",
"numpy.linalg.inv",
"numpy.arctan2",
"numpy.cos",
"numpy.sin"
] | [((2128, 2159), 'numpy.array', 'np.array', (['[p0[1], p0[0], p0[2]]'], {}), '([p0[1], p0[0], p0[2]])\n', (2136, 2159), True, 'import numpy as np\n'), ((2168, 2199), 'numpy.array', 'np.array', (['[p1[1], p1[0], p1[2]]'], {}), '([p1[1], p1[0], p1[2]])\n', (2176, 2199), True, 'import numpy as np\n'), ((2674, 2689), 'functools.reduce', 'reduce', (['min', 'ys'], {}), '(min, ys)\n', (2680, 2689), False, 'from functools import reduce\n'), ((2700, 2715), 'functools.reduce', 'reduce', (['max', 'ys'], {}), '(max, ys)\n', (2706, 2715), False, 'from functools import reduce\n'), ((3046, 3089), 'functools.reduce', 'reduce', (['(lambda a, b: a + b)', 'ccorners_result'], {}), '(lambda a, b: a + b, ccorners_result)\n', (3052, 3089), False, 'from functools import reduce\n'), ((3152, 3167), 'functools.reduce', 'reduce', (['min', 'ts'], {}), '(min, ts)\n', (3158, 3167), False, 'from functools import reduce\n'), ((3178, 3193), 'functools.reduce', 'reduce', (['max', 'ts'], {}), '(max, ts)\n', (3184, 3193), False, 'from functools import reduce\n'), ((790, 855), 'numpy.array', 'np.array', (['[[1, 0, -thetarange[0]], [0, 1, -yrange[0]], [0, 0, 1]]'], {}), '([[1, 0, -thetarange[0]], [0, 1, -yrange[0]], [0, 0, 1]])\n', (798, 855), True, 'import numpy as np\n'), ((914, 1033), 'numpy.array', 'np.array', (['[[size[0] / (thetarange[1] - thetarange[0]), 0, 0], [0, size[1] / (yrange[1\n ] - yrange[0]), 0], [0, 0, 1]]'], {}), '([[size[0] / (thetarange[1] - thetarange[0]), 0, 0], [0, size[1] /\n (yrange[1] - yrange[0]), 0], [0, 0, 1]])\n', (922, 1033), True, 'import numpy as np\n'), ((1083, 1104), 'numpy.dot', 'np.dot', (['scale', 'offset'], {}), '(scale, offset)\n', (1089, 1104), True, 'import numpy as np\n'), ((1127, 1148), 'numpy.linalg.inv', 'linalg.inv', (['self.proj'], {}), '(self.proj)\n', (1137, 1148), True, 'import numpy.linalg as linalg\n'), ((1197, 1219), 'numpy.arctan2', 'np.arctan2', (['p[1]', 'p[2]'], {}), '(p[1], p[2])\n', (1207, 1219), True, 'import numpy as np\n'), ((1277, 
1309), 'numpy.dot', 'np.dot', (['self.proj', '[theta, y, 1]'], {}), '(self.proj, [theta, y, 1])\n', (1283, 1309), True, 'import numpy as np\n'), ((1322, 1344), 'numpy.array', 'np.array', (['[c[0], c[1]]'], {}), '([c[0], c[1]])\n', (1330, 1344), True, 'import numpy as np\n'), ((1738, 1761), 'numpy.dot', 'np.dot', (['self.invproj', 'p'], {}), '(self.invproj, p)\n', (1744, 1761), True, 'import numpy as np\n'), ((1236, 1270), 'numpy.sqrt', 'np.sqrt', (['(p[1] * p[1] + p[2] * p[2])'], {}), '(p[1] * p[1] + p[2] * p[2])\n', (1243, 1270), True, 'import numpy as np\n'), ((1713, 1724), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (1721, 1724), True, 'import numpy as np\n'), ((3413, 3434), 'numpy.array', 'np.array', (['wcorners[i]'], {}), '(wcorners[i])\n', (3421, 3434), True, 'import numpy as np\n'), ((3452, 3483), 'numpy.array', 'np.array', (['wcorners[(i + 1) % 4]'], {}), '(wcorners[(i + 1) % 4])\n', (3460, 3483), True, 'import numpy as np\n'), ((1494, 1521), 'numpy.array', 'np.array', (['[p[0], p[1], 1.0]'], {}), '([p[0], p[1], 1.0])\n', (1502, 1521), True, 'import numpy as np\n'), ((1840, 1853), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1846, 1853), True, 'import numpy as np\n'), ((1854, 1867), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1860, 1867), True, 'import numpy as np\n'), ((1661, 1680), 'numpy.ones', 'np.ones', (['p[0].shape'], {}), '(p[0].shape)\n', (1668, 1680), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import os, sys, json, warnings
from functools import wraps
import numpy as np
from PyQt5.QtGui import QColor
from qgis.core import (
Qgis,
QgsApplication,
QgsMeshLayer,
QgsMeshDatasetIndex,
QgsMeshUtils,
QgsProject,
QgsRasterLayer,
QgsRasterFileWriter,
QgsRasterPipe,
QgsCoordinateReferenceSystem,
QgsColorRampShader,
QgsRasterShader,
QgsSingleBandPseudoColorRenderer,
QgsRasterHistogram,
QgsErrorMessage
)
# Ignore warning function
def ignore_warnings(f):
    """Decorator that silences all Python warnings raised while *f* runs.

    The previous warning-filter state is restored when the call returns,
    so suppression never leaks outside the wrapped call.
    """
    @wraps(f)
    def inner(*args, **kwargs):
        # catch_warnings() saves/restores the filter state; record=True was
        # dropped because the recorded list was never used.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return f(*args, **kwargs)
    return inner
# Initialize application
def initialize_qgis_application():
    """Register the conda QGIS paths and create a headless QgsApplication."""
    for plugin_path in ('/opt/conda/envs/mbtiles/share/qgis',
                        '/opt/conda/envs/mbtiles/share/qgis/python/plugins'):
        sys.path.append(plugin_path)
    # False -> no GUI; this script runs headless.
    app = QgsApplication([], False)
    return app
# Add the path to processing so we can import it next
@ignore_warnings  # suppressed because "import processing" is noisy and this script's stdout must stay a single value
def initialize_processing(app):
    """Import QGIS's processing framework and initialize it.

    Returns the (app, processing) pair so the caller can rebind both names.
    """
    import processing
    from processing.core.Processing import Processing
    Processing.initialize()
    return app, processing
# Convert mesh layer as raster and save as a GeoTiff
def exportRaster(parameters):
    """Regrid a mesh dataset onto a raster grid and write it as a GeoTiff.

    :param parameters: dict with keys INPUT_LAYER (mesh file path),
        INPUT_GROUP, INPUT_TIMESTEP, MAP_UNITS_PER_PIXEL, OUTPUT_RASTER.
    :returns: the OUTPUT_RASTER path on success.
    :raises Exception: when the mesh layer fails to load.
    """
    mesh_path = parameters['INPUT_LAYER']
    mesh_name = mesh_path.strip().split('/')[-1].split('.')[0]
    layer = QgsMeshLayer(mesh_path, mesh_name, 'mdal')
    if layer.isValid() is True:
        # Pull the regridding parameters.
        dataset = parameters['INPUT_GROUP']
        timestep = parameters['INPUT_TIMESTEP']
        mupp = parameters['MAP_UNITS_PER_PIXEL']
        output_layer = parameters['OUTPUT_RASTER']
        extent = layer.extent()
        width = extent.width() / mupp
        height = extent.height() / mupp
        crs = layer.crs()
        crs.createFromSrid(4326)
        transform_context = QgsProject.instance().transformContext()
        output_format = QgsRasterFileWriter.driverForExtension(
            os.path.splitext(output_layer)[1])
        # Open the output file for writing through the gdal provider.
        writer = QgsRasterFileWriter(output_layer)
        writer.setOutputProviderKey('gdal')
        writer.setOutputFormat(output_format)
        raster_provider = writer.createOneBandRaster(Qgis.Float64, width, height, extent, crs)
        # Regrid the selected mesh dataset/timestep onto the raster block.
        dataset_index = QgsMeshDatasetIndex(dataset, timestep)
        block = QgsMeshUtils.exportRasterBlock(layer, dataset_index, crs,
                                               transform_context, mupp, extent)
        raster_provider.writeBlock(block, 1)
        raster_provider.setNoDataValue(1, block.noDataValue())
        raster_provider.setEditable(False)
        return (output_layer)
    if layer.isValid() is False:
        raise Exception('Invalid mesh')
# Add color and set transparency to GeoTiff
def styleRaster(filename):
    """Apply a pseudo-color ramp with transparency to a GeoTiff.

    Derives ramp breakpoints from the band-1 histogram, colors the raster
    with a 4-stop interpolated ramp (blue->red for 'maxele' layers,
    black->white otherwise) at 75% opacity, and writes the styled raster
    to *filename* with any '.raw' suffix removed.

    :param filename: path to the input GeoTiff (written by exportRaster).
    :raises Exception: when the raster layer fails to load.
    """
    # Create outfile name: drop every '.raw' occurrence from the path.
    outfile = "".join(filename.strip().split('.raw'))
    # Derive the layer name from the file's base name (no extension).
    rasterfile = filename.strip().split('/')[-1]
    rasterlayer = rasterfile.split('.')[0]
    rlayer = QgsRasterLayer(filename, rasterlayer, 'gdal')
    # Check if layer is valid
    if rlayer.isValid() is True:
        # Get layer data provider
        provider = rlayer.dataProvider()
        # Compute a 100-bin histogram of band 1.
        provider.initHistogram(QgsRasterHistogram(),1,100)
        hist = provider.histogram(1)
        # Get histogram stats
        nbins = hist.binCount
        minv = hist.minimum
        maxv = hist.maximum
        # Create histogram array, bin array, and histogram index
        hista = np.array(hist.histogramVector)
        bins = np.arange(minv, maxv, (maxv - minv)/nbins)
        index = np.where(hista > 5)
        # First/last bins with more than 5 samples become the ramp ends.
        # NOTE(review): if no bin exceeds 5 samples, index[0] is empty and
        # the next lines raise IndexError — confirm inputs guarantee this.
        bottomcolor = bins[index[0][0]]
        topcolor = bins[index[0][-1]]
        # Range between the bottom and top color values.
        # NOTE(review): the negative-bottom branch shrinks the range
        # (top + bottom, with bottom < 0) — verify this is intentional.
        if bottomcolor < 0:
            vrange = topcolor + bottomcolor
        else:
            vrange = topcolor - bottomcolor
        # Interior stops sit at fixed fractions of the range; maxele uses
        # thirds, all other layers use 0.375/0.75.
        if rasterlayer == 'maxele':
            bottommiddle = vrange * 0.3333
            topmiddle = vrange * 0.6667
        else:
            bottommiddle = vrange * 0.375
            topmiddle = vrange * 0.75
        # Create list of color values
        valueList =[bottomcolor, bottommiddle, topmiddle, topcolor]
        # Color palette: blue->cyan->yellow->red for maxele, grayscale-ish
        # black->red->yellow->white otherwise.
        if rasterlayer == 'maxele':
            colDic = {'bottomcolor':'#0000ff', 'bottommiddle':'#00ffff', 'topmiddle':'#ffff00', 'topcolor':'#ff0000'}
        else:
            colDic = {'bottomcolor':'#000000', 'bottommiddle':'#ff0000', 'topmiddle':'#ffff00', 'topcolor':'#ffffff'}
        # Create color ramp function and add colors
        fnc = QgsColorRampShader()
        fnc.setColorRampType(QgsColorRampShader.Interpolated)
        lst = [QgsColorRampShader.ColorRampItem(valueList[0], QColor(colDic['bottomcolor'])),\
               QgsColorRampShader.ColorRampItem(valueList[1], QColor(colDic['bottommiddle'])), \
               QgsColorRampShader.ColorRampItem(valueList[2], QColor(colDic['topmiddle'])), \
               QgsColorRampShader.ColorRampItem(valueList[3], QColor(colDic['topcolor']))]
        fnc.setColorRampItemList(lst)
        # Create raster shader and add color ramp function
        shader = QgsRasterShader()
        shader.setRasterShaderFunction(fnc)
        # Create color renderer and set opacity to 75%.
        renderer = QgsSingleBandPseudoColorRenderer(provider, 1, shader)
        renderer.setOpacity(0.75)
        # Get output format
        output_format = QgsRasterFileWriter.driverForExtension(os.path.splitext(outfile)[1])
        # Open output file for writing
        rfw = QgsRasterFileWriter(outfile)
        rfw.setOutputProviderKey('gdal')
        rfw.setOutputFormat(output_format)
        # Add EPSG 4326 to layer crs
        crs = QgsCoordinateReferenceSystem()
        crs.createFromSrid(4326)
        # Create raster pipe carrying clones of the provider and renderer.
        pipe = QgsRasterPipe()
        pipe.set(provider.clone())
        pipe.set(renderer.clone())
        # Get transform context
        transform_context = QgsProject.instance().transformContext()
        # Write the styled raster to file.
        rfw.writeRaster(
            pipe,
            provider.xSize(),
            provider.ySize(),
            provider.extent(),
            crs,
            transform_context
        )
    if not rlayer.isValid():
        raise Exception('Invalid raster')
# Stand up a headless QGIS environment, then run the export/style pipeline.
app = initialize_qgis_application()
app.initQgis()
app, processing = initialize_processing(app)
# Job parameters arrive as a JSON document in the first CLI argument.
parameters = json.loads(sys.argv[1])
filename = exportRaster(parameters)
styleRaster(filename)
app.exitQgis()
| [
"PyQt5.QtGui.QColor",
"numpy.array",
"qgis.core.QgsMeshUtils.exportRasterBlock",
"sys.path.append",
"qgis.core.QgsRasterHistogram",
"numpy.arange",
"qgis.core.QgsMeshLayer",
"qgis.core.QgsRasterLayer",
"qgis.core.QgsRasterShader",
"numpy.where",
"qgis.core.QgsMeshDatasetIndex",
"functools.wrap... | [((7056, 7079), 'json.loads', 'json.loads', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (7066, 7079), False, 'import os, sys, json, warnings\n'), ((546, 554), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (551, 554), False, 'from functools import wraps\n'), ((835, 888), 'sys.path.append', 'sys.path.append', (['"""/opt/conda/envs/mbtiles/share/qgis"""'], {}), "('/opt/conda/envs/mbtiles/share/qgis')\n", (850, 888), False, 'import os, sys, json, warnings\n'), ((893, 961), 'sys.path.append', 'sys.path.append', (['"""/opt/conda/envs/mbtiles/share/qgis/python/plugins"""'], {}), "('/opt/conda/envs/mbtiles/share/qgis/python/plugins')\n", (908, 961), False, 'import os, sys, json, warnings\n'), ((972, 997), 'qgis.core.QgsApplication', 'QgsApplication', (['[]', '(False)'], {}), '([], False)\n', (986, 997), False, 'from qgis.core import Qgis, QgsApplication, QgsMeshLayer, QgsMeshDatasetIndex, QgsMeshUtils, QgsProject, QgsRasterLayer, QgsRasterFileWriter, QgsRasterPipe, QgsCoordinateReferenceSystem, QgsColorRampShader, QgsRasterShader, QgsSingleBandPseudoColorRenderer, QgsRasterHistogram, QgsErrorMessage\n'), ((1366, 1389), 'processing.core.Processing.Processing.initialize', 'Processing.initialize', ([], {}), '()\n', (1387, 1389), False, 'from processing.core.Processing import Processing\n'), ((1668, 1707), 'qgis.core.QgsMeshLayer', 'QgsMeshLayer', (['infile', 'meshlayer', '"""mdal"""'], {}), "(infile, meshlayer, 'mdal')\n", (1680, 1707), False, 'from qgis.core import Qgis, QgsApplication, QgsMeshLayer, QgsMeshDatasetIndex, QgsMeshUtils, QgsProject, QgsRasterLayer, QgsRasterFileWriter, QgsRasterPipe, QgsCoordinateReferenceSystem, QgsColorRampShader, QgsRasterShader, QgsSingleBandPseudoColorRenderer, QgsRasterHistogram, QgsErrorMessage\n'), ((3435, 3480), 'qgis.core.QgsRasterLayer', 'QgsRasterLayer', (['filename', 'rasterlayer', '"""gdal"""'], {}), "(filename, rasterlayer, 'gdal')\n", (3449, 3480), False, 'from qgis.core import Qgis, QgsApplication, 
QgsMeshLayer, QgsMeshDatasetIndex, QgsMeshUtils, QgsProject, QgsRasterLayer, QgsRasterFileWriter, QgsRasterPipe, QgsCoordinateReferenceSystem, QgsColorRampShader, QgsRasterShader, QgsSingleBandPseudoColorRenderer, QgsRasterHistogram, QgsErrorMessage\n'), ((2395, 2428), 'qgis.core.QgsRasterFileWriter', 'QgsRasterFileWriter', (['output_layer'], {}), '(output_layer)\n', (2414, 2428), False, 'from qgis.core import Qgis, QgsApplication, QgsMeshLayer, QgsMeshDatasetIndex, QgsMeshUtils, QgsProject, QgsRasterLayer, QgsRasterFileWriter, QgsRasterPipe, QgsCoordinateReferenceSystem, QgsColorRampShader, QgsRasterShader, QgsSingleBandPseudoColorRenderer, QgsRasterHistogram, QgsErrorMessage\n'), ((2683, 2721), 'qgis.core.QgsMeshDatasetIndex', 'QgsMeshDatasetIndex', (['dataset', 'timestep'], {}), '(dataset, timestep)\n', (2702, 2721), False, 'from qgis.core import Qgis, QgsApplication, QgsMeshLayer, QgsMeshDatasetIndex, QgsMeshUtils, QgsProject, QgsRasterLayer, QgsRasterFileWriter, QgsRasterPipe, QgsCoordinateReferenceSystem, QgsColorRampShader, QgsRasterShader, QgsSingleBandPseudoColorRenderer, QgsRasterHistogram, QgsErrorMessage\n'), ((2777, 2871), 'qgis.core.QgsMeshUtils.exportRasterBlock', 'QgsMeshUtils.exportRasterBlock', (['layer', 'dataset_index', 'crs', 'transform_context', 'mupp', 'extent'], {}), '(layer, dataset_index, crs, transform_context,\n mupp, extent)\n', (2807, 2871), False, 'from qgis.core import Qgis, QgsApplication, QgsMeshLayer, QgsMeshDatasetIndex, QgsMeshUtils, QgsProject, QgsRasterLayer, QgsRasterFileWriter, QgsRasterPipe, QgsCoordinateReferenceSystem, QgsColorRampShader, QgsRasterShader, QgsSingleBandPseudoColorRenderer, QgsRasterHistogram, QgsErrorMessage\n'), ((3948, 3978), 'numpy.array', 'np.array', (['hist.histogramVector'], {}), '(hist.histogramVector)\n', (3956, 3978), True, 'import numpy as np\n'), ((3994, 4038), 'numpy.arange', 'np.arange', (['minv', 'maxv', '((maxv - minv) / nbins)'], {}), '(minv, maxv, (maxv - minv) / nbins)\n', (4003, 4038), 
True, 'import numpy as np\n'), ((4053, 4072), 'numpy.where', 'np.where', (['(hista > 5)'], {}), '(hista > 5)\n', (4061, 4072), True, 'import numpy as np\n'), ((5197, 5217), 'qgis.core.QgsColorRampShader', 'QgsColorRampShader', ([], {}), '()\n', (5215, 5217), False, 'from qgis.core import Qgis, QgsApplication, QgsMeshLayer, QgsMeshDatasetIndex, QgsMeshUtils, QgsProject, QgsRasterLayer, QgsRasterFileWriter, QgsRasterPipe, QgsCoordinateReferenceSystem, QgsColorRampShader, QgsRasterShader, QgsSingleBandPseudoColorRenderer, QgsRasterHistogram, QgsErrorMessage\n'), ((5772, 5789), 'qgis.core.QgsRasterShader', 'QgsRasterShader', ([], {}), '()\n', (5787, 5789), False, 'from qgis.core import Qgis, QgsApplication, QgsMeshLayer, QgsMeshDatasetIndex, QgsMeshUtils, QgsProject, QgsRasterLayer, QgsRasterFileWriter, QgsRasterPipe, QgsCoordinateReferenceSystem, QgsColorRampShader, QgsRasterShader, QgsSingleBandPseudoColorRenderer, QgsRasterHistogram, QgsErrorMessage\n'), ((5900, 5953), 'qgis.core.QgsSingleBandPseudoColorRenderer', 'QgsSingleBandPseudoColorRenderer', (['provider', '(1)', 'shader'], {}), '(provider, 1, shader)\n', (5932, 5953), False, 'from qgis.core import Qgis, QgsApplication, QgsMeshLayer, QgsMeshDatasetIndex, QgsMeshUtils, QgsProject, QgsRasterLayer, QgsRasterFileWriter, QgsRasterPipe, QgsCoordinateReferenceSystem, QgsColorRampShader, QgsRasterShader, QgsSingleBandPseudoColorRenderer, QgsRasterHistogram, QgsErrorMessage\n'), ((6164, 6192), 'qgis.core.QgsRasterFileWriter', 'QgsRasterFileWriter', (['outfile'], {}), '(outfile)\n', (6183, 6192), False, 'from qgis.core import Qgis, QgsApplication, QgsMeshLayer, QgsMeshDatasetIndex, QgsMeshUtils, QgsProject, QgsRasterLayer, QgsRasterFileWriter, QgsRasterPipe, QgsCoordinateReferenceSystem, QgsColorRampShader, QgsRasterShader, QgsSingleBandPseudoColorRenderer, QgsRasterHistogram, QgsErrorMessage\n'), ((6329, 6359), 'qgis.core.QgsCoordinateReferenceSystem', 'QgsCoordinateReferenceSystem', ([], {}), '()\n', (6357, 6359), 
False, 'from qgis.core import Qgis, QgsApplication, QgsMeshLayer, QgsMeshDatasetIndex, QgsMeshUtils, QgsProject, QgsRasterLayer, QgsRasterFileWriter, QgsRasterPipe, QgsCoordinateReferenceSystem, QgsColorRampShader, QgsRasterShader, QgsSingleBandPseudoColorRenderer, QgsRasterHistogram, QgsErrorMessage\n'), ((6468, 6483), 'qgis.core.QgsRasterPipe', 'QgsRasterPipe', ([], {}), '()\n', (6481, 6483), False, 'from qgis.core import Qgis, QgsApplication, QgsMeshLayer, QgsMeshDatasetIndex, QgsMeshUtils, QgsProject, QgsRasterLayer, QgsRasterFileWriter, QgsRasterPipe, QgsCoordinateReferenceSystem, QgsColorRampShader, QgsRasterShader, QgsSingleBandPseudoColorRenderer, QgsRasterHistogram, QgsErrorMessage\n'), ((600, 636), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (623, 636), False, 'import os, sys, json, warnings\n'), ((655, 686), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (676, 686), False, 'import os, sys, json, warnings\n'), ((3683, 3703), 'qgis.core.QgsRasterHistogram', 'QgsRasterHistogram', ([], {}), '()\n', (3701, 3703), False, 'from qgis.core import Qgis, QgsApplication, QgsMeshLayer, QgsMeshDatasetIndex, QgsMeshUtils, QgsProject, QgsRasterLayer, QgsRasterFileWriter, QgsRasterPipe, QgsCoordinateReferenceSystem, QgsColorRampShader, QgsRasterShader, QgsSingleBandPseudoColorRenderer, QgsRasterHistogram, QgsErrorMessage\n'), ((2202, 2223), 'qgis.core.QgsProject.instance', 'QgsProject.instance', ([], {}), '()\n', (2221, 2223), False, 'from qgis.core import Qgis, QgsApplication, QgsMeshLayer, QgsMeshDatasetIndex, QgsMeshUtils, QgsProject, QgsRasterLayer, QgsRasterFileWriter, QgsRasterPipe, QgsCoordinateReferenceSystem, QgsColorRampShader, QgsRasterShader, QgsSingleBandPseudoColorRenderer, QgsRasterHistogram, QgsErrorMessage\n'), ((2306, 2336), 'os.path.splitext', 'os.path.splitext', (['output_layer'], {}), '(output_layer)\n', (2322, 2336), False, 'import os, sys, json, 
warnings\n'), ((5342, 5371), 'PyQt5.QtGui.QColor', 'QColor', (["colDic['bottomcolor']"], {}), "(colDic['bottomcolor'])\n", (5348, 5371), False, 'from PyQt5.QtGui import QColor\n'), ((5437, 5467), 'PyQt5.QtGui.QColor', 'QColor', (["colDic['bottommiddle']"], {}), "(colDic['bottommiddle'])\n", (5443, 5467), False, 'from PyQt5.QtGui import QColor\n'), ((5534, 5561), 'PyQt5.QtGui.QColor', 'QColor', (["colDic['topmiddle']"], {}), "(colDic['topmiddle'])\n", (5540, 5561), False, 'from PyQt5.QtGui import QColor\n'), ((5628, 5654), 'PyQt5.QtGui.QColor', 'QColor', (["colDic['topcolor']"], {}), "(colDic['topcolor'])\n", (5634, 5654), False, 'from PyQt5.QtGui import QColor\n'), ((6080, 6105), 'os.path.splitext', 'os.path.splitext', (['outfile'], {}), '(outfile)\n', (6096, 6105), False, 'import os, sys, json, warnings\n'), ((6615, 6636), 'qgis.core.QgsProject.instance', 'QgsProject.instance', ([], {}), '()\n', (6634, 6636), False, 'from qgis.core import Qgis, QgsApplication, QgsMeshLayer, QgsMeshDatasetIndex, QgsMeshUtils, QgsProject, QgsRasterLayer, QgsRasterFileWriter, QgsRasterPipe, QgsCoordinateReferenceSystem, QgsColorRampShader, QgsRasterShader, QgsSingleBandPseudoColorRenderer, QgsRasterHistogram, QgsErrorMessage\n')] |
"""
"""
from keras.models import Model
from keras.layers import Input, Dropout, Dense, Embedding, concatenate
from keras.layers import GRU, LSTM, Flatten
from keras.preprocessing.sequence import pad_sequences
#from keras.preprocessing import text, sequence
from keras.preprocessing.text import Tokenizer
from keras import backend as K
from sklearn.preprocessing import LabelEncoder
from keras import backend as K
from sklearn.model_selection import train_test_split
import warnings
import os
import numpy as np
import pandas as pd
import gc
# from . import embeddings
from aisimplekit.dnn import embeddings
# Silence library deprecation noise so training logs stay readable.
warnings.filterwarnings('ignore')
# Cap OpenMP/BLAS threads — presumably tuned for the original training
# environment; TODO confirm before changing.
os.environ['OMP_NUM_THREADS'] = '4'
def root_mean_squared_error(y_true, y_pred):
    """Keras-backend RMSE loss: sqrt(mean((y_true - y_pred)^2))."""
    squared_error = K.square(y_true - y_pred)
    return K.sqrt(K.mean(squared_error))
class RnnModelType(object):
    """Enumeration of the recurrent cell types this module can build.

    Values double as human-readable identifiers.
    """
    GRU = "gru"
    LSTM = "lstm"
    # Every recognised model type (RnnTextModel validates membership).
    __SUPPORTED__ = [GRU, LSTM]
class RnnTextModel(object):
""" Class for RNN Text Model. """
def __init__(self, tokenizer_num_words, cat_cols=[],
text_seq_cols=[],
num_cols=[], num_transform_spec={},
max_seq_length=100,
embedding_file='../input/fasttest-common-crawl-russian/cc.ru.300.vec',
embedding_dim1=300, emb_out_size=10,
_prepare_df_handler=None,
batch_size=512*3, model_type=RnnModelType.GRU, n_units=50,
dropout_0=0.1, dropout_1=0.1, ndense_0=512, ndense_1=64,
final_layer_handler=None,
loss_fn=None, metrics_fns=None, learning_rates=(0.009, 0.0045),
optimizer="adam",
text_spec={}):
"""
:param text_spec: Specification of text columns, num_word per col, embeddings..
:type text_spec: dict
"""
assert(batch_size > 0)
assert(emb_out_size > 0)
assert(model_type in RnnModelType.__SUPPORTED__)
assert(n_units > 0)
# Loss and metrics functions
if loss_fn is None and (metrics_fns is None or len(metrics_fns)==0):
print('No loss, nor metrics specified: using rmse by default!')
loss_fn = root_mean_squared_error
metrics_fns = [root_mean_squared_error]
assert(loss_fn is not None)
assert(metrics_fns is not None and len(metrics_fns) > 0)
self.loss_fn = loss_fn
self.metrics_fns = metrics_fns
self.learning_rates = learning_rates
self.optimizer = optimizer
# Inputs and Preprocessing
self.max_seq_length = max_seq_length
self.cat_cols = cat_cols
self.text_seq_cols = text_seq_cols
self.num_cols = num_cols
self.num_transform_spec = num_transform_spec
# num_words: the maximum number of words to keep, based
# on word frequency. Only the most common `num_words-1` words will be kept.
tokenizers = {}
for col in self.text_seq_cols:
tokenizers[col] = Tokenizer(num_words=tokenizer_num_words)
self.tokenizers = tokenizers
# self.tokenizer = Tokenizer(num_words=num_words)
vocab_size = {}
for col in self.text_seq_cols:
vocab_size[col] = -1
self.vocab_size = vocab_size
self._prepare_df_handler = _prepare_df_handler
# Embeddings for categorical
self.embedding_dim1 = embedding_dim1 # from the pretrained vectors
self.embedding_file = embedding_file
self.emb_out_size = emb_out_size
# Model: GRU or LSTM
self.batch_size = batch_size
self.model_type = model_type
self.n_units = n_units
self.model = None
# Final layer
self.dropout_0 = dropout_0
self.dropout_1 = dropout_1
self.ndense_0 = ndense_0
self.ndense_1 = ndense_1
self.final_layer_handler = final_layer_handler # possibility to override final layer composition.
def _prepare_df(self, df):
""" """
if self._prepare_df_handler:
return self._prepare_df_handler(df)
return df
def _fit_text(self, df, traindex):
""" """
for col in self.text_seq_cols:
all_text = np.hstack([df.loc[traindex,:][col].str.lower()])
self.tokenizers[col].fit_on_texts(all_text)
self.vocab_size[col] = len(self.tokenizers[col].word_index)+2
del(all_text)
gc.collect()
def _encode_categorical(self, df):
""" """
for col in self.cat_cols:
le = LabelEncoder()
le.fit(df[col])
df[col] = le.transform(df[col])
return df
def _build_text_sequences(self, df):
""" """
for col in self.text_seq_cols:
df['seq_{}'.format(col)] = (
self.tokenizers[col]
.texts_to_sequences(
df[col].str.lower()
)
)
del(df[col])
gc.collect()
return df
def _preprocess_numerical(self, df):
""" """
# df['price'] = np.log1p(df['price']) # already transformed to log
# if False:
# print('WITH USER AGG !')
# df['avg_days_up_user'] = np.log1p(df['avg_days_up_user'])
# df['avg_times_up_user'] = np.log1p(df['avg_times_up_user'])
# df['n_user_items'] = np.log1p(df['n_user_items'])
for col in self.num_cols:
if col in self.num_transform_spec.keys():
transf_fn = self.num_transform_spec[col]
df[col] = transf_fn(df[col])
return df
def prepare_df(self, df, traindex):
""" """
df = self._prepare_df(df)
self._fit_text(df, traindex)
df = self._encode_categorical(df)
df = self._build_text_sequences(df)
df = self._preprocess_numerical(df)
return df
def get_keras_data(self, dataset, max_seq_length):
""" """
data = {}
for col in self.text_seq_cols:
data['seq_{}'.format(col)] = pad_sequences(dataset['seq_{}'.format(col)],
maxlen=max_seq_length)
# FIXME: max_seq_title.. is common to all text_seq_cols => should not be !
# checking that text_seq_cols dont contain any categorical simple cols => exclusive.
assert(all([col not in self.cat_cols for col in self.text_seq_cols]))
# categorical + numerical
cols = self.cat_cols + self.num_cols
for col in cols:
data[col] = np.array(dataset[[col]])
return data
def build_rnn_model(self, embedding_matrixes):
""" """
# Inputs
# 1) sequential columns
arr_inputs_seq = []
for col in self.text_seq_cols:
in_seq = Input(shape=[self.max_seq_length], # FIXME: shouldnt be common to all text_seq_cols
name="seq_{}".format(col))
arr_inputs_seq.append(in_seq)
# 2) categorical+numerical columns
arr_inputs_cat = []
for col in self.cat_cols:
in_cat = Input(shape=[1], name=col)
arr_inputs_cat.append(in_cat)
arr_inputs_num = []
for col in self.num_cols:
in_num = Input(shape=[1], name=col)
arr_inputs_num.append(in_num)
# Embeddings layers
col = self.text_seq_cols[0] # FIXME: why do we arbirarily choose the first column ?????
if self.vocab_size[col] < 0:
self.vocab_size[col] = len(self.tokenizers[col].word_index)+2
global_vocab_size = self.vocab_size[col] ## FIXME: vocab size is for the 1st seq column only ???
## FIXME: Why do we use it in all the embeddings ????
embs_seqs = []
for idx, col in enumerate(self.text_seq_cols):
if self.vocab_size[col] < 0:
self.vocab_size[col] = len(self.tokenizers[col].word_index)+2
vocab_size = self.vocab_size[col] ## FIXME: vocab size is for the 1st seq column only ???
emb_col = Embedding(
vocab_size, self.embedding_dim1, weights=[embedding_matrixes[idx]],
trainable=False
)(arr_inputs_seq[idx])
embs_seqs.append(emb_col)
# emb_seq_title_description = Embedding(
# vocab_size, self.embedding_dim1, weights=[embedding_matrix1],
# trainable=False
# )(seq_title_description)
# For each categorical col, transform to vector of scalars using Embedding.
emb_out_size = self.emb_out_size # embedding output size default
embs_cat = []
for idx, col in enumerate(self.cat_cols):
emb_col = Embedding(global_vocab_size, emb_out_size)(arr_inputs_cat[idx])
embs_cat.append(emb_col)
# GRU Model (or LSTM)
rnn_layers = []
if self.model_type is RnnModelType.GRU:
rnn_layers = [
GRU(self.n_units)(emb)
for emb in embs_seqs
]
elif self.model_type is RnnModelType.LSTM:
rnn_layers = [
LSTM(self.n_units)(emb)
for emb in embs_seqs
]
else:
raise Exception('[error] Unsupported Model Type:{}'.format(self.model_type))
#main layer
layers = [
*rnn_layers,
*[Flatten()(emb) for emb in embs_cat],
*arr_inputs_num,
]
main_l = concatenate(layers)
if self.final_layer_handler is not None:
# Possibility to override defaut double dense layers with dropout
main_l = self.final_layer_handler(main_l)
else:
main_l = Dropout(self.dropout_0)(Dense(self.ndense_0, activation='relu') (main_l))
main_l = Dropout(self.dropout_1)(Dense(self.ndense_1, activation='relu') (main_l))
#output
output = Dense(1, activation="sigmoid") (main_l)
#model
inputs = arr_inputs_seq + arr_inputs_cat + arr_inputs_num # order matters
model = Model(inputs, output)
model.compile(optimizer=self.optimizer, loss=self.loss_fn, metrics=self.metrics_fns)
self.model = model
def rmse(self, y, y_pred):
""" """
rsum = np.sum((y-y_pred)**2)
n = y.shape[0]
rmse = np.sqrt(rsum/n)
return rmse
def eval_model(self, X_test1, y_test1):
""" """
val_preds = self.model.predict(X_test1)
y_pred = val_preds[:, 0]
y_true = np.array(y_test1)
yt = pd.DataFrame(y_true)
yp = pd.DataFrame(y_pred)
print(yt.isnull().any())
print(yp.isnull().any())
v_rmse = self.rmse(y_true, y_pred)
print("rmse for validation set: "+str(v_rmse))
return v_rmse
def init_predictor(self, df, traindex):
""" """
df = self.prepare_df(df, traindex)
embedding_matrixes = []
for col in self.text_seq_cols:
embedding_matrix1 = embeddings.load_embedding_matrix(
self.embedding_file,
self.vocab_size[col],
self.embedding_dim1, # FIXME: same for all text_seq_cols ????
self.tokenizers[col]
)
embedding_matrixes.append(embedding_matrix1)
self.build_rnn_model(embedding_matrixes)
return df
def fit(self, train, y, n_iter=3, cv=False, test_size=0.10, random_state=23):
""" """
if cv is True:
raise Exception('Not Yet Implemented !')
X_train, X_valid, y_train, y_valid = train_test_split(
train, y,
test_size=test_size,
random_state=random_state
)
# Fit the NN Model
X_train = self.get_keras_data(X_train, self.max_seq_length)
X_valid = self.get_keras_data(X_valid, self.max_seq_length)
exp_decay = lambda init, fin, steps: (init/fin)**(1/(steps-1)) - 1
# Initializing a new model for current fold
epochs = 1
steps = (int(train.shape[0]/self.batch_size))*epochs
(lr_init, lr_fin) = self.learning_rates
lr_decay = exp_decay(lr_init, lr_fin, steps)
K.set_value(self.model.optimizer.lr, lr_init)
K.set_value(self.model.optimizer.decay, lr_decay)
for i in range(n_iter):
hist = self.model.fit(X_train, y_train,
batch_size=self.batch_size+(self.batch_size*(2*i)),
epochs=epochs, validation_data=(X_valid, y_valid),
verbose=1)
v_rmse = self.eval_model(X_valid, y_valid)
del(X_train)
del(X_valid)
del(y_train)
del(y_valid)
gc.collect()
return v_rmse
    def predict(self, df_test, verbose=1):
        """Predict probabilities for `df_test`; result is clipped to [0, 1], shape (n, 1)."""
        X_test = self.get_keras_data(
            df_test,
            max_seq_length=self.max_seq_length
        )
        preds1 = self.model.predict(X_test, batch_size=self.batch_size, verbose=verbose)
        # Free the Keras input dict before post-processing.
        del(X_test)
        gc.collect()
        print("RNN Prediction is done.")
        # Reshape to a single column and clamp to the valid probability range.
        preds = preds1.reshape(-1,1)
        preds = np.clip(preds, 0, 1)
        print(preds.shape)
return preds | [
"numpy.clip",
"sklearn.preprocessing.LabelEncoder",
"numpy.sqrt",
"numpy.array",
"keras.layers.Dense",
"keras.backend.square",
"keras.layers.LSTM",
"keras.layers.concatenate",
"keras.models.Model",
"pandas.DataFrame",
"keras.layers.Flatten",
"sklearn.model_selection.train_test_split",
"aisim... | [((609, 642), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (632, 642), False, 'import warnings\n'), ((9648, 9667), 'keras.layers.concatenate', 'concatenate', (['layers'], {}), '(layers)\n', (9659, 9667), False, 'from keras.layers import Input, Dropout, Dense, Embedding, concatenate\n'), ((10243, 10264), 'keras.models.Model', 'Model', (['inputs', 'output'], {}), '(inputs, output)\n', (10248, 10264), False, 'from keras.models import Model\n'), ((10448, 10473), 'numpy.sum', 'np.sum', (['((y - y_pred) ** 2)'], {}), '((y - y_pred) ** 2)\n', (10454, 10473), True, 'import numpy as np\n'), ((10508, 10525), 'numpy.sqrt', 'np.sqrt', (['(rsum / n)'], {}), '(rsum / n)\n', (10515, 10525), True, 'import numpy as np\n'), ((10703, 10720), 'numpy.array', 'np.array', (['y_test1'], {}), '(y_test1)\n', (10711, 10720), True, 'import numpy as np\n'), ((10734, 10754), 'pandas.DataFrame', 'pd.DataFrame', (['y_true'], {}), '(y_true)\n', (10746, 10754), True, 'import pandas as pd\n'), ((10768, 10788), 'pandas.DataFrame', 'pd.DataFrame', (['y_pred'], {}), '(y_pred)\n', (10780, 10788), True, 'import pandas as pd\n'), ((11767, 11841), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train', 'y'], {'test_size': 'test_size', 'random_state': 'random_state'}), '(train, y, test_size=test_size, random_state=random_state)\n', (11783, 11841), False, 'from sklearn.model_selection import train_test_split\n'), ((12370, 12415), 'keras.backend.set_value', 'K.set_value', (['self.model.optimizer.lr', 'lr_init'], {}), '(self.model.optimizer.lr, lr_init)\n', (12381, 12415), True, 'from keras import backend as K\n'), ((12424, 12473), 'keras.backend.set_value', 'K.set_value', (['self.model.optimizer.decay', 'lr_decay'], {}), '(self.model.optimizer.decay, lr_decay)\n', (12435, 12473), True, 'from keras import backend as K\n'), ((12877, 12889), 'gc.collect', 'gc.collect', ([], {}), '()\n', (12887, 12889), False, 'import gc\n'), ((13206, 
13218), 'gc.collect', 'gc.collect', ([], {}), '()\n', (13216, 13218), False, 'import gc\n'), ((13314, 13334), 'numpy.clip', 'np.clip', (['preds', '(0)', '(1)'], {}), '(preds, 0, 1)\n', (13321, 13334), True, 'import numpy as np\n'), ((782, 807), 'keras.backend.square', 'K.square', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (790, 807), True, 'from keras import backend as K\n'), ((3084, 3124), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'tokenizer_num_words'}), '(num_words=tokenizer_num_words)\n', (3093, 3124), False, 'from keras.preprocessing.text import Tokenizer\n'), ((4519, 4531), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4529, 4531), False, 'import gc\n'), ((4639, 4653), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (4651, 4653), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((5077, 5089), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5087, 5089), False, 'import gc\n'), ((6687, 6711), 'numpy.array', 'np.array', (['dataset[[col]]'], {}), '(dataset[[col]])\n', (6695, 6711), True, 'import numpy as np\n'), ((7254, 7280), 'keras.layers.Input', 'Input', ([], {'shape': '[1]', 'name': 'col'}), '(shape=[1], name=col)\n', (7259, 7280), False, 'from keras.layers import Input, Dropout, Dense, Embedding, concatenate\n'), ((7407, 7433), 'keras.layers.Input', 'Input', ([], {'shape': '[1]', 'name': 'col'}), '(shape=[1], name=col)\n', (7412, 7433), False, 'from keras.layers import Input, Dropout, Dense, Embedding, concatenate\n'), ((10088, 10118), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (10093, 10118), False, 'from keras.layers import Input, Dropout, Dense, Embedding, concatenate\n'), ((11183, 11305), 'aisimplekit.dnn.embeddings.load_embedding_matrix', 'embeddings.load_embedding_matrix', (['self.embedding_file', 'self.vocab_size[col]', 'self.embedding_dim1', 'self.tokenizers[col]'], {}), '(self.embedding_file, self.vocab_size[col],\n 
self.embedding_dim1, self.tokenizers[col])\n', (11215, 11305), False, 'from aisimplekit.dnn import embeddings\n'), ((8239, 8338), 'keras.layers.Embedding', 'Embedding', (['vocab_size', 'self.embedding_dim1'], {'weights': '[embedding_matrixes[idx]]', 'trainable': '(False)'}), '(vocab_size, self.embedding_dim1, weights=[embedding_matrixes[idx]\n ], trainable=False)\n', (8248, 8338), False, 'from keras.layers import Input, Dropout, Dense, Embedding, concatenate\n'), ((8883, 8925), 'keras.layers.Embedding', 'Embedding', (['global_vocab_size', 'emb_out_size'], {}), '(global_vocab_size, emb_out_size)\n', (8892, 8925), False, 'from keras.layers import Input, Dropout, Dense, Embedding, concatenate\n'), ((9885, 9908), 'keras.layers.Dropout', 'Dropout', (['self.dropout_0'], {}), '(self.dropout_0)\n', (9892, 9908), False, 'from keras.layers import Input, Dropout, Dense, Embedding, concatenate\n'), ((9980, 10003), 'keras.layers.Dropout', 'Dropout', (['self.dropout_1'], {}), '(self.dropout_1)\n', (9987, 10003), False, 'from keras.layers import Input, Dropout, Dense, Embedding, concatenate\n'), ((9130, 9147), 'keras.layers.GRU', 'GRU', (['self.n_units'], {}), '(self.n_units)\n', (9133, 9147), False, 'from keras.layers import GRU, LSTM, Flatten\n'), ((9909, 9948), 'keras.layers.Dense', 'Dense', (['self.ndense_0'], {'activation': '"""relu"""'}), "(self.ndense_0, activation='relu')\n", (9914, 9948), False, 'from keras.layers import Input, Dropout, Dense, Embedding, concatenate\n'), ((10004, 10043), 'keras.layers.Dense', 'Dense', (['self.ndense_1'], {'activation': '"""relu"""'}), "(self.ndense_1, activation='relu')\n", (10009, 10043), False, 'from keras.layers import Input, Dropout, Dense, Embedding, concatenate\n'), ((9298, 9316), 'keras.layers.LSTM', 'LSTM', (['self.n_units'], {}), '(self.n_units)\n', (9302, 9316), False, 'from keras.layers import GRU, LSTM, Flatten\n'), ((9555, 9564), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (9562, 9564), False, 'from keras.layers 
import GRU, LSTM, Flatten\n')] |
# import native Python packages
import random
# import third party packages
from fastapi import APIRouter, Request
from fastapi.responses import HTMLResponse
from fastapi.templating import Jinja2Templates
import pandas
import numpy
import scipy
# import api stuff
from src.api.autobracket import single_sim_bracket
# router and templates
# All autobracket endpoints are grouped under the /autobracket URL prefix.
autobracket_views = APIRouter(prefix="/autobracket")
# Jinja2 templates are resolved relative to the top-level "templates" directory.
templates = Jinja2Templates(directory="templates")
@autobracket_views.get("/", response_class=HTMLResponse, tags=["react_view"])
async def app2021(request: Request):
    """Serve the 2021 React single-page-app shell."""
    ctx = {'request': request}
    return templates.TemplateResponse('autobracket/app-2021.html', context=ctx)
@autobracket_views.get("/plotly", response_class=HTMLResponse, tags=["react_view"])
async def app2021plotly(request: Request):
    """Serve the 2021 Plotly variant of the single-page app."""
    ctx = {'request': request}
    return templates.TemplateResponse('autobracket/app-2021-plotly.html', context=ctx)
@autobracket_views.get("/generate", response_class=HTMLResponse, tags=["simple_view"])
async def generate(request: Request):
    """Render the bracket-generation form page."""
    ctx = {'request': request}
    return templates.TemplateResponse('autobracket/generate.html', context=ctx)
@autobracket_views.post("/bracket", response_class=HTMLResponse, tags=["form_post_view"])
async def bracket(request: Request):
    """Simulate a 2021 bracket at the posted spice level and render it.

    BUG FIX: this route was registered at "/old-bracket" — the same path as
    old_bracket() below. Routes match in registration order, so old_bracket
    was unreachable. The current endpoint now lives at "/bracket".
    """
    form = await request.form()
    bracket_json = await single_sim_bracket("2021", form['spice_level'])
    return templates.TemplateResponse(
        'autobracket/bracket.html',
        context={
            'request': request,
            'bracket': bracket_json,
        }
    )
@autobracket_views.post("/old-bracket", response_class=HTMLResponse, tags=["form_post_view"])
async def old_bracket(request: Request):
    """Legacy endpoint: run run_tournament() with the user's model settings.

    Hitting this URL without the form yields FastAPI's standard 400 error.
    """
    form = await request.form()
    model_choice = form['model_choice']
    chaos_choice = int(form['chaos_choice'])
    model_current = form['model_current']
    # Reject chaos levels outside 0..10 with the legacy tuple-style 400 response.
    if not 0 <= int(chaos_choice) <= 10:
        return 'You didn\'t fill out the form correctly!', 400
    simulated_df, actual_df = run_tournament(
        model_choice,
        chaos_choice,
        model_current,
    )
    return templates.TemplateResponse(
        'autobracket/bracket.html',
        context={
            'request': request,
            'simulated_df': simulated_df,
            'actual_df': actual_df,
        }
    )
def run_tournament(model_choice, chaos_choice, model_current):
    """Simulate the 2019 NCAA tournament bracket game by game.

    model_choice: 'Classic' normalizes ratings against the 68-team field;
        anything else normalizes against the full Kenpom table.
    chaos_choice: 0-10; higher values widen the random draw (more upsets).
    model_current: 'partial' restarts from the actual results file and only
        simulates games that have not happened yet; anything else simulates
        all 67 games from the clean matchup table.
    Returns (simulated_bracket_df, actual_results_df).
    """
    # pull a clean matchup table for the next model run
    bracket_19 = pandas.read_csv(
        'backup/autobracket/matchup_table_2019.csv',
        index_col='game_id',
    )
    # also pull the actual results, for comparison purposes
    actual_19 = pandas.read_csv(
        'backup/autobracket/autobracket_actual_19.csv',
        index_col='game_id',
    )
    # full Kenpom table reads for the classic and modern methods
    kenpom_19 = pandas.read_csv(
        'backup/autobracket/team_index_2019.csv',
        index_col='seed',
    )
    kenpom_19_full = pandas.read_csv(
        'backup/autobracket/team_index_2019_full.csv',
        index_col='team_id',
    )
    # core of the model: offensive/defensive efficiency ratio plus opponent
    # adjustment; this is the field that will be distributed
    kenpom_19['teamsim'] = kenpom_19['KenTeamAdjO'] / kenpom_19['KenTeamAdjD'] + (kenpom_19['KenTeamOppAdjEM'] / 100)
    kenpom_19_full['teamsim'] = (
        kenpom_19_full['KenTeamAdjO'] /
        kenpom_19_full['KenTeamAdjD'] +
        (kenpom_19_full['KenTeamOppAdjEM'] / 100)
    )
    # classic version: z-score parameters from the tournament field only
    if (model_choice == 'Classic'):
        zmean = kenpom_19['teamsim'].mean()
        zstd = kenpom_19['teamsim'].std(ddof=0)
    # modern version: z-score parameters from the full table
    else:
        zmean = kenpom_19_full['teamsim'].mean()
        zstd = kenpom_19_full['teamsim'].std(ddof=0)
    # bring in team rating and simulate the game, starting at game_id 1
    x = 1
    # update x if user only wants to run the games that haven't happened yet
    if (model_current == 'partial'):
        bracket_19 = pandas.read_csv(
            'backup/autobracket/autobracket_actual_19.csv',
            index_col='game_id'
        )
        x = x + 4 + 32 + 16 # + 8 + 4 + 2 + 1
    while x < 68:
        # lookup values for seed1, seed2
        z1 = bracket_19.loc[x, 'seed1']
        z2 = bracket_19.loc[x, 'seed2']
        # identifying the advancing location in bracket
        z3 = bracket_19.loc[x, 'advance_to']
        # populate games (z-sim uniform method): each team draws
        # uniform(strength_percentile * (10 - chaos), 10), so chaos_choice=0
        # anchors the draw at the team's percentile and 10 is a pure coin flip.
        # NOTE(review): the rating lookup always uses the seed-indexed
        # kenpom_19 table even in the "modern" branch — confirm intended.
        bracket_19.loc[x, 'team1sim'] = random.uniform(
            scipy.stats.norm.cdf((kenpom_19.loc[str(z1), 'teamsim'] - zmean) / zstd) * (10 - chaos_choice), 10
        )
        bracket_19.loc[x, 'team2sim'] = random.uniform(
            scipy.stats.norm.cdf((kenpom_19.loc[str(z2), 'teamsim'] - zmean) / zstd) * (10 - chaos_choice), 10
        )
        # who won?
        w_team = numpy.where(
            bracket_19.loc[x, 'team1sim'] > bracket_19.loc[x, 'team2sim'],
            bracket_19.loc[x, 'team1'],
            bracket_19.loc[x, 'team2']
        )
        w_seed = numpy.where(
            bracket_19.loc[x, 'team1sim'] > bracket_19.loc[x, 'team2sim'],
            bracket_19.loc[x, 'seed1'],
            bracket_19.loc[x, 'seed2']
        )
        # if the team1 slot is full, need to fill the team2 slot instead
        f1 = numpy.where(
            pandas.isna(bracket_19.loc[z3, 'team1']),
            'team1',
            'team2'
        )
        f2 = numpy.where(
            pandas.isna(bracket_19.loc[z3, 'team1']),
            'seed1',
            'seed2'
        )
        # advance the correct team/seed to the correct slot
        bracket_19.loc[z3, str(f1)] = str(w_team)
        bracket_19.loc[z3, str(f2)] = str(w_seed)
        x = x + 1
    # output modeled bracket to database.
    # step removed
    # if(str(bracket_19.loc[x-1, 'team1']) == str(w_team)):
    #     winner = 'team1'
    # else:
    #     winner = 'team2'
    # champion_19 = pandas.DataFrame(
    #     {
    #         'champion': [bracket_19.loc[x-1, winner]],
    #         'timestamp': datetime.datetime.now(),
    #         'model_choice': model_choice,
    #         'modelChaos': chaos_choice,
    #         'modelSize': model_current,
    #     }
    # )
    # output champion to champion table in database
    # step removed
    return(bracket_19, actual_19)
| [
"pandas.read_csv",
"numpy.where",
"fastapi.templating.Jinja2Templates",
"fastapi.APIRouter",
"src.api.autobracket.single_sim_bracket",
"pandas.isna"
] | [((362, 394), 'fastapi.APIRouter', 'APIRouter', ([], {'prefix': '"""/autobracket"""'}), "(prefix='/autobracket')\n", (371, 394), False, 'from fastapi import APIRouter, Request\n'), ((407, 445), 'fastapi.templating.Jinja2Templates', 'Jinja2Templates', ([], {'directory': '"""templates"""'}), "(directory='templates')\n", (422, 445), False, 'from fastapi.templating import Jinja2Templates\n'), ((3298, 3384), 'pandas.read_csv', 'pandas.read_csv', (['"""backup/autobracket/matchup_table_2019.csv"""'], {'index_col': '"""game_id"""'}), "('backup/autobracket/matchup_table_2019.csv', index_col=\n 'game_id')\n", (3313, 3384), False, 'import pandas\n'), ((3479, 3568), 'pandas.read_csv', 'pandas.read_csv', (['"""backup/autobracket/autobracket_actual_19.csv"""'], {'index_col': '"""game_id"""'}), "('backup/autobracket/autobracket_actual_19.csv', index_col=\n 'game_id')\n", (3494, 3568), False, 'import pandas\n'), ((3669, 3744), 'pandas.read_csv', 'pandas.read_csv', (['"""backup/autobracket/team_index_2019.csv"""'], {'index_col': '"""seed"""'}), "('backup/autobracket/team_index_2019.csv', index_col='seed')\n", (3684, 3744), False, 'import pandas\n'), ((3789, 3877), 'pandas.read_csv', 'pandas.read_csv', (['"""backup/autobracket/team_index_2019_full.csv"""'], {'index_col': '"""team_id"""'}), "('backup/autobracket/team_index_2019_full.csv', index_col=\n 'team_id')\n", (3804, 3877), False, 'import pandas\n'), ((1373, 1420), 'src.api.autobracket.single_sim_bracket', 'single_sim_bracket', (['"""2021"""', "form['spice_level']"], {}), "('2021', form['spice_level'])\n", (1391, 1420), False, 'from src.api.autobracket import single_sim_bracket\n'), ((4733, 4822), 'pandas.read_csv', 'pandas.read_csv', (['"""backup/autobracket/autobracket_actual_19.csv"""'], {'index_col': '"""game_id"""'}), "('backup/autobracket/autobracket_actual_19.csv', index_col=\n 'game_id')\n", (4748, 4822), False, 'import pandas\n'), ((5581, 5715), 'numpy.where', 'numpy.where', (["(bracket_19.loc[x, 'team1sim'] > 
bracket_19.loc[x, 'team2sim'])", "bracket_19.loc[x, 'team1']", "bracket_19.loc[x, 'team2']"], {}), "(bracket_19.loc[x, 'team1sim'] > bracket_19.loc[x, 'team2sim'],\n bracket_19.loc[x, 'team1'], bracket_19.loc[x, 'team2'])\n", (5592, 5715), False, 'import numpy\n'), ((5775, 5909), 'numpy.where', 'numpy.where', (["(bracket_19.loc[x, 'team1sim'] > bracket_19.loc[x, 'team2sim'])", "bracket_19.loc[x, 'seed1']", "bracket_19.loc[x, 'seed2']"], {}), "(bracket_19.loc[x, 'team1sim'] > bracket_19.loc[x, 'team2sim'],\n bracket_19.loc[x, 'seed1'], bracket_19.loc[x, 'seed2'])\n", (5786, 5909), False, 'import numpy\n'), ((6064, 6104), 'pandas.isna', 'pandas.isna', (["bracket_19.loc[z3, 'team1']"], {}), "(bracket_19.loc[z3, 'team1'])\n", (6075, 6104), False, 'import pandas\n'), ((6195, 6235), 'pandas.isna', 'pandas.isna', (["bracket_19.loc[z3, 'team1']"], {}), "(bracket_19.loc[z3, 'team1'])\n", (6206, 6235), False, 'import pandas\n')] |
#!/usr/bin/env python
import requests
import json
import time
import bs4 as bs
import datetime as dt
import os
import pandas_datareader.data as web
import pickle
import requests
import yaml
import yfinance as yf
import pandas as pd
import dateutil.relativedelta
import numpy as np
from datetime import date
from datetime import datetime
# Resolve all paths relative to this script and make sure the output
# directories exist before any downloads start.
DIR = os.path.dirname(os.path.realpath(__file__))
if not os.path.exists(os.path.join(DIR, 'data')):
    os.makedirs(os.path.join(DIR, 'data'))
if not os.path.exists(os.path.join(DIR, 'tmp')):
    os.makedirs(os.path.join(DIR, 'tmp'))
# The private config (next to the script) overrides the shared config.yaml
# (resolved from the current working directory); both files are optional.
try:
    with open(os.path.join(DIR, 'config_private.yaml'), 'r') as stream:
        private_config = yaml.safe_load(stream)
except FileNotFoundError:
    private_config = None
except yaml.YAMLError as exc:
    # A malformed file is reported but not fatal here; the name may be left
    # unset in that case and cfg() has to tolerate it.
    print(exc)
try:
    with open('config.yaml', 'r') as stream:
        config = yaml.safe_load(stream)
except FileNotFoundError:
    config = None
except yaml.YAMLError as exc:
    print(exc)
def cfg(key):
    """Return the configured value for *key*, or None if it is not set.

    config_private.yaml takes precedence over config.yaml. A missing or
    unparsable config (name unset or None), or a missing key, falls through
    to the next source.

    BUG FIX: the original nested bare ``except:`` clauses swallowed every
    exception, including genuine programming errors; only the expected
    failure modes are caught now.
    """
    for source in ('private_config', 'config'):
        # globals().get tolerates the name being left unset when the YAML
        # file failed to parse (see the module-level loading code).
        conf = globals().get(source)
        try:
            return conf[key]
        except (TypeError, KeyError):
            # TypeError: the config is None/unset; KeyError: key absent.
            continue
    return None
def getSecurities(url, tickerPos = 1, tablePos = 1, sectorPosOffset = 1, universe = "N/A"):
    """Scrape a Wikipedia constituents table into {ticker: {ticker, sector, universe}}.

    url: Wikipedia page containing a 'wikitable sortable' constituents table.
    tickerPos: 1-based column index of the ticker symbol.
    tablePos: 1-based index of the wikitable on the page.
    sectorPosOffset: column offset from the ticker column to the sector column.
    universe: label stored alongside every scraped security.
    Side effect: pickles the result to <DIR>/tmp/tickers.pickle.
    """
    resp = requests.get(url)
    soup = bs.BeautifulSoup(resp.text, 'lxml')
    table = soup.findAll('table', {'class': 'wikitable sortable'})[tablePos-1]
    secs = {}
    # NOTE(review): tablePos is reused here as the number of leading rows to
    # skip; that looks like it was meant to be a fixed header skip (1) —
    # confirm against the actual Wikipedia page layouts before changing.
    for row in table.findAll('tr')[tablePos:]:
        sec = {}
        sec["ticker"] = row.findAll('td')[tickerPos-1].text.strip()
        sec["sector"] = row.findAll('td')[tickerPos-1+sectorPosOffset].text.strip()
        sec["universe"] = universe
        secs[sec["ticker"]] = sec
    with open(os.path.join(DIR, "tmp", "tickers.pickle"), "wb") as f:
        pickle.dump(secs, f)
    return secs
def get_resolved_securities():
    """Collect tickers for every index universe that is enabled in the config."""
    universes = (
        ("NQ100", 'https://en.wikipedia.org/wiki/Nasdaq-100',
         {"tickerPos": 2, "tablePos": 3, "universe": "Nasdaq 100"}),
        ("SP500", 'http://en.wikipedia.org/wiki/List_of_S%26P_500_companies',
         {"sectorPosOffset": 3, "universe": "S&P 500"}),
        ("SP400", 'https://en.wikipedia.org/wiki/List_of_S%26P_400_companies',
         {"tickerPos": 2, "universe": "S&P 400"}),
        ("SP600", 'https://en.wikipedia.org/wiki/List_of_S%26P_600_companies',
         {"tickerPos": 2, "universe": "S&P 600"}),
    )
    tickers = {}
    for flag, url, kwargs in universes:
        if cfg(flag):
            tickers.update(getSecurities(url, **kwargs))
    return tickers
# Credentials / endpoint templates pulled from the YAML configs.
API_KEY = cfg("API_KEY")
TD_API = cfg("TICKERS_API")
PRICE_DATA_OUTPUT = os.path.join(DIR, "data", "price_history.json")
# NOTE: Wikipedia scraping runs at import time; SECURITIES is a dict-values view.
SECURITIES = get_resolved_securities().values()
DATA_SOURCE = cfg("DATA_SOURCE")
def create_price_history_file(tickers_dict):
    """Persist the collected per-ticker data as JSON at PRICE_DATA_OUTPUT."""
    with open(PRICE_DATA_OUTPUT, "w") as out_file:
        out_file.write(json.dumps(tickers_dict))
def enrich_ticker_data(ticker_response, security):
    """Copy the security's sector/universe metadata onto the response dict (in place)."""
    for field in ("sector", "universe"):
        ticker_response[field] = security[field]
def tda_params(apikey, period_type="year", period=1, frequency_type="daily", frequency=1):
    """Return TD Ameritrade price-history GET params as key/value pairs.

    Defaults follow the Clenow setup: one year of daily candles.
    """
    params = {
        "apikey": apikey,
        "periodType": period_type,
        "period": period,
        "frequencyType": frequency_type,
        "frequency": frequency,
    }
    return tuple(params.items())
def print_data_progress(ticker, universe, idx, securities, error_text, elapsed_s, remaining_s):
    """Print one progress line with elapsed time and an ETA for the download loop."""
    epoch = datetime.fromtimestamp(0)

    def as_delta(seconds):
        # Express a seconds count as a relativedelta so minute/second fields split out.
        return dateutil.relativedelta.relativedelta(datetime.fromtimestamp(seconds), epoch)

    elapsed = as_delta(elapsed_s)
    if remaining_s and not np.isnan(remaining_s):
        remaining = as_delta(remaining_s)
        remaining_string = f'{remaining.minutes}m {remaining.seconds}s'
    else:
        # No estimate yet (first request) or the moving average is NaN.
        remaining_string = "?"
    print(f'{ticker} from {universe}{error_text} ({idx+1} / {len(securities)}). Elapsed: {elapsed.minutes}m {elapsed.seconds}s. Remaining: {remaining_string}.')
def get_remaining_seconds(all_load_times, idx, total):
    """Estimate seconds left using a moving average of recent request times.

    all_load_times: per-request durations so far (idx + 1 entries).
    idx: zero-based index of the request just completed.
    total: total number of securities to load. (This parameter was previously
        named ``len``, shadowing the builtin; all callers pass it
        positionally, so the rename is backward-compatible.)
    """
    window = min(idx + 1, 25)  # average over at most the last 25 requests
    load_time_ma = pd.Series(all_load_times).rolling(window).mean().tail(1).item()
    return (total - idx) * load_time_ma
def load_prices_from_tda(securities):
    """Download daily candles for every security from TD Ameritrade and write
    the combined result to PRICE_DATA_OUTPUT.

    Uses the module-level API_KEY and TD_API endpoint template; prints a
    progress line with an ETA after each request.
    """
    print("*** Loading Stocks from TD Ameritrade ***")
    headers = {"Cache-Control" : "no-cache"}
    params = tda_params(API_KEY)
    tickers_dict = {}
    start = time.time()
    load_times = []
    for idx, sec in enumerate(securities):
        r_start = time.time()
        response = requests.get(
            TD_API % sec["ticker"],
            params=params,
            headers=headers
        )
        now = time.time()
        current_load_time = now - r_start
        load_times.append(current_load_time)
        # A moving average of recent request times drives the ETA estimate.
        remaining_seconds = get_remaining_seconds(load_times, idx, len(securities))
        ticker_data = response.json()
        enrich_ticker_data(ticker_data, sec)
        tickers_dict[sec["ticker"]] = ticker_data
        # Non-200 responses are still recorded; only the progress line flags them.
        error_text = f' Error with code {response.status_code}' if response.status_code != 200 else ''
        print_data_progress(sec["ticker"], sec["universe"], idx, securities, error_text, now - start, remaining_seconds)
    create_price_history_file(tickers_dict)
def get_yf_data(security, start_date, end_date):
    """Download OHLCV history from Yahoo Finance and shape it like the TDA payload."""
    # Yahoo uses '-' where the source tickers use '.' (e.g. BRK.B -> BRK-B).
    escaped_ticker = security["ticker"].replace(".", "-")
    df = yf.download(escaped_ticker, start=start_date, end=end_date)
    history = df.to_dict()
    stamps = [int(ts.timestamp()) for ts in history["Open"].keys()]
    opens = list(history["Open"].values())
    closes = list(history["Close"].values())
    lows = list(history["Low"].values())
    highs = list(history["High"].values())
    volumes = list(history["Volume"].values())
    candles = [
        {
            "open": o,
            "close": c,
            "low": lo,
            "high": hi,
            "volume": vol,
            "datetime": ts,
        }
        for o, c, lo, hi, vol, ts in zip(opens, closes, lows, highs, volumes, stamps)
    ]
    ticker_data = {"candles": candles}
    enrich_ticker_data(ticker_data, security)
    return ticker_data
def load_prices_from_yahoo(securities):
    """Download one year of daily prices for every security via Yahoo Finance
    and write the combined result to PRICE_DATA_OUTPUT.

    BUG FIX: the ETA line contained a duplicated assignment
    (``remaining_seconds = remaining_seconds = ...``); it also recomputed
    ``time.time() - start`` for the progress print while the TDA loader uses
    the already-captured ``now`` — both are now consistent.
    """
    print("*** Loading Stocks from Yahoo Finance ***")
    today = date.today()
    start = time.time()
    start_date = today - dt.timedelta(days=1*365)
    tickers_dict = {}
    load_times = []
    for idx, security in enumerate(securities):
        r_start = time.time()
        ticker_data = get_yf_data(security, start_date, today)
        now = time.time()
        load_times.append(now - r_start)
        # A moving average of recent request times drives the ETA estimate.
        remaining_seconds = get_remaining_seconds(load_times, idx, len(securities))
        print_data_progress(security["ticker"], security["universe"], idx, securities, "", now - start, remaining_seconds)
        tickers_dict[security["ticker"]] = ticker_data
    create_price_history_file(tickers_dict)
def save_data(source, securities):
    """Dispatch the download to the loader matching `source`; unknown sources are a no-op."""
    if source == "TD_AMERITRADE":
        load_prices_from_tda(securities)
    elif source == "YAHOO":
        load_prices_from_yahoo(securities)
def main():
    """Entry point: fetch price history from the configured source for all universes."""
    save_data(DATA_SOURCE, SECURITIES)
if __name__ == "__main__":
    main()
| [
"pandas.Series",
"datetime.datetime.fromtimestamp",
"pickle.dump",
"numpy.minimum",
"os.path.join",
"requests.get",
"datetime.timedelta",
"os.path.realpath",
"bs4.BeautifulSoup",
"yfinance.download",
"yaml.safe_load",
"numpy.isnan",
"datetime.date.today",
"time.time",
"json.dump"
] | [((2520, 2567), 'os.path.join', 'os.path.join', (['DIR', '"""data"""', '"""price_history.json"""'], {}), "(DIR, 'data', 'price_history.json')\n", (2532, 2567), False, 'import os\n'), ((361, 387), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (377, 387), False, 'import os\n'), ((1244, 1261), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1256, 1261), False, 'import requests\n'), ((1273, 1308), 'bs4.BeautifulSoup', 'bs.BeautifulSoup', (['resp.text', '"""lxml"""'], {}), "(resp.text, 'lxml')\n", (1289, 1308), True, 'import bs4 as bs\n'), ((3410, 3435), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(0)'], {}), '(0)\n', (3432, 3435), False, 'from datetime import datetime\n'), ((3447, 3480), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['elapsed_s'], {}), '(elapsed_s)\n', (3469, 3480), False, 'from datetime import datetime\n'), ((4432, 4443), 'time.time', 'time.time', ([], {}), '()\n', (4441, 4443), False, 'import time\n'), ((5408, 5467), 'yfinance.download', 'yf.download', (['escaped_ticker'], {'start': 'start_date', 'end': 'end_date'}), '(escaped_ticker, start=start_date, end=end_date)\n', (5419, 5467), True, 'import yfinance as yf\n'), ((6539, 6551), 'datetime.date.today', 'date.today', ([], {}), '()\n', (6549, 6551), False, 'from datetime import date\n'), ((6564, 6575), 'time.time', 'time.time', ([], {}), '()\n', (6573, 6575), False, 'import time\n'), ((412, 437), 'os.path.join', 'os.path.join', (['DIR', '"""data"""'], {}), "(DIR, 'data')\n", (424, 437), False, 'import os\n'), ((456, 481), 'os.path.join', 'os.path.join', (['DIR', '"""data"""'], {}), "(DIR, 'data')\n", (468, 481), False, 'import os\n'), ((505, 529), 'os.path.join', 'os.path.join', (['DIR', '"""tmp"""'], {}), "(DIR, 'tmp')\n", (517, 529), False, 'import os\n'), ((548, 572), 'os.path.join', 'os.path.join', (['DIR', '"""tmp"""'], {}), "(DIR, 'tmp')\n", (560, 572), False, 'import os\n'), ((677, 699), 'yaml.safe_load', 
'yaml.safe_load', (['stream'], {}), '(stream)\n', (691, 699), False, 'import yaml\n'), ((869, 891), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (883, 891), False, 'import yaml\n'), ((1765, 1785), 'pickle.dump', 'pickle.dump', (['secs', 'f'], {}), '(secs, f)\n', (1776, 1785), False, 'import pickle\n'), ((2748, 2775), 'json.dump', 'json.dump', (['tickers_dict', 'fp'], {}), '(tickers_dict, fp)\n', (2757, 2775), False, 'import json\n'), ((3612, 3647), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['remaining_s'], {}), '(remaining_s)\n', (3634, 3647), False, 'from datetime import datetime\n'), ((4526, 4537), 'time.time', 'time.time', ([], {}), '()\n', (4535, 4537), False, 'import time\n'), ((4557, 4625), 'requests.get', 'requests.get', (["(TD_API % sec['ticker'])"], {'params': 'params', 'headers': 'headers'}), "(TD_API % sec['ticker'], params=params, headers=headers)\n", (4569, 4625), False, 'import requests\n'), ((4698, 4709), 'time.time', 'time.time', ([], {}), '()\n', (4707, 4709), False, 'import time\n'), ((6601, 6627), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1 * 365)'}), '(days=1 * 365)\n', (6613, 6627), True, 'import datetime as dt\n'), ((6734, 6745), 'time.time', 'time.time', ([], {}), '()\n', (6743, 6745), False, 'import time\n'), ((6823, 6834), 'time.time', 'time.time', ([], {}), '()\n', (6832, 6834), False, 'import time\n'), ((594, 634), 'os.path.join', 'os.path.join', (['DIR', '"""config_private.yaml"""'], {}), "(DIR, 'config_private.yaml')\n", (606, 634), False, 'import os\n'), ((1701, 1743), 'os.path.join', 'os.path.join', (['DIR', '"""tmp"""', '"""tickers.pickle"""'], {}), "(DIR, 'tmp', 'tickers.pickle')\n", (1713, 1743), False, 'import os\n'), ((3574, 3595), 'numpy.isnan', 'np.isnan', (['remaining_s'], {}), '(remaining_s)\n', (3582, 3595), True, 'import numpy as np\n'), ((7117, 7128), 'time.time', 'time.time', ([], {}), '()\n', (7126, 7128), False, 'import time\n'), ((4101, 4124), 'numpy.minimum', 
'np.minimum', (['(idx + 1)', '(25)'], {}), '(idx + 1, 25)\n', (4111, 4124), True, 'import numpy as np\n'), ((4067, 4092), 'pandas.Series', 'pd.Series', (['all_load_times'], {}), '(all_load_times)\n', (4076, 4092), True, 'import pandas as pd\n')] |
# cannot combine, regulons are different in different datasets
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
#----------------------variable------------------------
fmt='tif' #output image format
n=10 #rows (regulons) to plot per cell type
o=20 #overlap check: top-o regulons compared between the two cell types
fd_rss='./out/a07_regulon_01_rss' #input folder with per-sample rss csvs
fd_out='./out/a07_regulon_04_plot-rss-avg' #output folder for csvs and plots
l_sample=['Ctrl', 'MethFix', 'RNAlater'] #sample (fixation) conditions
l_cell=['Root', 'Spindle'] #cell types; order matches l_color
l_color=['#c2eb7c', '#ebbfa4'] #bar colors per cell type
#--------------------setup----------------------------
Path(fd_out).mkdir(exist_ok=True, parents=True)
#-------------------function--------------------------
def load_df(fname):
    """Load one per-sample RSS csv and split it into root and spindle frames.

    Returns (df_root, df_spindle): single-column frames indexed by regulon,
    whose column name is the sample name (file stem without the "rss_" prefix).

    BUG FIX: the original used ``str.lstrip("rss_")``, which strips the
    *character set* {r, s, _} rather than the prefix, mangling any sample
    name that starts with those letters (e.g. "sample1" -> "ample1").
    """
    #1. load
    df=pd.read_csv(fname, index_col=0)
    sample=Path(fname).stem
    if sample.startswith('rss_'):
        sample=sample[len('rss_'):]
    #2. split into root (Spindle-Root-1) and spindle (Spindle-Root-2)
    df1=df.loc[:, ['Spindle-Root-1']].copy()
    df1.columns=[sample]
    df2=df.loc[:, ['Spindle-Root-2']].copy()
    df2.columns=[sample]
    return df1, df2
def merge_df(l_df):
    """Inner-join the per-sample frames on regulon, add avg/std columns, sort by avg desc."""
    merged, *others = l_df
    for extra in others:
        merged = merged.merge(extra, left_index=True, right_index=True)
    # Mean and spread across the sample columns only.
    sample_block = merged.loc[:, l_sample]
    merged['avg'] = sample_block.mean(axis=1)
    merged['std'] = sample_block.std(axis=1)
    return merged.sort_values('avg', ascending=False)
def barplot_rss(df, title, f_out, n=n, c='#ebbfa4'):
    """Horizontal bar chart of the top-n regulons by mean RSS, with std error bars.

    df: frame indexed by regulon with 'avg' and 'std' columns, sorted descending.
    title: figure title (cell type name).
    f_out: output image path.
    n: number of rows to draw (default captures the module-level n at def time).
    c: bar fill color.
    """
    #1. clean df: keep the top-n rows and expose the regulon index as a column
    n=min(n, df.shape[0])
    df=df.iloc[0:n, :]
    df.index.name='Regulon'
    df=df.reset_index()
    low=np.zeros(n)  # zero lower error so bars only show +std
    #2. plot
    sns.set()
    fig, ax=plt.subplots(figsize=(6, 6))
    ax=sns.barplot(x='avg', y='Regulon', data=df, color=c)
    ax.errorbar(df['avg'], df.index, xerr=[low, df['std']], linestyle='none', color='Grey')
    #3. adjust: invert y so the top regulon sits at the top, ticks on top edge
    plt.title(title, fontsize=24, pad=25, weight='medium')
    plt.ylim([n-0.5, -0.5])
    plt.xlabel('', fontsize=22, labelpad=10)
    plt.ylabel('', fontsize=22, labelpad=10)
    ax.xaxis.tick_top()
    plt.xticks(fontsize=13)
    plt.yticks(fontsize=16, rotation=0, weight='medium')
    #4. save
    plt.tight_layout()
    plt.savefig(f_out, dpi=300)
    plt.close()
    return
#############################################################
#1. load
# Load every sample's rss csv and split each into root/spindle columns.
l_root=[]
l_spin=[]
for sample in l_sample:
    df1, df2=load_df(f'{fd_rss}/rss_{sample}.csv')
    l_root.append(df1)
    l_spin.append(df2)
#2. merge samples per cell type and persist the aggregated tables
df_root=merge_df(l_root)
df_root.to_csv(f'{fd_out}/root.csv')
df_spin=merge_df(l_spin)
df_spin.to_csv(f'{fd_out}/spin.csv')
#3. get uniq regulons: drop any regulon appearing in both top-o lists
l_overlap=[i for i in df_root.index.tolist()[0:o] if i in df_spin.index.tolist()[0:o]]
df_root=df_root.loc[~df_root.index.isin(l_overlap), :]
df_spin=df_spin.loc[~df_spin.index.isin(l_overlap), :]
#3. plot one bar chart per cell type
title=l_cell[0]
f_out=f'{fd_out}/{title}.{fmt}'
barplot_rss(df_root, title, f_out, c=l_color[0])
title=l_cell[1]
f_out=f'{fd_out}/{title}.{fmt}'
barplot_rss(df_spin, title, f_out, c=l_color[1])
| [
"seaborn.set",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"pathlib.Path",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"numpy.zeros",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
... | [((743, 774), 'pandas.read_csv', 'pd.read_csv', (['fname'], {'index_col': '(0)'}), '(fname, index_col=0)\n', (754, 774), True, 'import pandas as pd\n'), ((1454, 1465), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1462, 1465), True, 'import numpy as np\n'), ((1477, 1486), 'seaborn.set', 'sns.set', ([], {}), '()\n', (1484, 1486), True, 'import seaborn as sns\n'), ((1496, 1524), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (1508, 1524), True, 'import matplotlib.pyplot as plt\n'), ((1529, 1580), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""avg"""', 'y': '"""Regulon"""', 'data': 'df', 'color': 'c'}), "(x='avg', y='Regulon', data=df, color=c)\n", (1540, 1580), True, 'import seaborn as sns\n'), ((1683, 1737), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(24)', 'pad': '(25)', 'weight': '"""medium"""'}), "(title, fontsize=24, pad=25, weight='medium')\n", (1692, 1737), True, 'import matplotlib.pyplot as plt\n'), ((1739, 1764), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[n - 0.5, -0.5]'], {}), '([n - 0.5, -0.5])\n', (1747, 1764), True, 'import matplotlib.pyplot as plt\n'), ((1764, 1804), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['""""""'], {'fontsize': '(22)', 'labelpad': '(10)'}), "('', fontsize=22, labelpad=10)\n", (1774, 1804), True, 'import matplotlib.pyplot as plt\n'), ((1806, 1846), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""""""'], {'fontsize': '(22)', 'labelpad': '(10)'}), "('', fontsize=22, labelpad=10)\n", (1816, 1846), True, 'import matplotlib.pyplot as plt\n'), ((1869, 1892), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(13)'}), '(fontsize=13)\n', (1879, 1892), True, 'import matplotlib.pyplot as plt\n'), ((1894, 1946), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(16)', 'rotation': '(0)', 'weight': '"""medium"""'}), "(fontsize=16, rotation=0, weight='medium')\n", (1904, 1946), True, 'import matplotlib.pyplot as plt\n'), ((1958, 1976), 
'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1974, 1976), True, 'import matplotlib.pyplot as plt\n'), ((1978, 2005), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f_out'], {'dpi': '(300)'}), '(f_out, dpi=300)\n', (1989, 2005), True, 'import matplotlib.pyplot as plt\n'), ((2007, 2018), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2016, 2018), True, 'import matplotlib.pyplot as plt\n'), ((524, 536), 'pathlib.Path', 'Path', (['fd_out'], {}), '(fd_out)\n', (528, 536), False, 'from pathlib import Path\n'), ((846, 857), 'pathlib.Path', 'Path', (['fname'], {}), '(fname)\n', (850, 857), False, 'from pathlib import Path\n'), ((940, 951), 'pathlib.Path', 'Path', (['fname'], {}), '(fname)\n', (944, 951), False, 'from pathlib import Path\n')] |
import torch
import numpy as np
import torch.optim as optim
from torch.nn import NLLLoss
from torch.utils.data import DataLoader
from torch.utils.data.sampler import RandomSampler
from torch.nn.utils import clip_grad_norm
from torchvision.datasets import CIFAR10
from torchvision.transforms import transforms
from src.model import CIFAR10_Network
class CIFAR10Trainer:
    """Trains and evaluates a CIFAR10_Network on the CIFAR-10 dataset.

    Expects a ``parameters`` object exposing: dataset_dir, batch_size,
    num_workers, use_gpu, num_epochs, max_norm, model_dir, optimizer,
    learning_rate and (for SGD) momentum / nesterov.
    """
    def __init__(self, parameters):
        self.params = parameters
        # Transform applied to each image
        transform = transforms.ToTensor()
        # Initialize datasets (downloaded into dataset_dir on first use)
        self.trainset = CIFAR10(
            root=self.params.dataset_dir, train=True, download=True, transform=transform
        )
        self.testset = CIFAR10(
            root=self.params.dataset_dir,
            train=False,
            download=True,
            transform=transform,
        )
        # Initialize loaders; training order is randomized via RandomSampler
        self.trainloader = DataLoader(
            self.trainset,
            batch_size=self.params.batch_size,
            shuffle=False,
            num_workers=self.params.num_workers,
            sampler=RandomSampler(self.trainset),
        )
        self.testloader = DataLoader(
            self.testset,
            batch_size=self.params.batch_size,
            shuffle=False,
            num_workers=self.params.num_workers,
        )
        # Use the GPU only when requested AND actually available
        self.use_gpu = self.params.use_gpu and torch.cuda.is_available()
        self.device = torch.device("cuda:0" if self.use_gpu else "cpu")
        # Initialize model
        self.model = CIFAR10_Network(self.params)
        self.model.to(self.device)
        print(self.model)
        print("Number of parameters = {}".format(self.model.num_parameters()))
        # Setup optimizer
        self.optimizer = self.optimizer_select()
        # Criterion (NLLLoss expects the model to output log-probabilities)
        self.criterion = NLLLoss()
    def train_model(self):
        """Run the full training loop and persist the best-performing model.

        Returns:
            numpy array with the average training loss of each epoch.
        """
        max_accuracy = None
        best_model = None
        avg_losses = np.zeros(self.params.num_epochs)
        for epoch in range(self.params.num_epochs):
            try:
                print("Epoch {}".format(epoch + 1))
                print("Learning Rate= {}".format(self.optimizer.param_groups[0]["lr"]))
                # Set mode to training
                self.model.train()
                # Go through the training set
                avg_losses[epoch] = self.train_epoch()
                print("Average loss= {}".format(avg_losses[epoch]))
                # Switch to eval and go through the test set
                self.model.eval()
                test_accuracy = self.test_epoch()
                print(
                    "In Epoch {}, Obtained Accuracy {:.2f}".format(
                        epoch + 1, test_accuracy
                    )
                )
                if max_accuracy is None or max_accuracy < test_accuracy:
                    max_accuracy = test_accuracy
                    # BUG FIX: state_dict() returns live references to the
                    # model's tensors, so the "best" snapshot would silently
                    # track later updates.  Detach and clone for a real copy.
                    best_model = {
                        k: v.detach().clone()
                        for k, v in self.model.state_dict().items()
                    }
            except KeyboardInterrupt:
                print("Training was interrupted")
                break
        # Saving trained model.  Guard against an interrupt before the first
        # epoch finished, in which case there is no snapshot to save.
        if best_model is not None:
            self.save_model(best_model)
        else:
            print("No epoch completed; model was not saved")
        return avg_losses
    def train_epoch(self):
        """Run one pass over the training set; returns the average batch loss."""
        losses = 0.0
        inf = float("inf")
        for batch_index, data in enumerate(self.trainloader, 1):
            if batch_index % 200 == 0:
                print("Step {}".format(batch_index))
                print("Average Loss so far: {}".format(losses / batch_index))
            # Split data tuple
            inputs, labels = data
            inputs, labels = inputs.to(self.device), labels.to(self.device)
            # Main Model Forward Step
            output = self.model(inputs)
            # Loss Computation
            loss = self.criterion(output, labels)
            loss_value = loss.data.item()
            # Skip non-finite losses when accumulating the running total
            if loss_value == inf or loss_value == -inf:
                print("Warning, received inf loss. Skipping it")
            elif loss_value != loss_value:  # NaN is never equal to itself
                print("Warning, received nan loss.")
            else:
                losses = losses + loss_value
            # Zero the optimizer gradient
            self.optimizer.zero_grad()
            # Backward step
            loss.backward()
            # Clip gradients (clip_grad_norm is the deprecated alias of
            # clip_grad_norm_; behavior is identical)
            clip_grad_norm(self.model.parameters(), self.params.max_norm)
            # Weight Update
            self.optimizer.step()
            if self.use_gpu:
                torch.cuda.synchronize()
            del inputs, labels, data, loss, output
        # Compute the average loss for this epoch
        return losses / len(self.trainloader)
    def test_epoch(self):
        """Evaluate on the test set; returns the accuracy as a percentage."""
        correct = 0
        total = 0
        # No gradients are needed for evaluation; saves memory and time
        with torch.no_grad():
            for data in self.testloader:
                # Split data tuple
                inputs, labels = data
                inputs, labels = inputs.to(self.device), labels.to(self.device)
                # Forward step; predicted class is the argmax over logits
                outputs = self.model(inputs)
                _, predicted = torch.max(outputs.data, dim=1)
                total += labels.size(0)
                correct += torch.sum(predicted == labels.data)
                del outputs, inputs, labels, data
        total_accuracy = correct * 1.0 / total * 100.0
        return total_accuracy
    def save_model(self, model_parameters):
        """Load ``model_parameters`` into the model and write it to disk."""
        self.model.load_state_dict(model_parameters)
        torch.save(
            self.serialize(), self.params.model_dir / "trained_model.pt",
        )
    def serialize(self):
        """Package model and optimizer state for torch.save (model on CPU)."""
        model_is_cuda = next(self.model.parameters()).is_cuda
        model = self.model.cpu() if model_is_cuda else self.model
        package = {
            "state_dict": model.state_dict(),
            "optim_dict": self.optimizer.state_dict(),
        }
        return package
    def optimizer_select(self):
        """Build the optimizer named in the parameters (Adam or SGD).

        Raises:
            NotImplementedError: for any other optimizer name.
        """
        if self.params.optimizer == "Adam":
            return optim.Adam(self.model.parameters(), lr=self.params.learning_rate)
        elif self.params.optimizer == "SGD":
            return optim.SGD(
                self.model.parameters(),
                lr=self.params.learning_rate,
                momentum=self.params.momentum,
                nesterov=self.params.nesterov,
            )
        else:
            raise NotImplementedError
| [
"src.model.CIFAR10_Network",
"torch.max",
"torch.cuda.synchronize",
"torchvision.datasets.CIFAR10",
"numpy.zeros",
"torchvision.transforms.transforms.ToTensor",
"torch.nn.NLLLoss",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.sum",
"torch.utils.data.sampler.RandomSampler",
... | [((503, 524), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (522, 524), False, 'from torchvision.transforms import transforms\n'), ((580, 670), 'torchvision.datasets.CIFAR10', 'CIFAR10', ([], {'root': 'self.params.dataset_dir', 'train': '(True)', 'download': '(True)', 'transform': 'transform'}), '(root=self.params.dataset_dir, train=True, download=True, transform=\n transform)\n', (587, 670), False, 'from torchvision.datasets import CIFAR10\n'), ((711, 802), 'torchvision.datasets.CIFAR10', 'CIFAR10', ([], {'root': 'self.params.dataset_dir', 'train': '(False)', 'download': '(True)', 'transform': 'transform'}), '(root=self.params.dataset_dir, train=False, download=True, transform\n =transform)\n', (718, 802), False, 'from torchvision.datasets import CIFAR10\n'), ((1163, 1278), 'torch.utils.data.DataLoader', 'DataLoader', (['self.testset'], {'batch_size': 'self.params.batch_size', 'shuffle': '(False)', 'num_workers': 'self.params.num_workers'}), '(self.testset, batch_size=self.params.batch_size, shuffle=False,\n num_workers=self.params.num_workers)\n', (1173, 1278), False, 'from torch.utils.data import DataLoader\n'), ((1457, 1506), 'torch.device', 'torch.device', (["('cuda:0' if self.use_gpu else 'cpu')"], {}), "('cuda:0' if self.use_gpu else 'cpu')\n", (1469, 1506), False, 'import torch\n'), ((1556, 1584), 'src.model.CIFAR10_Network', 'CIFAR10_Network', (['self.params'], {}), '(self.params)\n', (1571, 1584), False, 'from src.model import CIFAR10_Network\n'), ((1849, 1858), 'torch.nn.NLLLoss', 'NLLLoss', ([], {}), '()\n', (1856, 1858), False, 'from torch.nn import NLLLoss\n'), ((1962, 1994), 'numpy.zeros', 'np.zeros', (['self.params.num_epochs'], {}), '(self.params.num_epochs)\n', (1970, 1994), True, 'import numpy as np\n'), ((1409, 1434), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1432, 1434), False, 'import torch\n'), ((5022, 5052), 'torch.max', 'torch.max', (['outputs.data'], {'dim': 
'(1)'}), '(outputs.data, dim=1)\n', (5031, 5052), False, 'import torch\n'), ((5112, 5147), 'torch.sum', 'torch.sum', (['(predicted == labels.data)'], {}), '(predicted == labels.data)\n', (5121, 5147), False, 'import torch\n'), ((1096, 1124), 'torch.utils.data.sampler.RandomSampler', 'RandomSampler', (['self.trainset'], {}), '(self.trainset)\n', (1109, 1124), False, 'from torch.utils.data.sampler import RandomSampler\n'), ((4484, 4508), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (4506, 4508), False, 'import torch\n')] |
""" To make fake Datasets
Wanted to keep this out of the testing frame works, as other repos, might want to use this
"""
from typing import List
import numpy as np
import pandas as pd
import xarray as xr
from nowcasting_dataset.consts import NWP_VARIABLE_NAMES, SAT_VARIABLE_NAMES
from nowcasting_dataset.data_sources.gsp.gsp_model import GSP
from nowcasting_dataset.data_sources.metadata.metadata_model import Metadata
from nowcasting_dataset.data_sources.nwp.nwp_model import NWP
from nowcasting_dataset.data_sources.pv.pv_model import PV
from nowcasting_dataset.data_sources.satellite.satellite_model import HRVSatellite, Satellite
from nowcasting_dataset.data_sources.sun.sun_model import Sun
from nowcasting_dataset.data_sources.topographic.topographic_model import Topographic
from nowcasting_dataset.dataset.xr_utils import (
convert_coordinates_to_indexes,
convert_coordinates_to_indexes_for_list_datasets,
join_list_dataset_to_batch_dataset,
)
def gsp_fake(
    batch_size,
    seq_length_30,
    n_gsp_per_batch,
):
    """Build a fake GSP batch from randomly generated 30-minute datasets."""
    # One fake dataset per example in the batch
    examples = []
    for _ in range(batch_size):
        examples.append(
            create_gsp_pv_dataset(
                seq_length=seq_length_30,
                freq="30T",
                number_of_systems=n_gsp_per_batch,
            )
        )
    # Swap coordinate values for plain integer indexes, then stack into a batch
    examples = convert_coordinates_to_indexes_for_list_datasets(examples)
    return GSP(join_list_dataset_to_batch_dataset(examples))
def metadata_fake(batch_size):
    """Build a fake metadata batch as an xr dataset."""
    # Generate one metadata example per batch entry, already index-converted
    indexed = [
        convert_coordinates_to_indexes(create_metadata_dataset())
        for _ in range(batch_size)
    ]
    return Metadata(join_list_dataset_to_batch_dataset(indexed))
def nwp_fake(
    batch_size=32,
    seq_length_5=19,
    image_size_pixels=64,
    number_nwp_channels=7,
) -> NWP:
    """Build a fake NWP batch of random image arrays."""
    arrays = []
    for _ in range(batch_size):
        arrays.append(
            create_image_array(
                seq_length_5=seq_length_5,
                image_size_pixels=image_size_pixels,
                channels=NWP_VARIABLE_NAMES[0:number_nwp_channels],
            )
        )
    ds = join_list_data_array_to_batch_dataset(arrays)
    # NWP also carries the forecast init time: first time step of each example
    ds["init_time"] = ds.time[:, 0]
    return NWP(ds)
def pv_fake(batch_size, seq_length_5, n_pv_systems_per_batch):
    """Build a fake PV batch (capacity held constant over time)."""
    per_example = []
    for _ in range(batch_size):
        per_example.append(
            create_gsp_pv_dataset(
                seq_length=seq_length_5,
                freq="5T",
                number_of_systems=n_pv_systems_per_batch,
                time_dependent_capacity=False,
            )
        )
    # Swap coordinate values for plain integer indexes, then stack into a batch
    per_example = convert_coordinates_to_indexes_for_list_datasets(per_example)
    return PV(join_list_dataset_to_batch_dataset(per_example))
def satellite_fake(
    batch_size=32,
    seq_length_5=19,
    satellite_image_size_pixels=64,
    number_satellite_channels=7,
) -> Satellite:
    """Build a fake (non-HRV) satellite batch of random image arrays."""
    example_images = []
    for _ in range(batch_size):
        example_images.append(
            create_image_array(
                seq_length_5=seq_length_5,
                image_size_pixels=satellite_image_size_pixels,
                # channel 0 is skipped here (used by hrv_satellite_fake)
                channels=SAT_VARIABLE_NAMES[1:number_satellite_channels],
            )
        )
    return Satellite(join_list_data_array_to_batch_dataset(example_images))
def hrv_satellite_fake(
    batch_size=32,
    seq_length_5=19,
    satellite_image_size_pixels=64,
    number_satellite_channels=7,
) -> Satellite:
    """Build a fake HRV satellite batch (single channel, 3x resolution)."""
    example_images = []
    for _ in range(batch_size):
        example_images.append(
            create_image_array(
                seq_length_5=seq_length_5,
                # HRV images are 3x other images
                image_size_pixels=satellite_image_size_pixels * 3,
                channels=SAT_VARIABLE_NAMES[0:1],
            )
        )
    return HRVSatellite(join_list_data_array_to_batch_dataset(example_images))
def sun_fake(batch_size, seq_length_5):
    """Build a fake sun batch (azimuth and elevation indexed by time)."""
    datasets = [
        create_sun_dataset(seq_length=seq_length_5) for _ in range(batch_size)
    ]
    return Sun(join_list_dataset_to_batch_dataset(datasets))
def topographic_fake(batch_size, image_size_pixels):
    """Build a fake topographic batch of random elevation maps."""
    arrays = []
    for _ in range(batch_size):
        elevation = np.random.randn(
            image_size_pixels,
            image_size_pixels,
        )
        # x ascending, y descending (copy() makes the reversed view contiguous)
        x_values = np.sort(np.random.randn(image_size_pixels))
        y_values = np.sort(np.random.randn(image_size_pixels))[::-1].copy()
        arrays.append(
            xr.DataArray(
                data=elevation,
                dims=["x", "y"],
                coords=dict(x=x_values, y=y_values),
                name="data",
            )
        )
    return Topographic(join_list_data_array_to_batch_dataset(arrays))
def create_image_array(
    dims=("time", "x", "y", "channels"),
    seq_length_5=19,
    image_size_pixels=64,
    channels=SAT_VARIABLE_NAMES,
):
    """Create a fake Satellite or NWP image DataArray (non-negative values)."""
    # Candidate coordinate values; only the requested `dims` are attached
    coord_values = {
        "time": pd.date_range("2021-01-01", freq="5T", periods=seq_length_5),
        "x": np.random.randint(low=0, high=1000, size=image_size_pixels),
        "y": np.random.randint(low=0, high=1000, size=image_size_pixels),
        "channels": np.array(channels),
    }
    # Fake pixel data for testing; abs() keeps values non-negative
    fake_pixels = abs(
        np.random.randn(
            seq_length_5,
            image_size_pixels,
            image_size_pixels,
            len(channels),
        )
    )
    return xr.DataArray(
        fake_pixels,
        coords=[(dim, coord_values[dim]) for dim in dims],
        name="data",
    )
def create_gsp_pv_dataset(
    dims=("time", "id"),
    freq="5T",
    seq_length=19,
    number_of_systems=128,
    time_dependent_capacity: bool = True,
) -> xr.Dataset:
    """
    Create gsp or pv fake dataset
    Args:
        dims: the dims that are made for "power_mw"
        freq: the frequency of the time steps (pandas offset alias, e.g. "5T")
        seq_length: the time sequence length
        number_of_systems: number of pv or gsp systems
        time_dependent_capacity: if the capacity is time dependent.
            GSP capacities increase over time,
            but PV systems are the same (or should be).
    Returns: xr.Dataset of fake data with variables "power_mw" (clipped to be
        non-negative), "capacity_mwp", "x_coords", "y_coords" and
        "pv_system_row_number".
    """
    # ids are sampled without replacement so every system id is unique
    ALL_COORDS = {
        "time": pd.date_range("2021-01-01", freq=freq, periods=seq_length),
        "id": np.random.choice(range(1000), number_of_systems, replace=False),
    }
    coords = [(dim, ALL_COORDS[dim]) for dim in dims]
    data_array = xr.DataArray(
        np.random.randn(
            seq_length,
            number_of_systems,
        ),
        coords=coords,
    )  # Fake data for testing!
    if time_dependent_capacity:
        # One random value per time step, repeated across all systems:
        # repeat -> (seq*systems,), reshape -> (systems, seq), T -> (seq, systems)
        capacity = xr.DataArray(
            np.repeat(np.random.randn(seq_length), number_of_systems)
            .reshape(number_of_systems, seq_length)
            .T,
            coords=coords,
        )
    else:
        # Static capacity: one value per system, indexed by "id" only
        capacity = xr.DataArray(
            np.random.randn(number_of_systems),
            coords=[coords[1]],
        )
    data = data_array.to_dataset(name="power_mw")
    # Fake sorted, unique spatial coordinates (one per system)
    x_coords = xr.DataArray(
        data=np.sort(
            np.random.choice(range(2 * number_of_systems), number_of_systems, replace=False)
        ),
        dims=["id"],
    )
    y_coords = xr.DataArray(
        data=np.sort(
            np.random.choice(range(2 * number_of_systems), number_of_systems, replace=False)
        ),
        dims=["id"],
    )
    data["capacity_mwp"] = capacity
    data["x_coords"] = x_coords
    data["y_coords"] = y_coords
    # Add 1000 to the id numbers for the row numbers.
    # This is a quick way to make sure row number is different from id,
    data["pv_system_row_number"] = data["id"] + 1000
    # Power cannot be negative; clip the fake values at zero
    data.__setitem__("power_mw", data.power_mw.clip(min=0))
    return data
def create_sun_dataset(
    dims=("time",),
    freq="5T",
    seq_length=19,
) -> xr.Dataset:
    """
    Create sun fake dataset
    Args:
        dims: dimension names for the generated variables
        freq: frequency of the time index (pandas offset alias)
        seq_length: number of time steps
    Returns: xr.Dataset with "elevation" (clipped to [-90, 90]) and
        "azimuth" (clipped to [0, 360]) variables, coords as indexes
    """
    coord_values = {
        "time": pd.date_range("2021-01-01", freq=freq, periods=seq_length),
    }
    fake = xr.DataArray(
        np.random.randn(
            seq_length,
        ),
        coords=[(dim, coord_values[dim]) for dim in dims],
    )  # Fake data for testing!
    sun = fake.to_dataset(name="elevation")
    sun["azimuth"] = sun.elevation
    # Clip both angles to their physically valid ranges
    sun["azimuth"] = sun.azimuth.clip(min=0, max=360)
    sun["elevation"] = sun.elevation.clip(min=-90, max=90)
    return convert_coordinates_to_indexes(sun)
def create_metadata_dataset() -> xr.Dataset:
    """Create fake metadata dataset"""
    # t0 is 30 minutes past the start of 2021-01-01 (single timestamp)
    t0 = pd.date_range("2021-01-01", freq="5T", periods=1) + pd.Timedelta("30T")
    data = xr.DataArray.from_dict({"dims": ("t0_dt",), "data": t0}).to_dataset(
        name="data"
    )
    # Attach one random integer variable per metadata field
    for name in ["x_meters_center", "y_meters_center", "object_at_center_label"]:
        value_ds = xr.DataArray.from_dict(
            {"dims": ("t0_dt",), "data": [np.random.randint(0, 1000)]}
        ).to_dataset(name=name)
        data[name] = getattr(value_ds, name)
    return data
def create_datetime_dataset(
    seq_length=19,
) -> xr.Dataset:
    """Create fake datetime dataset

    Args:
        seq_length: number of 5-minute time steps.

    Returns: xr.Dataset with sin/cos encodings of day-of-year and hour-of-day
        ("day_of_year_cos", "day_of_year_sin", "hour_of_day_cos",
        "hour_of_day_sin"), all filled with the same fake random series.
    """
    ALL_COORDS = {
        "time": pd.date_range("2021-01-01", freq="5T", periods=seq_length),
    }
    coords = [("time", ALL_COORDS["time"])]
    data_array = xr.DataArray(
        np.random.randn(
            seq_length,
        ),
        coords=coords,
    )  # Fake data
    data = data_array.to_dataset()
    ds = data.rename({"data": "day_of_year_cos"})
    ds["day_of_year_sin"] = data.rename({"data": "day_of_year_sin"}).day_of_year_sin
    ds["hour_of_day_cos"] = data.rename({"data": "hour_of_day_cos"}).hour_of_day_cos
    ds["hour_of_day_sin"] = data.rename({"data": "hour_of_day_sin"}).hour_of_day_sin
    # BUG FIX: previously returned the raw "data" dataset, discarding every
    # renamed/derived variable assembled above — return the built dataset.
    return ds
def join_list_data_array_to_batch_dataset(data_arrays: List[xr.DataArray]) -> xr.Dataset:
    """Join a list of xr.DataArrays into an xr.Dataset by concatenating on the example dim."""
    # Convert each array to a dataset with index coordinates, then stack
    indexed = [
        convert_coordinates_to_indexes(array.to_dataset()) for array in data_arrays
    ]
    return join_list_dataset_to_batch_dataset(indexed)
| [
"nowcasting_dataset.data_sources.satellite.satellite_model.HRVSatellite",
"nowcasting_dataset.data_sources.gsp.gsp_model.GSP",
"nowcasting_dataset.dataset.xr_utils.join_list_dataset_to_batch_dataset",
"nowcasting_dataset.data_sources.pv.pv_model.PV",
"pandas.Timedelta",
"nowcasting_dataset.data_sources.me... | [((1372, 1433), 'nowcasting_dataset.dataset.xr_utils.convert_coordinates_to_indexes_for_list_datasets', 'convert_coordinates_to_indexes_for_list_datasets', (['xr_datasets'], {}), '(xr_datasets)\n', (1420, 1433), False, 'from nowcasting_dataset.dataset.xr_utils import convert_coordinates_to_indexes, convert_coordinates_to_indexes_for_list_datasets, join_list_dataset_to_batch_dataset\n'), ((1471, 1518), 'nowcasting_dataset.dataset.xr_utils.join_list_dataset_to_batch_dataset', 'join_list_dataset_to_batch_dataset', (['xr_datasets'], {}), '(xr_datasets)\n', (1505, 1518), False, 'from nowcasting_dataset.dataset.xr_utils import convert_coordinates_to_indexes, convert_coordinates_to_indexes_for_list_datasets, join_list_dataset_to_batch_dataset\n'), ((1531, 1546), 'nowcasting_dataset.data_sources.gsp.gsp_model.GSP', 'GSP', (['xr_dataset'], {}), '(xr_dataset)\n', (1534, 1546), False, 'from nowcasting_dataset.data_sources.gsp.gsp_model import GSP\n'), ((1826, 1871), 'nowcasting_dataset.dataset.xr_utils.join_list_dataset_to_batch_dataset', 'join_list_dataset_to_batch_dataset', (['xr_arrays'], {}), '(xr_arrays)\n', (1860, 1871), False, 'from nowcasting_dataset.dataset.xr_utils import convert_coordinates_to_indexes, convert_coordinates_to_indexes_for_list_datasets, join_list_dataset_to_batch_dataset\n'), ((1884, 1904), 'nowcasting_dataset.data_sources.metadata.metadata_model.Metadata', 'Metadata', (['xr_dataset'], {}), '(xr_dataset)\n', (1892, 1904), False, 'from nowcasting_dataset.data_sources.metadata.metadata_model import Metadata\n'), ((2478, 2493), 'nowcasting_dataset.data_sources.nwp.nwp_model.NWP', 'NWP', (['xr_dataset'], {}), '(xr_dataset)\n', (2481, 2493), False, 'from nowcasting_dataset.data_sources.nwp.nwp_model import NWP\n'), ((2936, 2997), 'nowcasting_dataset.dataset.xr_utils.convert_coordinates_to_indexes_for_list_datasets', 'convert_coordinates_to_indexes_for_list_datasets', (['xr_datasets'], {}), '(xr_datasets)\n', (2984, 
2997), False, 'from nowcasting_dataset.dataset.xr_utils import convert_coordinates_to_indexes, convert_coordinates_to_indexes_for_list_datasets, join_list_dataset_to_batch_dataset\n'), ((3035, 3082), 'nowcasting_dataset.dataset.xr_utils.join_list_dataset_to_batch_dataset', 'join_list_dataset_to_batch_dataset', (['xr_datasets'], {}), '(xr_datasets)\n', (3069, 3082), False, 'from nowcasting_dataset.dataset.xr_utils import convert_coordinates_to_indexes, convert_coordinates_to_indexes_for_list_datasets, join_list_dataset_to_batch_dataset\n'), ((3095, 3109), 'nowcasting_dataset.data_sources.pv.pv_model.PV', 'PV', (['xr_dataset'], {}), '(xr_dataset)\n', (3097, 3109), False, 'from nowcasting_dataset.data_sources.pv.pv_model import PV\n'), ((3674, 3695), 'nowcasting_dataset.data_sources.satellite.satellite_model.Satellite', 'Satellite', (['xr_dataset'], {}), '(xr_dataset)\n', (3683, 3695), False, 'from nowcasting_dataset.data_sources.satellite.satellite_model import HRVSatellite, Satellite\n'), ((4278, 4302), 'nowcasting_dataset.data_sources.satellite.satellite_model.HRVSatellite', 'HRVSatellite', (['xr_dataset'], {}), '(xr_dataset)\n', (4290, 4302), False, 'from nowcasting_dataset.data_sources.satellite.satellite_model import HRVSatellite, Satellite\n'), ((4640, 4685), 'nowcasting_dataset.dataset.xr_utils.join_list_dataset_to_batch_dataset', 'join_list_dataset_to_batch_dataset', (['xr_arrays'], {}), '(xr_arrays)\n', (4674, 4685), False, 'from nowcasting_dataset.dataset.xr_utils import convert_coordinates_to_indexes, convert_coordinates_to_indexes_for_list_datasets, join_list_dataset_to_batch_dataset\n'), ((4698, 4713), 'nowcasting_dataset.data_sources.sun.sun_model.Sun', 'Sun', (['xr_dataset'], {}), '(xr_dataset)\n', (4701, 4713), False, 'from nowcasting_dataset.data_sources.sun.sun_model import Sun\n'), ((5364, 5387), 'nowcasting_dataset.data_sources.topographic.topographic_model.Topographic', 'Topographic', (['xr_dataset'], {}), '(xr_dataset)\n', (5375, 5387), False, 
'from nowcasting_dataset.data_sources.topographic.topographic_model import Topographic\n'), ((9253, 9288), 'nowcasting_dataset.dataset.xr_utils.convert_coordinates_to_indexes', 'convert_coordinates_to_indexes', (['sun'], {}), '(sun)\n', (9283, 9288), False, 'from nowcasting_dataset.dataset.xr_utils import convert_coordinates_to_indexes, convert_coordinates_to_indexes_for_list_datasets, join_list_dataset_to_batch_dataset\n'), ((10931, 10975), 'nowcasting_dataset.dataset.xr_utils.join_list_dataset_to_batch_dataset', 'join_list_dataset_to_batch_dataset', (['datasets'], {}), '(datasets)\n', (10965, 10975), False, 'from nowcasting_dataset.dataset.xr_utils import convert_coordinates_to_indexes, convert_coordinates_to_indexes_for_list_datasets, join_list_dataset_to_batch_dataset\n'), ((1721, 1761), 'nowcasting_dataset.dataset.xr_utils.convert_coordinates_to_indexes', 'convert_coordinates_to_indexes', (['xr_array'], {}), '(xr_array)\n', (1751, 1761), False, 'from nowcasting_dataset.dataset.xr_utils import convert_coordinates_to_indexes, convert_coordinates_to_indexes_for_list_datasets, join_list_dataset_to_batch_dataset\n'), ((5623, 5683), 'pandas.date_range', 'pd.date_range', (['"""2021-01-01"""'], {'freq': '"""5T"""', 'periods': 'seq_length_5'}), "('2021-01-01', freq='5T', periods=seq_length_5)\n", (5636, 5683), True, 'import pandas as pd\n'), ((5698, 5757), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(1000)', 'size': 'image_size_pixels'}), '(low=0, high=1000, size=image_size_pixels)\n', (5715, 5757), True, 'import numpy as np\n'), ((5772, 5831), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(1000)', 'size': 'image_size_pixels'}), '(low=0, high=1000, size=image_size_pixels)\n', (5789, 5831), True, 'import numpy as np\n'), ((5853, 5871), 'numpy.array', 'np.array', (['channels'], {}), '(channels)\n', (5861, 5871), True, 'import numpy as np\n'), ((6950, 7008), 'pandas.date_range', 'pd.date_range', 
(['"""2021-01-01"""'], {'freq': 'freq', 'periods': 'seq_length'}), "('2021-01-01', freq=freq, periods=seq_length)\n", (6963, 7008), True, 'import pandas as pd\n'), ((7188, 7234), 'numpy.random.randn', 'np.random.randn', (['seq_length', 'number_of_systems'], {}), '(seq_length, number_of_systems)\n', (7203, 7234), True, 'import numpy as np\n'), ((8754, 8812), 'pandas.date_range', 'pd.date_range', (['"""2021-01-01"""'], {'freq': 'freq', 'periods': 'seq_length'}), "('2021-01-01', freq=freq, periods=seq_length)\n", (8767, 8812), True, 'import pandas as pd\n'), ((8913, 8940), 'numpy.random.randn', 'np.random.randn', (['seq_length'], {}), '(seq_length)\n', (8928, 8940), True, 'import numpy as np\n'), ((10006, 10064), 'pandas.date_range', 'pd.date_range', (['"""2021-01-01"""'], {'freq': '"""5T"""', 'periods': 'seq_length'}), "('2021-01-01', freq='5T', periods=seq_length)\n", (10019, 10064), True, 'import pandas as pd\n'), ((10155, 10182), 'numpy.random.randn', 'np.random.randn', (['seq_length'], {}), '(seq_length)\n', (10170, 10182), True, 'import numpy as np\n'), ((7622, 7656), 'numpy.random.randn', 'np.random.randn', (['number_of_systems'], {}), '(number_of_systems)\n', (7637, 7656), True, 'import numpy as np\n'), ((9445, 9494), 'pandas.date_range', 'pd.date_range', (['"""2021-01-01"""'], {'freq': '"""5T"""', 'periods': '(1)'}), "('2021-01-01', freq='5T', periods=1)\n", (9458, 9494), True, 'import pandas as pd\n'), ((9497, 9516), 'pandas.Timedelta', 'pd.Timedelta', (['"""30T"""'], {}), "('30T')\n", (9509, 9516), True, 'import pandas as pd\n'), ((9537, 9562), 'xarray.DataArray.from_dict', 'xr.DataArray.from_dict', (['d'], {}), '(d)\n', (9559, 9562), True, 'import xarray as xr\n'), ((4880, 4933), 'numpy.random.randn', 'np.random.randn', (['image_size_pixels', 'image_size_pixels'], {}), '(image_size_pixels, image_size_pixels)\n', (4895, 4933), True, 'import numpy as np\n'), ((9716, 9742), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', 
(9733, 9742), True, 'import numpy as np\n'), ((9770, 9795), 'xarray.DataArray.from_dict', 'xr.DataArray.from_dict', (['d'], {}), '(d)\n', (9792, 9795), True, 'import xarray as xr\n'), ((5062, 5096), 'numpy.random.randn', 'np.random.randn', (['image_size_pixels'], {}), '(image_size_pixels)\n', (5077, 5096), True, 'import numpy as np\n'), ((7414, 7441), 'numpy.random.randn', 'np.random.randn', (['seq_length'], {}), '(seq_length)\n', (7429, 7441), True, 'import numpy as np\n'), ((5125, 5159), 'numpy.random.randn', 'np.random.randn', (['image_size_pixels'], {}), '(image_size_pixels)\n', (5140, 5159), True, 'import numpy as np\n')] |
# Importing libraries
import numpy as np
import pandas as pd
from datetime import datetime
from sklearn.preprocessing import RobustScaler
def feat_goal_duration(df: pd.DataFrame):
    """Converts goal to USD and computes the duration between project launch and deadline and the duration between project creation and launch."""
    # Convert the pledged goal into US dollars via the exchange rate
    df["usd_goal"] = df.goal * df.fx_rate
    # Timestamps are unix seconds, so divide by seconds-per-day for day counts
    seconds_per_day = 60 * 60 * 24
    df["duration_days"] = round((df.deadline - df.launched_at) / seconds_per_day)
    df["duration_days_prep"] = round((df.launched_at - df.created_at) / seconds_per_day)
    # Raw columns are no longer needed once the derived features exist
    return df.drop(
        columns=[
            "goal",
            "currency",
            "fx_rate",
            "created_at",
            "deadline",
            "launched_at",
            "state_changed_at",
        ]
    )
def feat_time(df: pd.DataFrame):
    """Computes the season of the project deadline and whether the launch and deadline were on the weekend or not."""
    deadline = pd.DatetimeIndex(df["deadline_rd"])
    launched = pd.DatetimeIndex(df["launched_at_rd"])
    # Calendar components used to derive season and weekend flags
    df["year_deadline"] = deadline.year
    df["month_deadline"] = deadline.month
    df["weekday_deadline"] = deadline.weekday
    df["weekday_launched_at"] = launched.weekday
    # Season membership of the deadline month (autumn is the implicit baseline)
    season_months = {
        "winter_deadline": [12, 1, 2],
        "spring_deadline": [3, 4, 5],
        "summer_deadline": [6, 7, 8],
    }
    for column, months in season_months.items():
        df[column] = np.where(df["month_deadline"].isin(months), True, False)
    # Saturday (5) and Sunday (6) count as the weekend
    weekend_days = [5, 6]
    df["deadline_weekend"] = np.where(df["weekday_deadline"].isin(weekend_days), True, False)
    df["launched_weekend"] = np.where(df["weekday_launched_at"].isin(weekend_days), True, False)
    # One-hot encode the five boolean flags, dropping the first level
    df = pd.get_dummies(
        df,
        columns=[
            "winter_deadline",
            "spring_deadline",
            "summer_deadline",
            "deadline_weekend",
            "launched_weekend",
        ],
        drop_first=True,
    )
    # Drop raw date columns and intermediate calendar components
    return df.drop(
        columns=[
            "created_at_rd",
            "deadline_rd",
            "launched_at_rd",
            "state_changed_at_rd",
            "month_deadline",
            "weekday_deadline",
            "weekday_launched_at",
        ]
    )
def feat_location(df: pd.DataFrame):
    """Defines whether location of creator is on the eastcoast."""
    # US states (plus DC) counted as "east coast" for this feature
    eastern_states = [
        "ME", "NH", "VT", "NY", "MA", "RI", "CT", "NJ", "PA", "DE", "MD", "DC",
        "MI", "OH", "IN", "IL", "WI", "WV", "VA", "NC", "TN", "KY", "SC", "GA",
        "AL", "MS", "FL",
    ]
    df["eastcoast"] = np.where(df["location_state"].isin(eastern_states), True, False)
    # One-hot encode the flag, dropping the first level
    df = pd.get_dummies(df, columns=["eastcoast"], drop_first=True)
    return df.drop(columns=["location_name", "location_state"])
def feat_text(df: pd.DataFrame):
    """Defines text features regarding blurb, project name, state and creator name."""
    # Word counts (whitespace-separated) drive the "long" flags:
    # a text is "long" when its word count is at/above the column mean
    df["blurb_nwords"] = df["blurb"].str.count(" ") + 1
    df["long_blurb"] = np.where(df["blurb_nwords"] >= df.blurb_nwords.mean(), True, False)
    df["name_nwords"] = df["name"].str.count(" ") + 1
    df["long_name"] = np.where(df["name_nwords"] >= df.name_nwords.mean(), True, False)
    # Binary target: did the project succeed?
    df["state_b"] = np.where(df["state"] == "successful", True, False)
    # One-hot encode the three boolean flags, dropping the first level
    df = pd.get_dummies(df, columns=["long_blurb", "long_name", "state_b"], drop_first=True)
    # Drop the raw text columns and the intermediate word counts
    return df.drop(columns=["blurb", "blurb_nwords", "name", "name_nwords", "slug", "state"])
def scale_X(X):
    """Scales the columns that do not contain dummy variables using robust scaler.

    RobustScaler centers on the median and scales by the IQR, so it is less
    sensitive to outliers than standard scaling.  Note: the scaled columns end
    up first in the returned array and column names are lost (the output is a
    plain numpy array, not a DataFrame).
    """
    # Define columns to scale
    col_scale = ['category_parent_id','usd_goal', 'duration_days','duration_days_prep','year_deadline']
    # Scale columns (fit_transform fits on this data — no train/test split here)
    scaler = RobustScaler()
    X_scaled = scaler.fit_transform(X[col_scale])
    # Concatenating scaled and dummy columns
    X = np.concatenate([X_scaled, X.drop(col_scale, axis=1)], axis=1)
    return X | [
"pandas.get_dummies",
"sklearn.preprocessing.RobustScaler",
"pandas.DatetimeIndex",
"numpy.where"
] | [((1859, 2010), 'pandas.get_dummies', 'pd.get_dummies', (['df'], {'columns': "['winter_deadline', 'spring_deadline', 'summer_deadline',\n 'deadline_weekend', 'launched_weekend']", 'drop_first': '(True)'}), "(df, columns=['winter_deadline', 'spring_deadline',\n 'summer_deadline', 'deadline_weekend', 'launched_weekend'], drop_first=True\n )\n", (1873, 2010), True, 'import pandas as pd\n'), ((2631, 2689), 'pandas.get_dummies', 'pd.get_dummies', (['df'], {'columns': "['eastcoast']", 'drop_first': '(True)'}), "(df, columns=['eastcoast'], drop_first=True)\n", (2645, 2689), True, 'import pandas as pd\n'), ((3106, 3156), 'numpy.where', 'np.where', (["(df['blurb_nwords'] >= bmean)", '(True)', '(False)'], {}), "(df['blurb_nwords'] >= bmean, True, False)\n", (3114, 3156), True, 'import numpy as np\n'), ((3360, 3409), 'numpy.where', 'np.where', (["(df['name_nwords'] >= nmean)", '(True)', '(False)'], {}), "(df['name_nwords'] >= nmean, True, False)\n", (3368, 3409), True, 'import numpy as np\n'), ((3499, 3549), 'numpy.where', 'np.where', (["(df['state'] == 'successful')", '(True)', '(False)'], {}), "(df['state'] == 'successful', True, False)\n", (3507, 3549), True, 'import numpy as np\n'), ((3837, 3924), 'pandas.get_dummies', 'pd.get_dummies', (['df'], {'columns': "['long_blurb', 'long_name', 'state_b']", 'drop_first': '(True)'}), "(df, columns=['long_blurb', 'long_name', 'state_b'],\n drop_first=True)\n", (3851, 3924), True, 'import pandas as pd\n'), ((4315, 4329), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (4327, 4329), False, 'from sklearn.preprocessing import RobustScaler\n'), ((934, 969), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["df['deadline_rd']"], {}), "(df['deadline_rd'])\n", (950, 969), True, 'import pandas as pd\n'), ((1002, 1037), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["df['deadline_rd']"], {}), "(df['deadline_rd'])\n", (1018, 1037), True, 'import pandas as pd\n'), ((1073, 1108), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', 
(["df['deadline_rd']"], {}), "(df['deadline_rd'])\n", (1089, 1108), True, 'import pandas as pd\n'), ((1149, 1187), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["df['launched_at_rd']"], {}), "(df['launched_at_rd'])\n", (1165, 1187), True, 'import pandas as pd\n')] |
# Not consistent with test passing
import numpy as np
import path_plan
from path_plan import compute_probability
from path_plan import model_polyfit
from numpy import interp
import sys
def main():
    """Demo driver: interpolate a lane width and print a path probability."""
    # Lane-width guidance per the Indian Roads Congress (IRC).
    # https://nptel.ac.in/content/storage2/courses/105101008/downloads/cete_24.pdf
    lane_width_values = [2.0, 23.5]
    # Break points of speed
    speed_breakpoints = [0.0, 7.3]
    speed = [0.0, 10.0]
    lane_width = interp(lane_width_values, speed_breakpoints, speed)
    # NOTE(review): lane_width is a 2-element array, so this literal builds a
    # ragged array -- raises ValueError on NumPy >= 1.24; confirm intent.
    half_lane = np.array([0.0, 0.0, 0.0, lane_width // 2.0])
    print(lane_width, half_lane)
    # Inputs for the probability model (path weights currently unused).
    left_path_weight = 1.0
    right_path_weight = 1.0
    l_probability = 0.006
    r_probability = 0.123
    left_polyfit = 0.1
    right_polyfit = 0.22
    result = compute_probability(speed, left_polyfit, right_polyfit, l_probability, r_probability)
    print(result)
if __name__ == '__main__':
main() | [
"numpy.array",
"numpy.interp",
"path_plan.compute_probability"
] | [((429, 471), 'numpy.interp', 'interp', (['V_lane_width', 'BP_lane_width', 'speed'], {}), '(V_lane_width, BP_lane_width, speed)\n', (435, 471), False, 'from numpy import interp\n'), ((485, 529), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, lane_width // 2.0]'], {}), '([0.0, 0.0, 0.0, lane_width // 2.0])\n', (493, 529), True, 'import numpy as np\n'), ((698, 787), 'path_plan.compute_probability', 'compute_probability', (['speed', 'left_polyfit', 'right_polyfit', 'l_probability', 'r_probability'], {}), '(speed, left_polyfit, right_polyfit, l_probability,\n r_probability)\n', (717, 787), False, 'from path_plan import compute_probability\n')] |
import copy
import os
import sqlite3
import urllib
import shutil
import urllib.request
import numpy as np
import pandas as pd
from basinmaker.utilities.utilities import *
def GenerateRavenInput(
    Path_final_hru_info="#",
    lenThres=1,
    iscalmanningn=-1,
    Startyear=-1,
    EndYear=-1,
    CA_HYDAT="#",
    WarmUp=0,
    time_step = 1,
    Template_Folder="#",
    Lake_As_Gauge=False,
    WriteObsrvt=False,
    DownLoadObsData=True,
    Model_Name="test",
    Old_Product=False,
    SubBasinGroup_NM_Channel=["Allsubbasins"],
    SubBasinGroup_Length_Channel=[-1],
    SubBasinGroup_NM_Lake=["AllLakesubbasins"],
    SubBasinGroup_Area_Lake=[-1],
    OutputFolder="#",
    Forcing_Input_File="#",
    aspect_from_gis = 'grass',
    lake_out_flow_method = 'broad_crest',
):
    """Generate Raven input files.

    All output is stored in the folder "<OutputFolder>/RavenInput", which is
    removed and rebuilt on every call.

    Parameters
    ----------
    Path_final_hru_info : string
        Path of the output shapefile from routing toolbox which includes
        all required parameters; each row in the attribute table of this
        shapefile represents a HRU. Different HRUs in the same subbasin have
        the same subbasin-related attribute values.
        The shapefile should at least contain the following columns:

        ############## Subbasin related attributes ##############
        SubId      - integer, the subbasin Id
        DowSubId   - integer, the downstream subbasin ID of this subbasin
        IsLake     - integer, if the subbasin is a lake / reservoir
                     subbasin. 1 yes, <0 no
        IsObs      - integer, if the subbasin contains an observation
                     gauge. 1 yes, <0 no
        RivLength  - float, the length of the river in current subbasin in m
        RivSlope   - float, the slope of the river path in current
                     subbasin, in m/m
        FloodP_n   - float, flood plain Manning's coefficient, in -
        Ch_n       - float, main channel Manning's coefficient, in -
        BkfWidth   - float, the bankfull width of the main channel in m
        BkfDepth   - float, the bankfull depth of the main channel in m
        HyLakeId   - integer, the lake id
        LakeVol    - float, the volume of the lake in km3
        LakeDepth  - float, the average depth of the lake in m
        LakeArea   - float, the area of the lake in m2

        ############## HRU related attributes ##############
        HRU_S_mean - float, the slope of the HRU in degree
        HRU_A_mean - float, the aspect of the HRU in degree
        HRU_E_mean - float, the mean elevation of the HRU in m
        HRU_ID     - integer, the id of the HRU
        HRU_Area   - integer, the area of the HRU in m2
        HRU_IsLake - integer, 1 the HRU is a lake HRU, -1 not
        LAND_USE_C - string, the landuse class name for this HRU; the name
                     will be used in Raven rvh and rvp files
        VEG_C      - string, the vegetation class name for this HRU; the
                     name will be used in Raven rvh and rvp files
        SOIL_PROF  - string, the soil profile name for this HRU; the name
                     will be used in Raven rvh and rvp files
        HRU_CenX   - float, the centroid coordinate for HRU in x dimension
        HRU_CenY   - float, the centroid coordinate for HRU in y dimension
    lenThres : float
        River length threshold; river length smaller than
        this will be written as zero in the Raven rvh file.
    iscalmanningn : integer
        If "1", use the Manning's coefficient in the shapefile table;
        if "-1", use the default value (0.035) instead.
    Startyear : integer, optional
        Start year of simulation. Used to
        read streamflow observations from external databases.
    EndYear : integer, optional
        End year of simulation. Used to
        read streamflow observations from external databases.
    CA_HYDAT : string, optional
        Path and filename of a downloaded
        external database containing streamflow observations,
        e.g. HYDAT for Canada ("Hydat.sqlite3").
    WarmUp : integer, optional
        The warmup time (in years) used after
        Startyear. Values in output file "obs/xxx.rvt" containing
        observations will be set to NoData value "-1.2345" from
        model start year to end of WarmUp year.
    time_step : float, optional
        Simulation time step forwarded to the rvh/rvp generator.
    Template_Folder : string, optional
        Folder containing Raven template files. All files from that
        folder will be copied (unchanged) to "<OutputFolder>/RavenInput".
    Lake_As_Gauge : Bool
        If "True", all lake subbasins will be labeled as gauged
        subbasins so that Raven exports a lake balance for each lake.
        If "False", lake subbasins will not be labeled as gauged.
    WriteObsrvt : Bool, optional
        Indicates whether the observation data files need to be generated.
    DownLoadObsData : Bool, optional
        Indicates whether observation data will be downloaded from the USGS
        website or read from the HYDAT database for streamflow gauges in the
        US or Canada, respectively. If this parameter is False while
        WriteObsrvt is True, the program writes the observation data files
        with "-1.2345" for each observation gauge.
    Model_Name : string
        The Raven model base name. File names of the Raven inputs will be
        Model_Name.xxx.
    Old_Product : bool
        True, the input polygon comes from the first version of the routing
        product (column "Rivlen" instead of "RivLength").
    SubBasinGroup_NM_Channel : List
        A list of names for subbasin groups, which are grouped based
        on channel length of each subbasin. Should have at least one name.
        NOTE(review): list-valued defaults are shared across calls (Python
        mutable-default pitfall); confirm downstream code never mutates them.
    SubBasinGroup_Length_Channel : List
        A list of float channel length thresholds in meters, used to divide
        subbasins into groups. For example, [1,10,20] divides
        subbasins into four groups: group 1 with channel length (0,1];
        group 2 with channel length (1,10]; group 3 with (10,20];
        group 4 with (20, max channel length].
    SubBasinGroup_NM_Lake : List
        A list of names for subbasin groups, which are grouped based
        on lake area of each subbasin. Should have at least one name.
    SubBasinGroup_Area_Lake : List
        A list of float lake area thresholds in m2, used to divide
        subbasins into groups. For example, [1,10,20] divides
        subbasins into four groups: group 1 with lake area (0,1];
        group 2 with (1,10]; group 3 with (10,20];
        group 4 with (20, max lake area].
    OutputFolder : string
        Folder name that stores generated Raven input files. The Raven
        input files will be generated in "<OutputFolder>/RavenInput".
    Forcing_Input_File : string, optional
        If provided, this file is copied into the output folder as
        "GriddedForcings2.txt".
    aspect_from_gis : string, optional
        GIS backend used when aspects were derived (default 'grass');
        forwarded to the rvh/rvp generator.
    lake_out_flow_method : string, optional
        Lake outflow formulation (default 'broad_crest'); forwarded to the
        lake rvh generator.

    Notes
    -------
    The following output files will be generated in
    "<OutputFolder>/RavenInput":
    modelname.rvh              - contains subbasins and HRUs
    Lakes.rvh                  - contains definition and parameters of lakes
    channel_properties.rvp     - contains definition and parameters of channels
    xxx.rvt                    - (optional) streamflow observation for each
                                 gauge in the shapefile database,
                                 automatically generated in
                                 "OutputFolder/RavenInput/obs/"
    obsinfo.csv                - information file reporting the drainage area
                                 difference between the shapefile and the
                                 standard database, as well as the number of
                                 missing values for each gauge

    Returns:
    -------
    None

    Examples
    -------
    """

    Raveinputsfolder = os.path.join(OutputFolder, "RavenInput")
    Obs_Folder = os.path.join(Raveinputsfolder, "obs")

    if not os.path.exists(OutputFolder):
        os.makedirs(OutputFolder)
    # Remove any previous RavenInput tree so stale files do not linger.
    shutil.rmtree(Raveinputsfolder, ignore_errors=True)

    ### check if there is a model input template provided
    # copy_tree / copyfile / Dbf5 / WriteStringToFile come from the star
    # import of basinmaker.utilities.utilities.
    if Template_Folder != "#":
        fromDirectory = Template_Folder
        toDirectory = Raveinputsfolder
        copy_tree(fromDirectory, toDirectory)
    if not os.path.exists(Raveinputsfolder):
        os.makedirs(Raveinputsfolder)
    if not os.path.exists(Obs_Folder):
        os.makedirs(Obs_Folder)

    if Forcing_Input_File != "#":
        fromDirectory = Forcing_Input_File
        toDirectory = os.path.join(Raveinputsfolder, "GriddedForcings2.txt")
        copyfile(fromDirectory, toDirectory)

    # Read the HRU attribute table from the .dbf sidecar of the shapefile:
    # one row per HRU, subbasin attributes repeated within a subbasin.
    finalcatchpath = Path_final_hru_info
    tempinfo = Dbf5(finalcatchpath[:-3] + "dbf")
    ncatinfo = tempinfo.to_dataframe()
    # Keep one row per HRU and drop invalid HRU/subbasin ids.
    ncatinfo2 = ncatinfo.drop_duplicates("HRU_ID", keep="first")
    ncatinfo2 = ncatinfo2.loc[(ncatinfo2["HRU_ID"] > 0) & (ncatinfo2["SubId"] > 0)]
    # The first-generation routing product used column "Rivlen".
    if Old_Product == True:
        ncatinfo2["RivLength"] = ncatinfo2["Rivlen"].values

    # Build channel rvp, model rvh and model rvp content in one pass.
    (
        Channel_rvp_file_path,
        Channel_rvp_string,
        Model_rvh_file_path,
        Model_rvh_string,
        Model_rvp_file_path,
        Model_rvp_string_modify,
    ) = Generate_Raven_Channel_rvp_rvh_String(
        ncatinfo2,
        Raveinputsfolder,
        lenThres,
        iscalmanningn,
        Lake_As_Gauge,
        Model_Name,
        SubBasinGroup_NM_Lake,
        SubBasinGroup_Area_Lake,
        SubBasinGroup_NM_Channel,
        SubBasinGroup_Length_Channel,
        time_step,
        aspect_from_gis
    )
    # rvp is appended ("a") because the template may already define part of it.
    WriteStringToFile(Channel_rvp_string + '\n \n', Channel_rvp_file_path, "w")
    WriteStringToFile(Model_rvh_string + '\n \n', Model_rvh_file_path, "w")
    WriteStringToFile(Model_rvp_string_modify + '\n \n', Model_rvp_file_path, "a")

    Lake_rvh_string, Lake_rvh_file_path = Generate_Raven_Lake_rvh_String(
        ncatinfo2, Raveinputsfolder, Model_Name,lake_out_flow_method
    )
    WriteStringToFile(Lake_rvh_string, Lake_rvh_file_path, "w")

    # NOTE: WriteObsrvt is documented as Bool; "> 0" relies on True > 0.
    if WriteObsrvt > 0:
        (
            obs_rvt_file_path_gauge_list,
            obs_rvt_file_string_gauge_list,
            Model_rvt_file_path,
            Model_rvt_file_string_modify_gauge_list,
            obsnms,
        ) = Generate_Raven_Obs_rvt_String(
            ncatinfo2,
            Raveinputsfolder,
            Obs_Folder,
            Startyear + WarmUp,
            EndYear,
            CA_HYDAT,
            DownLoadObsData,
            Model_Name,
        )
        # One obs rvt file per gauge, plus a RedirectToFile entry appended
        # to the model's main rvt file.
        for i in range(0, len(obs_rvt_file_path_gauge_list)):
            WriteStringToFile(
                obs_rvt_file_string_gauge_list[i] + '\n \n',
                obs_rvt_file_path_gauge_list[i],
                "w",
            )
            WriteStringToFile(
                Model_rvt_file_string_modify_gauge_list[i] + '\n \n', Model_rvt_file_path, "a"
            )
        # Summary of drainage-area differences and missing-value counts.
        obsnms.to_csv(os.path.join(Obs_Folder, "obsinfo.csv"))
####
# Inputs
####
def DownloadStreamflowdata_CA(Station_NM, CA_HYDAT, StartYear, EndYear):
    """Return streamflow data from HYDAT
    Function that used to obtain streamflow data of certain gauge from HYDAT database
    Parameters
    ----------
    Station_NM : string
        The name of the gauge, "05PB019"
    CA_HYDAT : string
        Path and filename of previously downloaded
        external database containing streamflow observations,
        e.g. HYDAT for Canada ("Hydat.sqlite3").
    StartYear : integer
        Start year of simulation. Used to
        read streamflow observations from external databases.
    EndYear : integer
        End year of simulation. Used to
        read streamflow observations from external databases.
    Notes
    ------
    None
    Returns
    -------
    flowdata : data-type
        obtained streamflow observation dataframe between
        Startyear and EndYear; -1 when the station is unknown or has no
        record in the requested window.
    obs_DA : float
        The drainage area of this gauge read from HYDAT database (km2);
        -9999 when the station has no usable record, -1.2345 when no
        drainage area is stored.
    obtaindata : bool
        True indicate successfully obtain data, False indicate no data are founded
        for this gauge
    Examples
    --------
    >>> from WriteRavenInputs import DownloadStreamflowdata_CA
    >>> Station_NM = '05PC019'
    >>> StartYear = 2010
    >>> EndYear = 2011
    >>> CA_HYDAT = HYDAT_Path
    >>> flowdata,obs_DA,obtaindata = DownloadStreamflowdata_CA(Station_NM,CA_HYDAT,StartYear,EndYear)
    """
    obtaindata = True
    con = sqlite3.connect(CA_HYDAT)
    try:
        ### obtain station info
        sqlstat = "SELECT STATION_NUMBER, DRAINAGE_AREA_GROSS, DRAINAGE_AREA_EFFECT from STATIONS WHERE STATION_NUMBER=?"
        Station_info = pd.read_sql_query(sqlstat, con, params=[Station_NM])
        if len(Station_info) == 0:
            return -1, -9999, False

        # Prefer the largest recorded drainage area; the -1.2345 sentinel
        # survives only when both HYDAT columns are NULL.
        DAS = np.array(
            [
                -1.2345,
                Station_info["DRAINAGE_AREA_GROSS"].values[0],
                Station_info["DRAINAGE_AREA_EFFECT"].values[0],
            ]
        )
        DAS = DAS[DAS != None]  # drop SQL NULLs (object-dtype comparison)
        if len(DAS) > 0:
            obs_DA = np.nanmax(DAS)
        else:
            obs_DA = -1.2345

        ## obtain streamflow data
        sqlstat = "select * from DLY_FLOWS WHERE STATION_NUMBER = ?"
        Readed_Streamflow = pd.read_sql_query(sqlstat, con, params=[Station_NM])
    finally:
        # BUGFIX: the connection was previously never closed (leaked handle).
        con.close()

    Readed_Streamflow = Readed_Streamflow[Readed_Streamflow["YEAR"] >= StartYear]
    Readed_Streamflow = Readed_Streamflow[Readed_Streamflow["YEAR"] <= EndYear]
    ## Initial dataframe
    if len(Readed_Streamflow) == 0:
        return -1, -9999, False

    # HYDAT stores one row per station-month with day columns FLOW1..FLOW31;
    # rebuild a continuous daily series from the first to the last month.
    year_ini = Readed_Streamflow["YEAR"].values[0]
    mon_ini = Readed_Streamflow["MONTH"].values[0]
    year_end = Readed_Streamflow["YEAR"].values[len(Readed_Streamflow) - 1]
    mon_end = Readed_Streamflow["MONTH"].values[len(Readed_Streamflow) - 1]
    ndays_end = Readed_Streamflow["NO_DAYS"].values[len(Readed_Streamflow) - 1]
    Date_ini = str(year_ini) + "-" + str(mon_ini) + "-" + "01"
    Date_end = str(year_end) + "-" + str(mon_end) + "-" + str(ndays_end)
    Date = pd.date_range(start=Date_ini, end=Date_end, freq="D")
    # Days without a valid record keep the sentinel -1.2345.
    flowdata = pd.DataFrame(
        np.full((len(Date), 2), -1.2345), columns=["Flow", "QC"], index=Date
    )

    ### loop read streamflow data
    for index, row in Readed_Streamflow.iterrows():
        NDays = row["NO_DAYS"]
        for iday in range(1, NDays + 1):
            val = row["FLOW" + str(iday)]
            # BUGFIX: the old guard `val != np.nan` is always True (NaN
            # compares unequal to everything, including itself); pd.notna
            # covers both None and NaN. Flows must be strictly positive.
            if pd.notna(val) and float(val) > 0:
                cdate = pd.to_datetime(
                    {"year": [row["YEAR"]], "month": [row["MONTH"]], "day": [iday]}
                ).values
                flowdata.loc[cdate, "Flow"] = val
                flowdata.loc[cdate, "QC"] = row["FLOW_SYMBOL" + str(iday)]
    return flowdata, obs_DA, obtaindata
def DownloadStreamflowdata_US(Station_NM, StartYear, EndYear):
    """Return streamflow data from USGS website
    Function that used to obtain streamflow data of certain gauge from USGS website
    Parameters
    ----------
    Station_NM : string
        The name of the gauge, "05127000"
    StartYear : integer
        Start year of simulation. Used to
        read streamflow observations from external databases.
    EndYear : integer
        End year of simulation. Used to
        read streamflow observations from external databases.
    Notes
    ------
    Performs two HTTP requests against waterdata.usgs.gov (site inventory and
    daily values, both in the tab-delimited "rdb" format); no local caching.
    Returns
    -------
    flowdata : data-type
        obtained streamflow observation dataframe between
        Startyear and EndYear.
    obs_DA : float
        The drainage area of this gauge read from the USGS site inventory
        (converted to km2), or -1.2345 when unavailable.
    obtaindata : bool
        True indicate successfully obtain data, False indicate no data are founded
        for this gauge
    Examples
    --------
    >>> from WriteRavenInputs import DownloadStreamflowdata_US
    >>> Station_NM = '05127000'
    >>> StartYear = 2010
    >>> EndYear = 2011
    >>> flowdata,obs_DA,obtaindata = DownloadStreamflowdata_US(Station_NM,StartYear,EndYear)
    """

    obtaindata = True
    #### Obtain station info
    # USGS station ids are zero-padded to 8 digits. In the rdb response the
    # third-to-last line holds the column names and the last line the values.
    urlstlist = "https://waterdata.usgs.gov/nwis/inventory/?format=rdb&site_no=" + str(
        int(Station_NM)
    ).zfill(8)
    Reslist = urllib.request.urlopen(urlstlist)
    stlistdata = Reslist.read()
    stlistdata = stlistdata.splitlines()
    station_info_name = stlistdata[len(stlistdata) - 3].split()
    station_info_value = stlistdata[len(stlistdata) - 1].split()

    # Drainage area: only read when the last column is the contributing
    # drainage area; USGS reports square miles (1 mi2 = 2.58999 km2).
    if (
        station_info_name[len(station_info_name) - 1].decode("utf-8")
        != "contrib_drain_area_va"
    ):
        obs_DA = -1.2345
    else:
        # The value row may be shorter than the header row when trailing
        # fields are empty, hence the second-to-last fallback.
        try:
            obs_DA = (
                float(station_info_value[len(station_info_value) - 1].decode("utf-8"))
                * 2.58999
            )  # square miles to square km
        except:
            try:
                obs_DA = (
                    float(station_info_value[len(station_info_value) - 2].decode("utf-8"))
                    * 2.58999
                )  # square miles to square km
            except:
                obs_DA = -1.2345

    ## try to obtain data with in this period
    Date_ini = str(StartYear) + "-" + "01" + "-" + "01"
    Date_end = str(EndYear) + "-" + "12" + "-" + "31"
    urlstlist = (
        "https://waterdata.usgs.gov/nwis/dv?cb_00060=on&format=rdb&site_no="
        + str(int(Station_NM)).zfill(8)
        + "&referred_module=sw&begin_date="
        + Date_ini
        + "&end_date="
        + Date_end
    )
    # print(urlstlist)
    Reslist = urllib.request.urlopen(urlstlist)
    stlistdata = Reslist.read()
    stlistdata = stlistdata.splitlines()

    ##obtain start of the data rows
    # A data row has exactly 5 fields and its second field is the station id.
    # NOTE(review): istlistdata[0] is bytes while "#" is str, so that
    # comparison never matches; comment lines are skipped only by the
    # field-count check -- confirm this is acceptable.
    datarow = -1
    for i in range(0, len(stlistdata)):
        istlistdata = stlistdata[i].split()
        if len(istlistdata) == 0:
            return -1.2345,-1.2345, False
        if istlistdata[0] == "#" or len(istlistdata) != 5:
            continue
        if istlistdata[1].decode("utf-8") == str(int(Station_NM)).zfill(8):
            datarow = i
            break

    # NOTE(review): if no data row matched, datarow stays -1 and the line
    # below silently reads the last response line; confirm whether an
    # explicit "no data" return is intended here.
    Date_ini = stlistdata[datarow].split()[2].decode("utf-8")
    Date_end = stlistdata[len(stlistdata) - 1].split()[2].decode("utf-8")
    Date = pd.date_range(start=Date_ini, end=Date_end, freq="D")
    # Days without a valid record keep the sentinel -1.2345.
    flowdata = pd.DataFrame(
        np.full((len(Date), 2), -1.2345), columns=["Flow", "QC"], index=Date
    )

    for i in range(datarow, len(stlistdata)):
        istlistdata = stlistdata[i].split()
        # Skip short rows and ice-affected records.
        if len(istlistdata) < 5 or istlistdata[3].decode("utf-8") == "Ice":
            continue
        else:
            date = istlistdata[2].decode("utf-8")
            cdate = pd.to_datetime(
                {"year": [date[0:4]], "month": [date[5:7]], "day": [date[8:10]]}
            ).values
            flowdata.loc[cdate, "Flow"] = (
                float(istlistdata[3].decode("utf-8")) * 0.0283168
            )  # cubic feet per second to cubic meters per second
            flowdata.loc[cdate, "QC"] = istlistdata[4].decode("utf-8")
    return flowdata, obs_DA, obtaindata
def Generate_Raven_Obsrvt_String(
    flowdata, obsnm, outObsfileFolder
):
    """Format one gauge's daily flows as a Raven observation rvt block.

    Parameters
    ----------
    flowdata : data-type
        Daily streamflow dataframe indexed by date ('%Y-%m-%d'), with the
        discharge in m3/s in the 'Flow' column (missing values -1.2345).
    obsnm : data-type
        Gauge record providing at least 'Obs_NM' (gauge name) and 'SubId'
        (the subbasin id the gauge sits on).
    outObsfileFolder : string
        Folder into which the per-gauge rvt file should be written.

    Returns
    -------
    obs_rvt_file_path : string
        Target path '<outObsfileFolder>/<Obs_NM>_<SubId>.rvt'.
    output_string : string
        Content of the rvt file: an ':ObservationData HYDROGRAPH' header,
        a start-date / interval / record-count line, one indented value per
        day, and an ':EndObservationData' footer.

    See Also
    --------
    DownloadStreamflowdata_US : Generate flowdata inputs needed by this function
    DownloadStreamflowdata_CA : Generate flowdata inputs needed by this function
    """
    gauge_label = obsnm["Obs_NM"] + "_" + str(obsnm["SubId"])
    obs_rvt_file_path = os.path.join(outObsfileFolder, gauge_label + ".rvt")

    lines = [":ObservationData HYDROGRAPH " + str(obsnm["SubId"]) + " m3/s"]
    # Start date, time, interval (days) and number of records.
    lines.append(
        flowdata.index[0].strftime("%Y-%m-%d")
        + "  "
        + "00:00:00  "
        + "1     "
        + str(len(flowdata))
    )
    # One indented discharge value per day, in index order.
    lines.extend("         " + str(value) for value in flowdata["Flow"].values)
    lines.append(":EndObservationData" + "\n")

    return obs_rvt_file_path, "\n".join(lines)
def Generate_Raven_Timeseries_rvt_String(
    outFolderraven, outObsfileFolder, obsnm, Model_Name
):
    """Generate a string in Raven time series rvt input file format.

    Builds the ':RedirectToFile ./obs/<gauge>_<subid>.rvt' entry that is
    appended to the model's main rvt file (Model_Name.rvt) for one gauge.

    Parameters
    ----------
    outFolderraven : String
        Path and name of the output folder of Raven input files.
        Kept for interface compatibility; the redirect path is built only
        from outObsfileFolder.
    outObsfileFolder : String
        Path and name of the output folder that holds the per-gauge
        observation rvt files; only its last path component is used.
    obsnm : data-type
        Dataframe/record of observation gauge information providing at least
        'Obs_NM' (gauge name) and 'SubId' (subbasin id of the gauge).
    Model_Name : string
        The Raven model base name. Kept for interface compatibility.

    Returns
    -------
    output_string : string
        The ':RedirectToFile' line to append to the model time series file.

    See Also
    --------
    DownloadStreamflowdata_US : Generate flowdata inputs needed by this function
    DownloadStreamflowdata_CA : Generate flowdata inputs needed by this function
    """
    # FIX: dropped the unused local that joined outFolderraven and
    # Model_Name -- it was computed but never read or returned.
    # The redirect is relative to the model folder: "./<obs folder name>/".
    obsflodername = "./" + os.path.split(outObsfileFolder)[1] + "/"
    output_string = (
        "  \n"
        + ":RedirectToFile  "
        + obsflodername
        + obsnm["Obs_NM"]
        + "_"
        + str(obsnm["SubId"])
        + ".rvt"
        + "  \n"
    )
    return output_string
def Generate_Raven_Obs_rvt_String(
    catinfo,
    outFolderraven,
    outObsfileFolder,
    startyear,
    endyear,
    CA_HYDAT="#",
    DownLoadObsData=True,
    Model_Name="test",
):
    """Generate Raven streamflow observation conent
    Function that used to generate content of Raven streamflow observation input file.
    Parameters
    ----------
    catinfo : data-type
        Dataframe of a routing structure that needes to define a Raven model.
        Can be directly read from the database of the hru shpfile generated by
        the toolbox. At least include following columns:
        'Obs_NM'    - the name of the stream flow obsrvation gauge
        'SubId'     - the subbasin Id of this stremflow gauge located at.
        'SRC_obs'   - the country of the gauge located at
        'DrainArea' - the drainage area controlled by the gauge obtained from
                      the routing structure (m2)
    outFolderraven : String
        Path and name of the output folder of Raven input files
    outObsfileFolder : String
        Path and name of the output folder to save obervation rvt file
        of each gauge
    startyear : integer
        Start year of simulation. Used to
        read streamflow observations from external databases.
    endyear : integer
        End year of simulation. Used to
        read streamflow observations from external databases.
    CA_HYDAT : string, optional
        path and filename of previously downloaded
        external database containing streamflow observations,
        e.g. HYDAT for Canada ("Hydat.sqlite3").
    DownLoadObsData : Bool, optional
        Input that used to indicate if the observation data will be Download
        from usgs website or read from hydat database for streamflow Gauge
        in US or Canada,respectively. If this parameter is False,
        while WriteObsrvt is True. The program will write the observation data
        file with "-1.2345" for each observation gauges.
    Model_Name : string
        The Raven model base name. File name of the raven input will be
        Model_Name.xxx.
    Notes
    ------
    None
    See Also
    --------
    DownloadStreamflowdata_US : Generate flowdata inputs
                                needed by this function
    DownloadStreamflowdata_CA : Generate flowdata inputs
                                needed by this function
    Generate_Raven_Obsrvt_String : Generate a string in Raven
                                   observation rvt input file format
    Generate_Raven_Timeseries_rvt_String : Generate a string in Raven
                                           time series rvt input file format
    Returns
    -------
    obs_rvt_file_path_gauge_list : list of string
        It is the list of string, each of them contains the content
        that will be used to define the path of raven observation
        input file for one streamflow gauge
    obs_rvt_file_string_gauge_list : list of string
        It is the list of string, each of them define the content of
        raven obaervation input file(xxx_obs.rvt) for one gauge
    Model_rvt_file_path : string
        It is the string that define the path of
        the raven model time series input file
    Model_rvt_file_string_modify_gauge_list : list of string
        It is the list of string, each of them define the content
        that needs to be added into the Raven model time series file
    obsnms : DataFrame
        Dataframe of observation gauge information for all streamflow gauges
    Examples
    --------
    >>> from WriteRavenInputs import Generate_Raven_Obs_rvt_String
    >>> outFolderraven = 'c:/path_to_the_raven_input_folder/'
    >>> DataFolder = "C:/Path_to_foldr_of_example_dataset_provided_in_Github_wiki/"
    >>> Model_Folder = os.path.join(DataFolder,'Model')
    >>> Raveinputsfolder = os.path.join(Model_Folder,'RavenInput')
    >>> Obs_Folder = os.path.join(Raveinputsfolder,'obs')
    >>> finalcatchpath = os.path.join(DataFolder,'finalcat_hru_info.shp')
    >>> tempinfo = Dbf5(finalcatchpath[:-3] + "dbf")
    >>> ncatinfo = tempinfo.to_dataframe()
    >>> Model_Name = 'test'
    >>> Startyear = 2010
    >>> EndYear = 2017
    >>> CA_HYDAT = 'c/path_to_your_HYDAT_database/'
    >>> WarmUp = 1
    >>> DownLoadObsData = True
    >>> ncatinfo2 = ncatinfo.drop_duplicates('HRU_ID', keep='first')
    >>> obs_rvt_file_path_gauge_list,obs_rvt_file_string_gauge_list,Model_rvt_file_path,Model_rvt_file_string_modify_gauge_list,obsnms = Generate_Raven_Obs_rvt_String(ncatinfo2,Raveinputsfolder,Obs_Folder,
    ...                                                                                                                                                              Startyear + WarmUp,EndYear,CA_HYDAT,
    ...                                                                                                                                                              DownLoadObsData,
    ...                                                                                                                                                              Model_Name)
    >>>
    """
    # One row per gauge; ungauged subbasins carry NaN names, which are
    # replaced by the string '-9999.0' and dropped.
    obsnms = catinfo[["Obs_NM", "SRC_obs", "SubId", "DrainArea"]]
    obsnms = obsnms.drop_duplicates("Obs_NM", keep="first")
    obsnms = obsnms.replace(np.nan, '-9999.0', regex=True)
    obsnms = obsnms.loc[obsnms["Obs_NM"] != "-9999.0"]
    # NOTE(review): obsnms is derived from a slice of catinfo; these writes
    # may trigger pandas SettingWithCopyWarning -- confirm a .copy() is not
    # needed upstream.
    obsnms.loc[:, "DrainArea"] = obsnms["DrainArea"].values / 1000 / 1000  # m2 to km2
    index = obsnms.index
    # Full simulation window; days with no observation stay at -1.2345.
    Date = pd.date_range(
        start=str(startyear) + "-" + "01" + "-" + "01",
        end=str(endyear) + "-" + "12" + "-" + "31",
        freq="D",
    )
    obsnms["Obs_DA_data"] = -1.2345
    obsnms["Missing_V"] = -1.2345

    obs_rvt_file_path_gauge_list = []
    obs_rvt_file_string_gauge_list = []
    Model_rvt_file_path = os.path.join(outFolderraven, Model_Name + ".rvt")
    Model_rvt_file_string_modify_gauge_list = []
    for idx in index:
        obsnm = obsnms.loc[idx, :]
        iobs_nm = obsnms.loc[idx, "Obs_NM"]
        iobs_src = obsnms.loc[idx, "SRC_obs"]
        flowdata = pd.DataFrame(
            np.full((len(Date), 2), -1.2345), columns=["Flow", "QC"], index=Date
        )
        # Fetch observations from the matching national archive (USGS for
        # 'US', HYDAT for 'CA'); otherwise the series stays all-missing.
        if iobs_src == "US":
            if DownLoadObsData == True:
                flowdata_read, DA_obs_data, Finddata = DownloadStreamflowdata_US(
                    Station_NM=iobs_nm, StartYear=startyear, EndYear=endyear
                )
            else:
                Finddata = False
        elif iobs_src == "CA":
            if CA_HYDAT != "#" and DownLoadObsData == True:
                flowdata_read, DA_obs_data, Finddata = DownloadStreamflowdata_CA(
                    Station_NM=iobs_nm,
                    CA_HYDAT=CA_HYDAT,
                    StartYear=startyear,
                    EndYear=endyear,
                )
            else:
                Finddata = False
        else:
            Finddata = False

        ####check if data are founded, and assign it to the output dataframes
        if Finddata == False:
            print(iobs_nm + " not find data")
        else:
            # Overlay the downloaded record onto the all-missing frame and
            # record the archive's drainage area and the missing-day count.
            flowdata.loc[
                flowdata.index.isin(flowdata_read.index), ["Flow", "QC"]
            ] = flowdata_read[["Flow", "QC"]]
            obsnms.loc[idx, "Obs_DA_data"] = DA_obs_data
            obsnms.loc[idx, "Missing_V"] = len(flowdata[flowdata["Flow"] == -1.2345])

        obs_rvt_file_path, output_string = Generate_Raven_Obsrvt_String(
            flowdata, obsnm, outObsfileFolder
        )
        # WriteStringToFile(Out_String = output_string,File_Path = obs_rvt_file_path, WriteMethod = "w")
        obs_rvt_file_path_gauge_list.append(obs_rvt_file_path)
        obs_rvt_file_string_gauge_list.append(output_string)

        output_string = Generate_Raven_Timeseries_rvt_String(
            outFolderraven, outObsfileFolder, obsnm, Model_Name
        )
        # WriteStringToFile(Out_String = output_string,File_Path = os.path.join(outFolderraven,Model_Name+'.rvt'), WriteMethod = "a")
        Model_rvt_file_string_modify_gauge_list.append(output_string)

    return (
        obs_rvt_file_path_gauge_list,
        obs_rvt_file_string_gauge_list,
        Model_rvt_file_path,
        Model_rvt_file_string_modify_gauge_list,
        obsnms,
    )
def Generate_Raven_Channel_rvp_string_sub(
chname, chwd, chdep, chslope, elev, floodn, channeln, iscalmanningn
): # writechanel(chname,chwd,chdep,chslope,orchnl,elev,floodn,channeln,iscalmanningn):
"""Generate string of each subbasin for raven chennel rvp inputs
Function that used to generate a string to define a channel profile
for each subbasin in Raven channel rvp input file format.
Parameters
----------
chname :String
the name of the channel for each SubBasins
information for this gauge as the station
name of this gauge
chwd :Float
channel width
chdep :Float
channel depth
chslope :Float
channel slope
elev :Float
channel elevation
floodn :Float
channel flood plain manning's coefficient
channeln :Float
main channnel manning's coefficient
iscalmanningn :Bool
True use channeln or False use 0.035 as main channel
manning's coefficient
Notes
------
None
Returns
-------
output_string : string
It is the string that contains the content that will be used to
to define a channel profile for given subbasin in
Raven channel rvp input file format.
"""
output_string_list = []
### Following SWAT instructions, assume a trapezoidal shape channel, with channel sides has depth and width ratio of 2. zch = 2
zch = 2
sidwd = zch * chdep ###river side width
tab = " "
botwd = chwd - 2 * sidwd ### river
if botwd < 0:
botwd = 0.5 * chwd
sidwd = 0.5 * 0.5 * chwd
zch = (chwd - botwd) / 2 / chdep
if iscalmanningn == True:
mann = '{:>10.8f}'.format(channeln) #str(channeln)
else:
mann = '{:>10.8f}'.format(0.035) #str(0.035)
zfld = 4 + elev
zbot = elev - chdep
sidwdfp = 4 / 0.25
Channame = ":ChannelProfile" + tab + chname + tab
output_string_list.append(Channame) # orchnl.write(Channame+"\n")
Chanslop = " :Bedslope" + tab + '{:>15.10f}'.format(chslope) #str(chslope)
output_string_list.append(Chanslop) # orchnl.write(Chanslop+"\n")
output_string_list.append(" :SurveyPoints") # orchnl.write(" :SurveyPoints"+"\n")
output_string_list.append(
" 0" + tab + '{:>10.4f}'.format(zfld) #str(zfld)
) # orchnl.write(" 0"+tab+str(zfld)+"\n")
output_string_list.append(
" " + '{:>10.4f}'.format(sidwdfp) + tab + '{:>10.4f}'.format(elev) #" " + str(sidwdfp) + tab + str(elev)
) # orchnl.write(" "+str(sidwdfp)+tab+str(elev)+"\n")
output_string_list.append(
" " + '{:>10.4f}'.format(sidwdfp + 2 * chwd) + tab + '{:>10.4f}'.format(elev) #" " + str(sidwdfp + 2 * chwd) + tab + str(elev)
) # orchnl.write(" "+str(sidwdfp + 2*chwd)+tab+str(elev)+"\n")
output_string_list.append(
" " + '{:>10.4f}'.format(sidwdfp + 2 * chwd + sidwd) + tab + '{:>10.4f}'.format(zbot) #" " + str(sidwdfp + 2 * chwd + sidwd) + tab + str(zbot)
) # orchnl.write(" "+str(sidwdfp + 2*chwd + sidwd)+tab+str(zbot)+"\n")
output_string_list.append(
" " + '{:>10.4f}'.format(sidwdfp + 2 * chwd + sidwd + botwd) + tab + '{:>10.4f}'.format(zbot) #" " + str(sidwdfp + 2 * chwd + sidwd + botwd) + tab + str(zbot)
) # orchnl.write(" "+str(sidwdfp + 2*chwd + sidwd + botwd)+tab+str(zbot)+"\n")
output_string_list.append(
" " + '{:>10.4f}'.format(sidwdfp + 2 * chwd + 2 * sidwd + botwd) + tab + '{:>10.4f}'.format(elev) #" " + str(sidwdfp + 2 * chwd + 2 * sidwd + botwd) + tab + str(elev)
) # orchnl.write(" "+str(sidwdfp + 2*chwd + 2*sidwd + botwd)+tab+str(elev)+"\n")
output_string_list.append(
" " + '{:>10.4f}'.format(sidwdfp + 4 * chwd + 2 * sidwd + botwd) + tab + '{:>10.4f}'.format(elev) #" " + str(sidwdfp + 4 * chwd + 2 * sidwd + botwd) + tab + str(elev)
) # orchnl.write(" "+str(sidwdfp + 4*chwd + 2*sidwd + botwd)+tab+str(elev)+"\n")
output_string_list.append(
" " + '{:>10.4f}'.format(2 * sidwdfp + 4 * chwd + 2 * sidwd + botwd) + tab + '{:>10.4f}'.format(zfld) # " " + str(2 * sidwdfp + 4 * chwd + 2 * sidwd + botwd) + tab + str(zfld)
) # orchnl.write(" "+str(2*sidwdfp + 4*chwd + 2*sidwd + botwd)+tab+str(zfld)+"\n")
output_string_list.append(
" :EndSurveyPoints"
) # orchnl.write(" :EndSurveyPoints"+"\n")
output_string_list.append(
" :RoughnessZones"
) # orchnl.write(" :RoughnessZones"+"\n")
output_string_list.append(
" 0" + tab + '{:>10.8f}'.format(floodn) #str(floodn)
) # orchnl.write(" 0" + tab + str(floodn) +"\n")
output_string_list.append(
" " + '{:>10.8f}'.format(sidwdfp + 2 * chwd) + tab + mann #" " + str(sidwdfp + 2 * chwd) + tab + mann
) # orchnl.write(" " + str(sidwdfp + 2*chwd)+ tab + mann +"\n")
output_string_list.append(
" " + '{:>10.8f}'.format(sidwdfp + 2 * chwd + 2 * sidwd + botwd) + tab + '{:>10.8f}'.format(floodn) #" " + str(sidwdfp + 2 * chwd + 2 * sidwd + botwd) + tab + str(floodn)
) # orchnl.write(" " + str(sidwdfp + 2*chwd + 2*sidwd + botwd)+ tab + str(floodn) +"\n")
output_string_list.append(
" :EndRoughnessZones"
) # orchnl.write(" :EndRoughnessZones"+"\n")
output_string_list.append(
":EndChannelProfile"
) # orchnl.write(":EndChannelProfile"+"\n")
output_string_list.append("\n") # orchnl.write("\n")
output_string_list.append(
"##############new channel ##############################"
) # orchnl.write("##############new channel ##############################\n")
output_string = "\n".join(output_string_list)
return output_string
#########################################################################################################33
def Generate_Raven_Lake_rvh_String(catinfo, Raveinputsfolder, Model_Name,lake_out_flow_method):
    """Generate string of raven lake rvh input
    Function that used to generate the content for
    Raven lake definition Model_Name_Lake.rvh input file.
    Parameters
    ----------
    catinfo : DataFrame
        A dataframe includes all attribute for each HRU
        read from polygon shpfile generated by the toolbox
    Raveinputsfolder : string
        Folder path and name that save outputs
    lake_out_flow_method : string
        Lake outflow parameterization. 'broad_crest' writes a standard
        broad-crested weir reservoir (:WeirCoefficient/:CrestWidth);
        'power_law' writes explicit power-law stage relations for
        gauged lakes. See Notes for the branch logic.
    Notes
    ------
    For each lake HRU one :Reservoir block is emitted. Ungauged lakes
    (Has_Gauge < 1) always get the broad-crested form; gauged lakes get
    the broad-crested form when lake_out_flow_method == 'broad_crest'
    and the power-law form when it is 'power_law'.
    NOTE(review): a gauged lake is silently skipped when
    lake_out_flow_method is neither 'broad_crest' nor 'power_law' --
    confirm this is intended.
    See Also
    --------
    None
    Returns
    -------
    Lake_rvh_string : string
        It is the string that contains the content that will be used to
        to define lake parameters for all lakes in
        Raven lake rvh input file format.
    Lake_rvh_file_path : string
        It is the string that define the path of
        the raven channel rvp input file.
    Examples
    --------
    >>> from WriteRavenInputs import Generate_Raven_Lake_rvh_String
    >>> outFolderraven = 'c:/path_to_the_raven_input_folder/'
    >>> DataFolder = "C:/Path_to_foldr_of_example_dataset_provided_in_Github_wiki/"
    >>> Model_Folder = os.path.join(DataFolder,'Model')
    >>> Raveinputsfolder = os.path.join(Model_Folder,'RavenInput')
    >>> finalcatchpath = os.path.join(DataFolder,'finalcat_hru_info.shp')
    >>> tempinfo = Dbf5(finalcatchpath[:-3] + "dbf")
    >>> ncatinfo = tempinfo.to_dataframe()
    >>> Model_Name = 'test'
    >>> ncatinfo2 = ncatinfo.drop_duplicates('HRU_ID', keep='first')
    >>> Lake_rvh_string, Lake_rvh_file_path= Generate_Raven_Lake_rvh_String(ncatinfo2,Raveinputsfolder,lenThres,Model_Name)
    """
    # Output path of the lake definition file inside the Raven input folder.
    Lake_rvh_file_path = os.path.join(Raveinputsfolder, "Lakes.rvh")
    Lake_rvh_string_list = []
    tab = " "
    Lake_rvh_string_list.append("#----------------------------------------------")
    Lake_rvh_string_list.append("# This is a Raven lake rvh file generated")
    Lake_rvh_string_list.append("# by BasinMaker v2.0")
    Lake_rvh_string_list.append("#----------------------------------------------")
    # Only HRU rows flagged as lake HRUs produce a :Reservoir block.
    for i in range(0, len(catinfo.index)):
        if catinfo.iloc[i]["HRU_IsLake"] > 0: ## lake hru
            lakeid = int(catinfo.iloc[i]["HyLakeId"])
            catid = catinfo.iloc[i]["SubId"]
            A = catinfo.iloc[i]["HRU_Area"] ### in meters
            h0 = catinfo.iloc[i]["LakeDepth"] ## m
            # Fixed broad-crested weir coefficient used for :WeirCoefficient.
            WeirCoe = 0.6
            hruid = int(catinfo.iloc[i]["HRU_ID"])
            Crewd = catinfo.iloc[i]["BkfWidth"] ##3 m
            has_obs = catinfo.iloc[i]["Has_Gauge"] ##3 m
            # if slakeinfo.iloc[0]['Wshd_area'] < 6000 and slakeinfo.iloc[0]['Wshd_area'] > 0:
            # Broad-crested weir form: ungauged lakes always, and all
            # lakes when the method is 'broad_crest'.
            if has_obs < 1 or lake_out_flow_method == 'broad_crest':
                Lake_rvh_string_list.append(
                    "#############################################"
                ) # f2.write("#############################################"+"\n")
                Lake_rvh_string_list.append(
                    "# New Lake starts"
                ) # f2.write("###New Lake starts"+"\n")
                Lake_rvh_string_list.append(
                    "#############################################"
                ) # f2.write("#############################################"+"\n")
                ######write lake information to file
                Lake_rvh_string_list.append(
                    ":Reservoir" + " Lake_" + str(int(lakeid))
                ) # f2.write(":Reservoir"+ " Lake_"+ str(int(lakeid))+ " ######## " +"\n")
                Lake_rvh_string_list.append(
                    " :SubBasinID " + str(int(catid))
                ) # f2.write(" :SubBasinID "+str(int(catid))+ "\n")
                Lake_rvh_string_list.append(
                    " :HRUID " + str(int(hruid))
                ) # f2.write(" :HRUID "+str(int(hruid))+ "\n")
                Lake_rvh_string_list.append(
                    " :Type RESROUTE_STANDARD "
                ) # f2.write(" :Type RESROUTE_STANDARD "+"\n")
                Lake_rvh_string_list.append(
                    " :WeirCoefficient " + str(WeirCoe)
                ) # f2.write(" :WeirCoefficient "+str(WeirCoe)+ "\n")
                Lake_rvh_string_list.append(
                    " :CrestWidth " + '{:>10.4f}'.format(Crewd) #"{:.4f}".format(Crewd) #str(Crewd)
                ) # f2.write(" :CrestWidth "+str(Crewd)+ "\n")
                Lake_rvh_string_list.append(
                    " :MaxDepth " + str(h0)
                ) # f2.write(" :MaxDepth "+str(h0)+ "\n")
                Lake_rvh_string_list.append(
                    " :LakeArea " + str(A)
                ) # f2.write(" :LakeArea "+str(A)+ "\n")
                Lake_rvh_string_list.append(
                    " :SeepageParameters 0 0 "
                ) # f2.write(" :LakeArea "+str(A)+ "\n")
                Lake_rvh_string_list.append(
                    ":EndReservoir "
                ) # f2.write(":EndReservoir "+"\n")
            # Power-law form for gauged lakes when requested.
            elif has_obs >= 1 and lake_out_flow_method == 'power_law':
                Lake_rvh_string_list.append(
                    "#############################################"
                ) # f2.write("#############################################"+"\n")
                Lake_rvh_string_list.append(
                    "# New Lake starts"
                ) # f2.write("###New Lake starts"+"\n")
                Lake_rvh_string_list.append(
                    "#############################################"
                ) # f2.write("#############################################"+"\n")
                ######write lake information to file
                Lake_rvh_string_list.append(
                    ":Reservoir" + " Lake_" + str(int(lakeid))
                ) # f2.write(":Reservoir"+ " Lake_"+ str(int(lakeid))+ " ######## " +"\n")
                Lake_rvh_string_list.append(
                    " :SubBasinID " + str(int(catid))
                ) # f2.write(" :SubBasinID "+str(int(catid))+ "\n")
                Lake_rvh_string_list.append(
                    " :HRUID " + str(int(hruid))
                ) # f2.write(" :HRUID "+str(int(hruid))+ "\n")
                Lake_rvh_string_list.append(
                    " :Type RESROUTE_STANDARD "
                ) # f2.write(" :Type RESROUTE_STANDARD "+"\n")
                Lake_rvh_string_list.append(
                    " :MaxDepth " + str(h0)
                ) # f2.write(" :MaxDepth "+str(h0)+ "\n")
                Lake_rvh_string_list.append(
                    " :SeepageParameters 0 0 "
                ) # f2.write(" :LakeArea "+str(A)+ "\n")
                # Broad-crested-weir rating written as a power law:
                # coefficient (2/3)*Crewd*sqrt(g) with g = 9.80616 m/s^2,
                # exponent 1.5.
                Lake_rvh_string_list.append(
                    " :OutflowStageRelation POWER_LAW "
                )
                Lake_rvh_string_list.append(
                    " %s %s " %(str(Crewd*2/3*(9.80616**(0.5))),str(1.5))
                )
                Lake_rvh_string_list.append(
                    " :EndOutflowStageRelation "
                )
                # Volume = A * h^1 (prismatic lake assumption).
                Lake_rvh_string_list.append(
                    " :VolumeStageRelation POWER_LAW "
                )
                Lake_rvh_string_list.append(
                    " %s %s " %(str(A),str(1))
                )
                Lake_rvh_string_list.append(
                    " :EndVolumeStageRelation "
                )
                # Area = A * h^0, i.e. constant surface area with stage.
                Lake_rvh_string_list.append(
                    " :AreaStageRelation POWER_LAW "
                )
                Lake_rvh_string_list.append(
                    " %s %s " %(str(A),str(0))
                )
                Lake_rvh_string_list.append(
                    " :EndAreaStageRelation "
                )
                Lake_rvh_string_list.append(
                    ":EndReservoir "
                ) # f2.write(":EndReservoir "+"\n")
    Lake_rvh_string = "\n".join(Lake_rvh_string_list)
    return Lake_rvh_string, Lake_rvh_file_path
#### write lake input files for different lake zone
#########################
def Return_Group_Name_Based_On_Value(value, GroupNames, Group_Thresthold_Values):
    """Return group name
    It is a function to return group name in GroupNames, based on value and
    Group_Thresthold_Values
    Parameters
    ----------
    value : float
        it is a value that used to identify which group this value belong to
    GroupNames : list
        it is a list contain group names
    Group_Thresthold_Values : list
        it is a list contains thresthold to define different groups, the value
        of Group_Thresthold_Values should increase from 0 to end.
    Notes
    ------
    The dimension of the GroupNames should equal to 1 or
    len(Group_Thresthold_Values) + 1.
    A value exactly equal to a threshold falls into the NEXT group
    (thresholds act as exclusive upper bounds), matching the original
    linear-scan behaviour.
    See Also
    --------
    None
    Returns
    -------
    GroupName : String
        the group name determined by value
    Raises
    ------
    ValueError
        If GroupNames is empty (the original code raised an opaque
        UnboundLocalError in this case).
    """
    # BUGFIX: the original left GroupName unbound (UnboundLocalError) when
    # GroupNames was empty, or when it had several names but
    # Group_Thresthold_Values was empty. Guard explicitly and fall back to
    # the first group when there are no thresholds to compare against.
    if not GroupNames:
        raise ValueError("GroupNames must contain at least one group name")
    if len(GroupNames) == 1:
        return GroupNames[0]
    # First threshold strictly greater than value selects the group;
    # values above every threshold land in the last (overflow) group.
    for idx, threshold in enumerate(Group_Thresthold_Values):
        if value < threshold:
            return GroupNames[idx]
    return GroupNames[len(Group_Thresthold_Values)]
def Generate_Raven_Channel_rvp_rvh_String(
    ocatinfo,
    Raveinputsfolder,
    lenThres,
    iscalmanningn,
    Lake_As_Gauge,
    Model_Name,
    SubBasinGroup_NM_Lake,
    SubBasinGroup_Area_Lake,
    SubBasinGroup_NM_Channel,
    SubBasinGroup_Length_Channel,
    Tr = 1,
    aspect_from_gis = 'grass'
): # Writervhchanl(ocatinfo,Raveinputsfolder,lenThres,iscalmanningn,HRU_ID_NM,HRU_Area_NM,Sub_ID_NM,Lake_As_Gauge = False,Model_Name = 'test'):
    """Generate string of raven channel rvp input and rvh input
    Function that used to generate the content of raven rvh file and
    channel rvp file.
    Parameters
    ----------
    ocatinfo : DataFrame
        A dataframe includes all attribute for each HRU
        read from polygon shpfile generated by the toolbox
    Raveinputsfolder : string
        Folder path and name that save outputs
    lenThres : float
        River length threshold; river length smaller than
        this will write as zero in Raven rvh file
    iscalmanningn : integer
        If "1", use manning's coefficient in the shpfile table
        and set to default value (0.035).
        If "-1", do not use manning's coefficients.
    Lake_As_Gauge : Bool
        If "True", all lake subbasins will labeled as gauged
        subbasin such that Raven will export lake balance for
        this lake. If "False", lake subbasin will not be labeled
        as gauge subbasin.
    Model_Name : string
        The Raven model base name. File name of the raven input will be
        Model_Name.xxx.
    SubBasinGroup_NM_Channel : List
        It is a list of names for subbasin groups, which are grouped based
        on channel length of each subbsin. Should at least has one name
    SubBasinGroup_Length_Channel : List
        It is a list of float channel length thresthold in meter, to divide
        subbasin into different groups. for example, [1,10,20] will divide
        subbasins into four groups, group 1 with channel length (0,1];
        group 2 with channel length (1,10],
        group 3 with channel length (10,20],
        group 4 with channel length (20,Max channel length].
    SubBasinGroup_NM_Lake : List
        It is a list of names for subbasin groups, which are grouped based
        on Lake area of each subbsin. Should at least has one name
    SubBasinGroup_Area_Lake : List
        It is a list of float lake area thresthold in m2, to divide
        subbasin into different groups. for example, [1,10,20] will divide
        subbasins into four groups, group 1 with lake area (0,1];
        group 2 with lake are (1,10],
        group 3 with lake are (10,20],
        group 4 with lake are (20,Max channel length].
    Tr : float, optional
        Unit-rainfall duration used by the subbasin time-parameter block,
        which is currently commented out below; kept for backward
        compatibility. Default is 1.
    aspect_from_gis : string, optional
        Convention of the HRU aspect values ('grass', 'arcgis', 'qgis');
        anything else writes the aspect through unchanged. Default 'grass'.
    Notes
    ------
    None
    See Also
    --------
    Generate_Raven_Channel_rvp_string_sub : Generate a string to define channel
                                            for given subbasin in Raven channel
                                            rvp input format.
    Returns
    -------
    Channel_rvp_string : string
        It is the string that contains the content that will be used to
        to define channel profiles for all subbasin in
        Raven channel rvp input file format.
    Channel_rvp_file_path : string
        It is the string that define the path of
        the raven channel rvp input file.
    Model_rvh_string : string
        It is the string that contains the content that will be used to
        to define routing and hru inputs for all subbasin in
        Raven channel rvh input file format.
    Model_rvh_file_path : string
        It is the string that define the path of
        the raven channel rvh input file.
    Model_rvp_string_modify : string
        It is the string that contains the content that will be used to
        to modify model rvp file.
    Model_rvp_file_path : string
        It is the string that define the path of
        the raven channel rvp input file.
    Examples
    --------
    >>> from WriteRavenInputs import Generate_Raven_Channel_rvp_rvh_String
    >>> outFolderraven = 'c:/path_to_the_raven_input_folder/'
    >>> DataFolder = "C:/Path_to_foldr_of_example_dataset_provided_in_Github_wiki/"
    >>> Model_Folder = os.path.join(DataFolder,'Model')
    >>> Raveinputsfolder = os.path.join(Model_Folder,'RavenInput')
    >>> finalcatchpath = os.path.join(DataFolder,'finalcat_hru_info.shp')
    >>> tempinfo = Dbf5(finalcatchpath[:-3] + "dbf")
    >>> ncatinfo = tempinfo.to_dataframe()
    >>> Model_Name = 'test'
    >>> lenThres = 1
    >>> iscalmanningn = -1
    >>> Lake_As_Gauge = -1
    >>> ncatinfo2 = ncatinfo.drop_duplicates('HRU_ID', keep='first')
    >>> Channel_rvp_file_path,Channel_rvp_string,Model_rvh_file_path,Model_rvh_string,Model_rvp_file_path,Model_rvp_string_modify = Generate_Raven_Channel_rvp_rvh_String(ncatinfo2,Raveinputsfolder,lenThres,
    ...                                                                                                                                                                   iscalmanningn,Lake_As_Gauge,Model_Name
    ...                                                                                                                                                                   )
    >>>
    """
    # Output file paths; the rvp "modify" string just redirects the model
    # rvp file to the generated channel_properties.rvp.
    Channel_rvp_file_path = os.path.join(Raveinputsfolder, "channel_properties.rvp")
    Channel_rvp_string_list = []
    Model_rvh_file_path = os.path.join(Raveinputsfolder, Model_Name + ".rvh")
    Model_rvh_string_list = []
    Model_rvp_file_path = os.path.join(Raveinputsfolder, Model_Name + ".rvp")
    Model_rvp_string_modify = (
        "\n" + ":RedirectToFile " + "channel_properties.rvp" + "\n"
    )
    tab = " "
    Channel_rvp_string_list.append("#----------------------------------------------")
    Channel_rvp_string_list.append(
        "# This is a Raven channel properties file generated"
    )
    Channel_rvp_string_list.append("# by BasinMaker v2.0")
    Channel_rvp_string_list.append("#----------------------------------------------")
    Model_rvh_string_list.append("#----------------------------------------------")
    Model_rvh_string_list.append("# This is a Raven HRU rvh input file generated")
    Model_rvh_string_list.append("# by BasinMaker v2.0")
    Model_rvh_string_list.append("#----------------------------------------------")
    # Shallow copies: one view iterated per-HRU, one deduplicated per-subbasin.
    catinfo_hru = copy.copy(ocatinfo)
    catinfo = copy.copy(ocatinfo)
    # print int(catinfo.iloc[0]['SUBID']),len(catinfo.index)
    ##################3
    Model_rvh_string_list.append(":SubBasins") # orvh.write(":SubBasins"+"\n")
    Model_rvh_string_list.append(
        " :Attributes NAME DOWNSTREAM_ID PROFILE REACH_LENGTH GAUGED"
    ) # orvh.write(" :Attributes NAME DOWNSTREAM_ID PROFILE REACH_LENGTH GAUGED"+"\n")
    Model_rvh_string_list.append(
        " :Units none none none km none"
    ) # orvh.write(" :Units none none none km none"+"\n")
    catinfo_sub = catinfo.drop_duplicates(
        "SubId", keep="first"
    ) ### remove duplicated subids, because of including hrus in the dataframe
    # Group-membership tables, filled while looping; written out as
    # :SubBasinGroup blocks at the end.
    SubBasin_Group_Channel = pd.DataFrame(
        data=np.full((len(catinfo_sub), 2), np.nan),
        columns=["SubId", "SubBasin_Group_NM"],
    )
    SubBasin_Group_Lake = pd.DataFrame(
        data=np.full((len(catinfo_sub), 2), np.nan),
        columns=["SubId", "SubBasin_Group_NM"],
    )
    # print(catinfo_sub[['SubId','DowSubId']])
    print(
        "Total number of Subbasins are "
        + str(int((len(catinfo_sub))))
        + " "
        + "SubId"
    )
    # Only one shared "Chn_ZERO_LENGTH" profile is written to the rvp file;
    # this flag flips to False once it has been emitted.
    not_write_default_channel = True
    for i in range(0, len(catinfo_sub)):
        ### Get catchment width and dpeth
        catid = int(catinfo_sub["SubId"].values[i])
        downcatid = int(catinfo_sub["DowSubId"].values[i])
        temp = catinfo_sub["RivLength"].values[i]
        # Reaches shorter than lenThres (and all lake subbasins below) get
        # the "ZERO-" length marker in the rvh file.
        if float(temp) > lenThres:
            catlen = float(temp) / 1000 #### in km
            strRlen = '{:>10.4f}'.format(catlen) #str(catlen)
        else:
            catlen = -9999
            strRlen = "ZERO-"
        if (
            catinfo_sub["Lake_Cat"].values[i] > 0
        ): # and catinfo_sub['HRU_Type'].values[i] == 1:
            strRlen = "ZERO-"
        #####################################################3
        Strcat = str(catid)
        # print(catid,downcatid,len(catinfo_sub.loc[catinfo_sub['SubId'] == downcatid]))
        # Downstream id of -1 means outlet: either the subbasin drains to
        # itself or its downstream id is absent from the table.
        if catid == downcatid:
            StrDid = str(-1)
        elif len(catinfo_sub.loc[catinfo_sub["SubId"] == downcatid]) == 0:
            StrDid = str(-1)
        else:
            StrDid = str(int(catinfo_sub["DowSubId"].values[i]))
        GroupName = Return_Group_Name_Based_On_Value(
            catinfo_sub["RivLength"].values[i],
            SubBasinGroup_NM_Channel,
            SubBasinGroup_Length_Channel,
        )
        SubBasin_Group_Channel.loc[i, "SubId"] = catid
        SubBasin_Group_Channel.loc[i, "SubBasin_Group_NM"] = GroupName
        if strRlen != "ZERO-":
            pronam = "Chn_" + Strcat
        else:
            pronam = "Chn_ZERO_LENGTH"
        # min_riv_slope is a module-level constant (not defined in this
        # function): lower bound for the channel slope.
        if strRlen != "ZERO-":
            chslope = max(catinfo_sub["RivSlope"].values[i], min_riv_slope)
        else:
            chslope = 0.12345
        # 0.12345 values below are recognizable placeholders used only for
        # the shared zero-length channel profile.
        if strRlen != "ZERO-":
            nchn = catinfo_sub["Ch_n"].values[i]
        else:
            nchn = 0.12345
        if strRlen != "ZERO-":
            floodn = catinfo_sub["FloodP_n"].values[i]
        else:
            floodn = 0.12345
        if strRlen != "ZERO-":
            bkf_width = max(catinfo_sub["BkfWidth"].values[i], 1)
            bkf_depth = max(catinfo_sub["BkfDepth"].values[i], 1)
        else:
            bkf_width = 0.12345
            bkf_depth = 0.12345
        # Real reaches always get their own profile; the first zero-length
        # reach emits the shared default profile, later ones just reference it.
        if strRlen != "ZERO-":
            output_string_chn_rvp_sub = Generate_Raven_Channel_rvp_string_sub(
                pronam,
                bkf_width,
                bkf_depth,
                chslope,
                catinfo_sub["MeanElev"].values[i],
                floodn,
                nchn,
                iscalmanningn,
            )
        elif strRlen == "ZERO-" and not_write_default_channel:
            output_string_chn_rvp_sub = Generate_Raven_Channel_rvp_string_sub(
                pronam,
                bkf_width,
                bkf_depth,
                chslope,
                catinfo_sub["MeanElev"].values[i],
                floodn,
                nchn,
                iscalmanningn,
            )
            not_write_default_channel = False
        else:
            output_string_chn_rvp_sub = []
            output_string_chn_rvp_sub.append("# Sub "+ Strcat + " refer to Chn_ZERO_LENGTH ")
            output_string_chn_rvp_sub.append("##############new channel ##############################")
            output_string_chn_rvp_sub = "\n".join(output_string_chn_rvp_sub)
        Channel_rvp_string_list.append(output_string_chn_rvp_sub)
        # Gauged flag: any subbasin with a gauge, plus (optionally) every
        # lake subbasin when Lake_As_Gauge is set.
        if catinfo_sub["Has_Gauge"].values[i] > 0:
            Guage = "1"
        elif (
            catinfo_sub["Lake_Cat"].values[i] > 0 and Lake_As_Gauge == True
        ):
            Guage = "1"
        else:
            Guage = "0"
        Model_rvh_string_list.append(
            " "
            + Strcat
            + tab
            + "sub"
            + Strcat
            + tab
            + StrDid
            + tab
            + pronam
            + tab
            + strRlen
            + tab
            + Guage
        )
    Model_rvh_string_list.append(":EndSubBasins") # orvh.write(":EndSubBasins"+"\n")
    Model_rvh_string_list.append("\n") # orvh.write("\n")
    ##########################################
    # Model_rvh_string_list.append(":SubBasinProperties")
    # Model_rvh_string_list.append(":Parameters, TIME_TO_PEAK, TIME_CONC, TIME_LAG,")
    # Model_rvh_string_list.append(":Units , d , d, d,")
    #
    #
    # for i in range(0, len(catinfo_sub)):
    # ### Get catchment width and dpeth
    # catid = int(catinfo_sub["SubId"].values[i])
    # subarea = int(catinfo_sub["BasArea"].values[i]/1000/1000)
    # if (catinfo_sub["Lake_Cat"].values[i] <= 0):
    # routing_area = subarea
    # else:
    # routing_area = max(0.0001,subarea - catinfo_sub["LakeArea"].values[i]/1000/1000)
    #
    # Tc = max(0.01,0.76*routing_area**0.38/24)
    # Tl = 0.6*Tc
    # Tp = Tr/2 +Tl
    #
    # Tc = '{:>10.4f}'.format(Tc) + "," + tab
    # Tl = '{:>10.4f}'.format(Tl) + "," + tab
    # Tp = '{:>10.4f}'.format(Tp) + "," + tab
    # Model_rvh_string_list.append(tab + str(catid) + "," + tab + Tp + Tc + Tl)
    #
    # Model_rvh_string_list.append(":EndSubBasinProperties")
    #
    # Model_rvh_string_list.append("\n") # orvh.write("\n")
    ##########################################
    # HRU table: one fixed-width line per HRU row of the input dataframe.
    Model_rvh_string_list.append(":HRUs") # orvh.write(":HRUs"+"\n")
    Model_rvh_string_list.append(
        " :Attributes AREA ELEVATION LATITUDE LONGITUDE BASIN_ID LAND_USE_CLASS VEG_CLASS SOIL_PROFILE AQUIFER_PROFILE TERRAIN_CLASS SLOPE ASPECT"
    ) # orvh.write(" :Attributes AREA ELEVATION LATITUDE LONGITUDE BASIN_ID LAND_USE_CLASS VEG_CLASS SOIL_PROFILE AQUIFER_PROFILE TERRAIN_CLASS SLOPE ASPECT"+"\n")
    Model_rvh_string_list.append(
        " :Units km2 m deg deg none none none none none none deg deg"
    ) # orvh.write(" :Units km2 m deg deg none none none none none none deg deg"+"\n")
    Lake_HRU_Name = None
    for i in range(0, len(catinfo_hru.index)):
        hruid = int(catinfo_hru["HRU_ID"].values[i])
        catslope = catinfo_hru["HRU_S_mean"].values[i]
        cataspect = catinfo_hru["HRU_A_mean"].values[i]
        # Area floored at 0.0001 km2 so Raven never sees a zero-area HRU.
        catarea2 = max(
            0.0001, catinfo_hru["HRU_Area"].values[i] / 1000 / 1000
        ) ### in km2
        StrGid = str(hruid) # str( catinfo_hru.iloc[i][HRU_Area_NM])+tab
        # NOTE: catid is rebound here as a STRING column (with trailing tab),
        # unlike the integer catid in the subbasin loop above.
        catid = str(int(catinfo_hru["SubId"].values[i])) + tab
        StrGidarea = '{:>10.4f}'.format(catarea2) + tab #str(catarea2) + tab
        StrGidelev = '{:>10.4f}'.format(catinfo_hru["HRU_E_mean"].values[i]) + tab #str(catinfo_hru["HRU_E_mean"].values[i]) + tab
        lat = '{:>10.4f}'.format(catinfo_hru["HRU_CenY"].values[i]) + tab #str(catinfo_hru["HRU_CenY"].values[i]) + tab
        lon = '{:>10.4f}'.format(catinfo_hru["HRU_CenX"].values[i]) + tab #str(catinfo_hru["HRU_CenX"].values[i]) + tab
        LAND_USE_CLASS = catinfo_hru["LAND_USE_C"].values[i] + tab
        VEG_CLASS = catinfo_hru["VEG_C"].values[i] + tab
        SOIL_PROFILE = catinfo_hru["SOIL_PROF"].values[i] + tab
        AQUIFER_PROFILE = "[NONE]" + tab
        TERRAIN_CLASS = "[NONE]" + tab
        SLOPE = '{:>10.6f}'.format(catslope) + tab #str(catslope) + tab
        # Convert the GIS aspect convention to the one Raven expects:
        # GRASS shifts by 270 deg with wrap-around, ArcGIS/QGIS mirrors as
        # 360 - aspect -- TODO(review): confirm these conversions against
        # the source GIS conventions.
        if aspect_from_gis == 'grass':
            asp_temp = 270 + cataspect
            if asp_temp > 360:
                asp_temp = asp_temp - 360
            ASPECT = '{:>10.4f}'.format(asp_temp) + tab # str(asp_temp) + tab
        elif aspect_from_gis == 'arcgis' or aspect_from_gis == 'qgis':
            asp_temp = -(-360 + cataspect)
            if asp_temp > 360:
                asp_temp = asp_temp - 360
            ASPECT = str(asp_temp) + tab
        else:
            ASPECT = '{:>10.4f}'.format(cataspect) + tab #str(cataspect) + tab
        Model_rvh_string_list.append(
            " "
            + StrGid
            + tab
            + StrGidarea
            + StrGidelev
            + lat
            + lon
            + catid
            + LAND_USE_CLASS
            + VEG_CLASS
            + SOIL_PROFILE
            + AQUIFER_PROFILE
            + TERRAIN_CLASS
            + SLOPE
            + ASPECT
        ) # orvh.write(" "+StrGid+tab+StrGidarea+StrGidelev+lat+lon+catid+LAND_USE_CLASS+VEG_CLASS+SOIL_PROFILE+AQUIFER_PROFILE+TERRAIN_CLASS+SLOPE+ASPECT+"\n")
        # Lake HRUs additionally feed the lake subbasin grouping table;
        # Lake_HRU_Name remembers the (last) lake land-use class.
        if catinfo_hru["HRU_IsLake"].values[i] == 1:
            GroupName = Return_Group_Name_Based_On_Value(
                catinfo_hru["HRU_Area"].values[i]/1000/1000,
                SubBasinGroup_NM_Lake,
                SubBasinGroup_Area_Lake,
            )
            SubBasin_Group_Lake.loc[i, "SubId"] = catinfo_hru["SubId"].values[i]
            SubBasin_Group_Lake.loc[i, "SubBasin_Group_NM"] = GroupName
            Lake_HRU_Name = LAND_USE_CLASS
    Model_rvh_string_list.append(":EndHRUs") # orvh.write(":EndHRUs"+"\n")
    # Emit the lake HRU group and the Lakes.rvh redirect only when at least
    # one lake HRU was seen (Lake_HRU_Name stays None otherwise).
    if Lake_HRU_Name != None:
        Model_rvh_string_list.append(
            ":PopulateHRUGroup Lake_HRUs With LANDUSE EQUALS " + Lake_HRU_Name
        ) # orvh.write(":PopulateHRUGroup Lake_HRUs With LANDUSE EQUALS Lake_HRU" + "\n")
        Model_rvh_string_list.append(
            ":RedirectToFile " + "Lakes.rvh"
        ) # orvh.write(":RedirectToFile TestLake.rvh")
    # Channel-length subbasin groups: member IDs written ten per line.
    for i in range(0, len(SubBasinGroup_NM_Channel)):
        Model_rvh_string_list.append(":SubBasinGroup " + SubBasinGroup_NM_Channel[i])
        SubBasin_Group_Channel_i = SubBasin_Group_Channel.loc[
            SubBasin_Group_Channel["SubBasin_Group_NM"] == SubBasinGroup_NM_Channel[i]
        ]
        SubIDs_In_Group = SubBasin_Group_Channel_i["SubId"].values
        nsubbasin = 0
        for j in range(0, len(SubIDs_In_Group)):
            if nsubbasin == 0:
                SubIDs_In_Group_Str_list = [" "]
            SubIDs_In_Group_Str_list.append(str(int(SubIDs_In_Group[j])))
            nsubbasin = nsubbasin + 1
            if nsubbasin == 10 or j == len(SubIDs_In_Group) - 1:
                SubIDs_In_Group_Str = " ".join(SubIDs_In_Group_Str_list)
                Model_rvh_string_list.append(SubIDs_In_Group_Str)
                nsubbasin = 0
        Model_rvh_string_list.append(":EndSubBasinGroup ")
        # Commented-out templates showing how a user can override channel
        # parameters per group.
        Model_rvh_string_list.append(
            "# :SBGroupPropertyOverride "
            + SubBasinGroup_NM_Channel[i]
            + " MANNINGS_N 0.001"
        )
        Model_rvh_string_list.append(
            "# :SBGroupPropertyMultiplier "
            + SubBasinGroup_NM_Channel[i]
            + " MANNINGS_N 1.0"
        )
    # Lake-area subbasin groups, written the same way.
    for i in range(0, len(SubBasinGroup_NM_Lake)):
        Model_rvh_string_list.append(":SubBasinGroup " + SubBasinGroup_NM_Lake[i])
        SubBasin_Group_Lake_i = SubBasin_Group_Lake.loc[
            SubBasin_Group_Lake["SubBasin_Group_NM"] == SubBasinGroup_NM_Lake[i]
        ]
        SubIDs_In_Group = SubBasin_Group_Lake_i["SubId"].values
        nsubbasin = 0
        for j in range(0, len(SubIDs_In_Group)):
            if nsubbasin == 0:
                SubIDs_In_Group_Str_list = [" "]
            SubIDs_In_Group_Str_list.append(str(int(SubIDs_In_Group[j])))
            nsubbasin = nsubbasin + 1
            if nsubbasin == 10 or j == len(SubIDs_In_Group) - 1:
                SubIDs_In_Group_Str = " ".join(SubIDs_In_Group_Str_list)
                Model_rvh_string_list.append(SubIDs_In_Group_Str)
                nsubbasin = 0
        Model_rvh_string_list.append(":EndSubBasinGroup ")
        Model_rvh_string_list.append(
            "# :SBGroupPropertyOverride "
            + SubBasinGroup_NM_Lake[i]
            + " RESERVOIR_CREST_WIDTH 12.0"
        )
        Model_rvh_string_list.append(
            "# :SBGroupPropertyMultiplier "
            + SubBasinGroup_NM_Lake[i]
            + " RESERVOIR_CREST_WIDTH 1.0"
        )
    Channel_rvp_string = "\n".join(Channel_rvp_string_list)
    Model_rvh_string = "\n".join(Model_rvh_string_list)
    return (
        Channel_rvp_file_path,
        Channel_rvp_string,
        Model_rvh_file_path,
        Model_rvh_string,
        Model_rvp_file_path,
        Model_rvp_string_modify,
    )
####
# Outputs
####
def plotGuagelineobs(scenario, data, outfilename):
    """Plot the simulated hydrograph of every scenario as a line plus the
    observations ("Obs" column) as a grey scatter, and save to *outfilename*.
    """
    import matplotlib
    import matplotlib.pyplot as plt
    import numpy as np

    plt.rc("font", family="serif")
    plt.rc("xtick", labelsize="x-small")
    plt.rc("ytick", labelsize="x-small")

    figure = plt.figure(figsize=(7.48031, 3))
    axis = figure.add_subplot(1, 1, 1)
    line_colors = np.array(["b", "k", "g", "y", "c"])

    for idx, scenario_name in enumerate(scenario):
        axis.plot(
            data.index,
            data[scenario_name],
            color=line_colors[idx],
            ls="solid",
            linewidth=1,
            label=scenario_name,
        )
    axis.scatter(data.index, data["Obs"], color="grey", s=0.1, label="Observation")
    plt.legend(loc="upper left", frameon=False, ncol=2, prop={"size": 6})
    # NOTE: the y-limit is scaled from the LAST scenario plotted -- same as
    # the original, which reused the loop index after the loop finished.
    last_peak = max(data[scenario[-1]].values)
    plt.ylim(0, last_peak + last_peak * 0.1)
    plt.xlabel("Model Time")
    plt.ylabel("Discharge (m3/s)")
    plt.savefig(outfilename, bbox_inches="tight", dpi=300)
    plt.close()
def plotGuageerror(basename, scenario, data, Diagno):
    """Write three error-diagnostic figures per scenario into ./Figures/:
    an error histogram, a lag-1 error scatter, and the error ACF.
    Relies on module-level plt, math, np and plot_acf.
    """
    # The observation column is always taken from the FIRST scenario.
    obs_col = basename + "_" + scenario[0] + "_obs"
    for i in range(0, len(scenario)):
        plt.rc("font", family="serif")
        plt.rc("xtick", labelsize="x-small")
        plt.rc("ytick", labelsize="x-small")
        fig = plt.figure(figsize=(3, 3))
        ax = fig.add_subplot(1, 1, 1)
        sim_col = basename + "_" + scenario[i] + "_sim"
        obs_vals = data[obs_col].values
        sim_vals = data[sim_col].values
        # (obs, sim, sim - obs) triples for every time step with a valid obs.
        results = np.array(
            [
                (obs_vals[k], sim_vals[k], sim_vals[k] - obs_vals[k])
                for k in range(0, len(data))
                if not math.isnan(obs_vals[k])
            ]
        )
        if len(results) <= 0:
            print(scenario[i])
            plt.close()
            continue
        errors = results[:, 2]
        # Figure 1: histogram of residuals.
        plt.hist(errors, bins="auto")
        plt.savefig(
            "./Figures/" + basename + scenario[i] + "_errhist.pdf",
            bbox_inches="tight",
            dpi=300,
        )
        plt.close()
        #######
        # Figure 2: residual at t against residual at t+1.
        plt.rc("font", family="serif")
        plt.rc("xtick", labelsize="x-small")
        plt.rc("ytick", labelsize="x-small")
        fig = plt.figure(figsize=(3, 3))
        ax = fig.add_subplot(1, 1, 1)
        ax.scatter(
            errors[0 : len(results) - 2],
            errors[1 : len(results) - 1],
            color="grey",
            s=0.1,
        )
        plt.savefig(
            "./Figures/" + basename + scenario[i] + "_errt1t2.pdf",
            bbox_inches="tight",
            dpi=300,
        )
        plt.close()
        ########
        # Figure 3: autocorrelation function of the residuals.
        plt.rc("font", family="serif")
        plt.rc("xtick", labelsize="x-small")
        plt.rc("ytick", labelsize="x-small")
        fig = plt.figure(figsize=(3, 3))
        ax = fig.add_subplot(1, 1, 1)
        plot_acf(errors)
        plt.savefig(
            "./Figures/" + basename + scenario[i] + "_erracf.pdf",
            bbox_inches="tight",
            dpi=300,
        )
        plt.close()
def plotGuageerrorquainy(basename, scenario, data, Diagno, errpars):
    """For each scenario, scatter the standardized residual
    (error / (a*sim + b), a and b from *errpars*) against the empirical
    non-exceedance fraction of the simulated flow, saving one PDF per
    scenario into ./Figures/. Relies on module-level plt, math and np.
    """
    # Observations always come from the FIRST scenario's column.
    obs_col = basename + "_" + scenario[0] + "_obs"
    for i in range(0, len(scenario)):
        sim_col = basename + "_" + scenario[i] + "_sim"
        obs_vals = data[obs_col].values
        sim_vals = data[sim_col].values
        # (obs, sim, sim - obs) triples for steps with a valid observation.
        results = np.array(
            [
                (obs_vals[k], sim_vals[k], sim_vals[k] - obs_vals[k])
                for k in range(0, len(data))
                if not math.isnan(obs_vals[k])
            ]
        )
        if len(results) <= 0:
            continue
        plt.rc("font", family="serif")
        plt.rc("xtick", labelsize="x-small")
        plt.rc("ytick", labelsize="x-small")
        fig = plt.figure(figsize=(7.48031, 3))
        ax = fig.add_subplot(1, 1, 1)
        colors = np.array(["b", "k", "g", "y", "c"])
        err_a = errpars["a"].values[i]
        err_b = errpars["b"].values[i]
        n_points = float(len(results))
        for j in range(0, len(results)):
            jsim = results[j, 1]
            jerror = results[j, 2] / (err_a * jsim + err_b)
            # Rows whose simulated flow does not exceed this one.
            nlarge = results[results[:, 1] <= jsim]
            ax.scatter(float(len(nlarge)) / n_points, jerror, color="grey", s=0.1)
        plt.savefig(
            "./Figures/" + basename + scenario[i] + "_errqutiv.pdf",
            bbox_inches="tight",
            dpi=300,
        )
        plt.close()
def NSE(obs, sim):
    """Return the Nash-Sutcliffe efficiency of *sim* against *obs*.

    NSE = 1 - sum((sim - obs)^2) / sum((obs - mean(obs))^2)

    Parameters
    ----------
    obs, sim : numpy array_like of float
        Observed and simulated series of equal length.

    Returns
    -------
    float
        1.0 for a perfect fit; 0.0 means the model performs no better
        than predicting the observed mean; negative is worse than that.
    """
    import numpy as np

    obsmean = np.mean(obs)
    # BUGFIX: the denominator must be the spread of the OBSERVATIONS about
    # their mean; the original used (sim - obsmean) ** 2, which is not the
    # Nash-Sutcliffe definition and biased the score.
    diffobsmean = (obs - obsmean) ** 2
    diffsim = (sim - obs) ** 2
    return 1 - np.sum(diffsim) / np.sum(diffobsmean)
def PlotHydrography_Raven_alone(
    Path_rvt_Folder="#",
    Path_Hydrographs_output_file=["#"],
    Scenario_NM=["#", "#"],
    OutputFolder="./",
):
    """Pair gauge observation rvt files with Raven Hydrographs.csv output
    and compute the NSE of the first scenario at each gauge.

    Parameters
    ----------
    Path_rvt_Folder : string
        Folder containing the ``*.rvt`` observation files; the subbasin id
        is read from each file's first ``:ObservationData`` line.
    Path_Hydrographs_output_file : list of string
        One Raven hydrograph CSV per scenario in Scenario_NM.
    Scenario_NM : list of string
        Column labels under which each hydrograph file is stored.
    OutputFolder : string
        Destination folder (plotting call is currently commented out).

    Notes
    -----
    The mutable list defaults are never mutated by this function, so the
    shared-default pitfall does not apply; they are kept for backward
    compatibility. Diagnostics are printed; nothing is returned.
    """
    Obs_rvt_NMS = []
    ###obtain obs rvt file name
    for file in os.listdir(Path_rvt_Folder):
        if file.endswith(".rvt"):
            Obs_rvt_NMS.append(file)
    Metric_OUT = pd.DataFrame(Obs_rvt_NMS, columns=["Obs_NM"])
    Metric_OUT["SubId"] = np.nan
    Metric_OUT["NSE"] = np.nan
    print(Metric_OUT)
    Obs_subids = []
    for i in range(0, len(Obs_rvt_NMS)):
        ###find subID
        obs_nm = Obs_rvt_NMS[i]
        ifilepath = os.path.join(Path_rvt_Folder, obs_nm)
        # -1.2345 is the sentinel for "not an observation rvt file"; set it
        # up front so an empty file no longer raises NameError.
        obssubid = -1.2345
        # BUGFIX: open the rvt file in a context manager so the handle is
        # closed on every path (the original never closed it).
        with open(ifilepath, "r") as f:
            # print(ifilepath)
            for line in f:
                firstline_info = line.split()
                # print(firstline_info)
                # Guard against a blank first line (original raised IndexError).
                if firstline_info and firstline_info[0] == ":ObservationData":
                    obssubid = int(firstline_info[2])
                    break ### only read first line
                else:
                    obssubid = -1.2345
                    break ### only read first line
        # print(obssubid)
        Obs_subids.append(obssubid)
        Metric_OUT.loc[i, "SubId"] = obssubid
        ## this is not a observation rvt file
        if obssubid == -1.2345:
            continue
        ####assign column name in the hydrography.csv
        colnm_obs = "sub" + str(obssubid) + " (observed) [m3/s]"
        colnm_sim = "sub" + str(obssubid) + " [m3/s]"
        colnm_Date = "date"
        colnm_hr = "hour"
        ##obtain data from all provided hydrograpy csv output files each hydrograpy csv need has a coorespond scenario name
        Initial_data_frame = 1
        readed_data_correc = 1
        data_len = []
        # print(Metric_OUT)
        for j in range(0, len(Path_Hydrographs_output_file)):
            Path_Hydrographs_output_file_j = Path_Hydrographs_output_file[j]
            # print(Path_Hydrographs_output_file_j)#
            i_simresult = pd.read_csv(Path_Hydrographs_output_file[j], sep=",")
            colnames = i_simresult.columns
            ## check if obs name exist in the hydrograpy csv output files
            if colnm_obs in colnames:
                ## Initialize the read-in data frame from the first file;
                ## later files are aligned on the Date column.
                if Initial_data_frame == 1:
                    Readed_Data = i_simresult[[colnm_Date, colnm_hr]]
                    Readed_Data["Obs"] = i_simresult[colnm_obs]
                    Readed_Data[Scenario_NM[j]] = i_simresult[colnm_sim]
                    Readed_Data["Date"] = pd.to_datetime(
                        i_simresult[colnm_Date] + " " + i_simresult[colnm_hr]
                    )
                    Initial_data_frame = -1
                    data_len.append(len(Readed_Data))
                else:
                    tempdata = i_simresult[[colnm_Date, colnm_hr]]
                    tempdata["Date"] = pd.to_datetime(
                        i_simresult[colnm_Date] + " " + i_simresult[colnm_hr]
                    )
                    rowmask = Readed_Data["Date"].isin(tempdata["Date"].values)
                    Readed_Data.loc[rowmask, Scenario_NM[j]] = i_simresult[
                        colnm_sim
                    ].values
                    data_len.append(len(tempdata))
            else:
                readed_data_correc = -1
                continue
        print(readed_data_correc)
        if readed_data_correc == -1:
            continue
        # Truncate to the shortest scenario so all columns align, then
        # aggregate to daily values.
        datalen = min(data_len)
        # Readed_Data = Readed_Data.drop(columns=[colnm_Date,colnm_hr])
        Readed_Data = Readed_Data.head(datalen)
        Readed_Data = Readed_Data.set_index("Date")
        Readed_Data = Readed_Data.resample("D").sum()
        Readed_Data["ModelTime"] = Readed_Data.index.strftime("%Y-%m-%d")
        # NSE is evaluated only on days with positive observed flow and only
        # for the first scenario.
        Data_NSE = Readed_Data[Readed_Data["Obs"] > 0]
        Metric_OUT.loc[i, "NSE"] = NSE(
            Data_NSE["Obs"].values, Data_NSE[Scenario_NM[0]].values
        )
        print("adfadsfadsfadsf")
        print(Metric_OUT)
    # plotGuagelineobs(Scenario_NM,Readed_Data,os.path.join(OutputFolder,obs_nm + '.pdf'))
def Caluculate_Lake_Active_Depth_and_Lake_Evap(
    Path_Finalcat_info="#",
    Path_ReservoirStages="#",
    Path_ReservoirMassBalance="#",
    Output_Folder="#",
):
    """Summarize lake stage and evaporation from reservoir model outputs.

    Reads the catchment attribute table (the ``.dbf`` companion of
    ``Path_Finalcat_info``), the simulated reservoir-stage CSV and the
    reservoir mass-balance CSV, then writes three summary CSVs into
    ``Output_Folder``:

    * ``Lake_Wat_Ave_Depth_Vol.csv`` -- per-timestep area-weighted average
      stage, stage*area totals, and the same quantities restricted to lakes
      whose stage is below the crest (stage < 0).
    * ``Lake_Stage_Yearly_statistics.csv`` -- per-lake multi-year stage
      statistics (filled via ``Calulate_Yearly_Reservior_stage_statistics``).
    * ``Lake_MB_Yearly_statistics.csv`` -- per-year total lake evaporation
      loss summed over all lakes.

    Parameters
    ----------
    Path_Finalcat_info : str
        Path to the final catchment info file; only its ``.dbf`` twin is read.
    Path_ReservoirStages : str
        Reservoir-stage CSV with "date", "hour" and one "sub<SubId> "
        column per lake subbasin.
    Path_ReservoirMassBalance : str
        Mass-balance CSV with "date", "hour" and one
        "sub<SubId> losses [m3]" column per lake subbasin.
    Output_Folder : str
        Destination folder for the three output CSV files.

    Notes
    -----
    Relies on a module-level ``os`` import (not imported locally here).
    """
    import numpy as np
    import pandas as pd
    from simpledbf import Dbf5

    # Catchment attributes come from the shapefile's companion .dbf table.
    hyinfocsv = Path_Finalcat_info[:-3] + "dbf"
    tempinfo = Dbf5(hyinfocsv)
    finalcat_info = tempinfo.to_dataframe()
    Res_Stage_info = pd.read_csv(Path_ReservoirStages, sep=",", header=0)
    Res_MB_info = pd.read_csv(Path_ReservoirMassBalance, sep=",", header=0)
    # Build datetime indexes from the separate "date" and "hour" columns.
    Res_Stage_info["Date_2"] = pd.to_datetime(
        Res_Stage_info["date"] + " " + Res_Stage_info["hour"]
    )
    Res_Stage_info = Res_Stage_info.set_index("Date_2")
    Res_MB_info["Date_2"] = pd.to_datetime(
        Res_MB_info["date"] + " " + Res_MB_info["hour"]
    )
    Res_MB_info = Res_MB_info.set_index("Date_2")
    # Per-timestep summary table, seeded from the stage table so it shares
    # the same index and "date"/"hour" columns.
    Res_Wat_Ave_Dep_Vol = Res_Stage_info.copy(deep=True)
    Res_Wat_Ave_Dep_Vol["Lake_Area"] = np.nan
    Res_Wat_Ave_Dep_Vol["Lake_Stage_Ave"] = np.nan
    Res_Wat_Ave_Dep_Vol["Lake_Vol"] = np.nan
    Res_Wat_Ave_Dep_Vol["Lake_Stage_Below_Crest"] = np.nan
    Res_Wat_Ave_Dep_Vol["Lake_Vol_Below_Crest"] = np.nan
    Res_Wat_Ave_Dep_Vol["Lake_Area_Below_Crest"] = np.nan
    Col_NMS_Stage = list(Res_Stage_info.columns)
    Col_NMS_MB = list(Res_MB_info.columns)
    # Only lake HRUs (Lake_Cat > 0 and HRU_Type == 1) contribute.
    finalcat_info_lake_hru = finalcat_info.loc[
        (finalcat_info["Lake_Cat"] > 0) & (finalcat_info["HRU_Type"] == 1)
    ]
    ####
    stage_out_NM = [
        "Lake_Id",
        "Lake_Area",
        "Lake_DA",
        "Lake_SubId",
        "#_Day_Active_stage",
        "Min_stage",
        "Max_stage",
        "Ave_stage",
    ]
    data_stage = np.full((len(Col_NMS_Stage), 8), np.nan)
    Stage_Statis = pd.DataFrame(data=data_stage, columns=stage_out_NM)
    istage = 0
    ###
    # Yearly evaporation table; 50 rows is assumed to exceed the number of
    # simulated years (unused rows keep Year == 0).
    Evp_out_NM = ["Year", "Total_Lake_Evp_Loss"]
    data_evp = np.full((50, 2), 0.00000)
    MB_Statis = pd.DataFrame(data=data_evp, columns=Evp_out_NM)
    Year_Begin = Res_MB_info.index.min().year
    Year_end = Res_MB_info.index.max().year
    imb = 0
    for iyr in range(Year_Begin, Year_end + 1):
        MB_Statis.loc[imb, "Year"] = iyr
        imb = imb + 1
    # ---- per-timestep, area-weighted basin-wide stage and volume ----
    for i in range(0, len(Res_Wat_Ave_Dep_Vol)):
        idate = Res_Wat_Ave_Dep_Vol.index[i]
        Res_Wat_Ave_Vol_iday = 0.0
        Res_Wat_Ave_Dep_Vol_iday = 0.0
        Res_Wat_Ave_Dep_Lake_Area = 0.0
        Res_Wat_Lake_Area_Below = 0.0
        Res_Wat_Ave_Vol_Below_iday = 0.0
        Res_Wat_Ave_Dep_Vol_Below_iday = 0.0
        # print("#######################################"+str(i)+"###################################33")
        for j in range(0, len(finalcat_info_lake_hru)):
            LakeId = finalcat_info_lake_hru["HyLakeId"].values[j]
            Lake_Area = finalcat_info_lake_hru["HRU_Area"].values[j]
            Lake_Subid = finalcat_info_lake_hru["SubId"].values[j]
            Lake_DA = finalcat_info_lake_hru["DrainArea"].values[j]
            # Model column naming convention per lake subbasin.
            Mb_Col_NM = "sub" + str(int(Lake_Subid)) + " losses [m3]"
            Stage_Col_NM = "sub" + str(int(Lake_Subid)) + " "
            if Mb_Col_NM in Col_NMS_MB and Stage_Col_NM in Col_NMS_Stage:
                # Accumulate stage * area (volume proxy) and total lake area.
                Res_Wat_Ave_Dep_Vol_iday = (
                    Res_Wat_Ave_Dep_Vol[Stage_Col_NM].values[i] * Lake_Area
                    + Res_Wat_Ave_Dep_Vol_iday
                )
                Res_Wat_Ave_Dep_Lake_Area = Lake_Area + Res_Wat_Ave_Dep_Lake_Area
                if Res_Wat_Ave_Dep_Vol[Stage_Col_NM].values[i] < 0:
                    # Stage below crest: accumulate into the separate totals.
                    Res_Wat_Ave_Vol_Below_iday = (
                        Res_Wat_Ave_Dep_Vol[Stage_Col_NM].values[i] * Lake_Area
                        + Res_Wat_Ave_Vol_Below_iday
                    )
                    Res_Wat_Lake_Area_Below = Lake_Area + Res_Wat_Lake_Area_Below
                    # print(Res_Wat_Ave_Vol_Below_iday,Res_Wat_Lake_Area_Below,Res_Wat_Ave_Dep_Vol[Stage_Col_NM].values[i])
            # print(j,Res_Wat_Ave_Dep_Vol[Stage_Col_NM].values[i],Lake_Area,Res_Wat_Ave_Dep_Vol_iday)
        # Area-weighted average stage; NaN when no lake column matched today.
        if Res_Wat_Ave_Dep_Lake_Area > 0:
            Res_Wat_Ave_Dep_iday = Res_Wat_Ave_Dep_Vol_iday / Res_Wat_Ave_Dep_Lake_Area
        else:
            Res_Wat_Ave_Dep_iday = np.nan
        if Res_Wat_Lake_Area_Below > 0:
            Res_Wat_Ave_Dep_Vol_Below_iday = (
                Res_Wat_Ave_Vol_Below_iday / Res_Wat_Lake_Area_Below
            )
        else:
            Res_Wat_Ave_Dep_Vol_Below_iday = np.nan
        # print(Res_Wat_Ave_Dep_iday,Res_Wat_Ave_Dep_Lake_Area,Res_Wat_Ave_Dep_Vol_iday)
        # print("####################################################")
        Res_Wat_Ave_Dep_Vol.loc[idate, "Lake_Area"] = Res_Wat_Ave_Dep_Lake_Area
        Res_Wat_Ave_Dep_Vol.loc[idate, "Lake_Stage_Ave"] = Res_Wat_Ave_Dep_iday
        Res_Wat_Ave_Dep_Vol.loc[idate, "Lake_Vol"] = Res_Wat_Ave_Dep_Vol_iday
        Res_Wat_Ave_Dep_Vol.loc[
            idate, "Lake_Area_Below_Crest"
        ] = Res_Wat_Lake_Area_Below
        Res_Wat_Ave_Dep_Vol.loc[
            idate, "Lake_Stage_Below_Crest"
        ] = Res_Wat_Ave_Dep_Vol_Below_iday
        Res_Wat_Ave_Dep_Vol.loc[
            idate, "Lake_Vol_Below_Crest"
        ] = Res_Wat_Ave_Vol_Below_iday
    # Keep only the summary columns in the per-timestep output CSV.
    Res_Wat_Ave_Dep_Vol = Res_Wat_Ave_Dep_Vol[
        [
            "date",
            "hour",
            "Lake_Area",
            "Lake_Stage_Ave",
            "Lake_Vol",
            "Lake_Area_Below_Crest",
            "Lake_Stage_Below_Crest",
            "Lake_Vol_Below_Crest",
        ]
    ]
    Res_Wat_Ave_Dep_Vol.to_csv(
        os.path.join(Output_Folder, "Lake_Wat_Ave_Depth_Vol.csv"), sep=","
    )
    # ---- per-lake yearly stage statistics and total evaporation loss ----
    for i in range(0, len(finalcat_info_lake_hru)):
        LakeId = finalcat_info_lake_hru["HyLakeId"].values[i]
        Lake_Area = finalcat_info_lake_hru["HRU_Area"].values[i]
        Lake_Subid = finalcat_info_lake_hru["SubId"].values[i]
        Lake_DA = finalcat_info_lake_hru["DrainArea"].values[i]
        ####
        stage_idx = Stage_Statis.index[istage]
        Stage_Statis.loc[stage_idx, "Lake_Id"] = LakeId
        Stage_Statis.loc[stage_idx, "Lake_Area"] = Lake_Area
        Stage_Statis.loc[stage_idx, "Lake_SubId"] = Lake_Subid
        Stage_Statis.loc[stage_idx, "Lake_DA"] = Lake_DA
        istage = istage + 1
        ###
        Mb_Col_NM = "sub" + str(int(Lake_Subid)) + " losses [m3]"
        Stage_Col_NM = "sub" + str(int(Lake_Subid)) + " "
        # Diagnostic output when the expected columns are missing.
        if Mb_Col_NM not in Col_NMS_MB or Stage_Col_NM not in Col_NMS_Stage:
            print(
                Mb_Col_NM in Col_NMS_MB,
                Mb_Col_NM[0] == Col_NMS_MB[7][0],
                len(Mb_Col_NM),
                len(Mb_Col_NM[7]),
                Mb_Col_NM,
                Col_NMS_MB[7],
            )
            print(
                Stage_Col_NM in Col_NMS_Stage,
                Stage_Col_NM[0] == Col_NMS_Stage[7][0],
                len(Stage_Col_NM),
                len(Col_NMS_Stage[7]),
                Stage_Col_NM,
                Col_NMS_Stage[7],
            )
        if Mb_Col_NM in Col_NMS_MB and Stage_Col_NM in Col_NMS_Stage:
            Mb_info_lake = Res_MB_info[Mb_Col_NM]
            Stage_info_lake = Res_Stage_info[Stage_Col_NM]
            ### For stage
            Stage_Statis = Calulate_Yearly_Reservior_stage_statistics(
                Stage_info_lake, Stage_Statis, stage_idx, Stage_Col_NM
            )
            ### for Mass
            for iyr in range(Year_Begin, Year_end + 1):
                # Sum this lake's yearly losses into the basin-wide total.
                Mb_info_lake_iyr = Mb_info_lake.loc[
                    Stage_info_lake.index.year == iyr
                ].values
                MB_Statis.loc[
                    MB_Statis["Year"] == iyr, "Total_Lake_Evp_Loss"
                ] = MB_Statis.loc[
                    MB_Statis["Year"] == iyr, "Total_Lake_Evp_Loss"
                ] + sum(
                    Mb_info_lake_iyr
                )
    Stage_Statis.to_csv(
        os.path.join(Output_Folder, "Lake_Stage_Yearly_statistics.csv"), sep=","
    )
    MB_Statis.to_csv(
        os.path.join(Output_Folder, "Lake_MB_Yearly_statistics.csv"), sep=","
    )
####
def Calulate_Yearly_Reservior_stage_statistics(
    Stage_info_lake, Stage_Statis, stage_idx, Stage_Col_NM
):
    """Fill one row of ``Stage_Statis`` with multi-year stage statistics.

    For each calendar year of ``Stage_info_lake`` the yearly minimum,
    maximum and mean stage are computed, along with the number of "active"
    days (stage below the crest, i.e. value < 0). The per-year values are
    averaged over all years with data and written into the row ``stage_idx``.

    Parameters
    ----------
    Stage_info_lake : pandas.Series
        Daily reservoir stage for one lake, indexed by a DatetimeIndex.
    Stage_Statis : pandas.DataFrame
        Summary table; the row at ``stage_idx`` is updated in place.
    stage_idx : label
        Index label of the row in ``Stage_Statis`` to fill.
    Stage_Col_NM : str
        Stage column name (kept for interface compatibility; not used here).

    Returns
    -------
    pandas.DataFrame
        ``Stage_Statis`` with "#_Day_Active_stage", "Min_stage",
        "Max_stage" and "Ave_stage" filled for ``stage_idx``.
    """
    # Local import matching this module's style; numpy is otherwise only
    # imported inside sibling functions, so a bare `np` could NameError here.
    import numpy as np

    Year_Begin = Stage_info_lake.index.min().year
    Year_end = Stage_info_lake.index.max().year
    Num_Day_Active_stage_sum = 0
    Min_stage_sum = 0
    Max_stage_sum = 0
    Ave_stage_sum = 0
    nyear = 0
    for iyr in range(Year_Begin, Year_end + 1):
        Stage_info_lake_iyr = Stage_info_lake.loc[
            Stage_info_lake.index.year == iyr
        ].values
        if len(Stage_info_lake_iyr) == 0:
            # A calendar year with no records would crash min()/max() below;
            # skip it so it does not count toward the multi-year average.
            # (The first and last years always have data, so nyear >= 1.)
            continue
        nyear = nyear + 1
        # "Active" days: stage below the crest (negative values).
        Active_Stage_info_lake_iyr = Stage_info_lake_iyr[Stage_info_lake_iyr < 0]
        if len(Active_Stage_info_lake_iyr) > 0:
            Num_Day_Active_stage_sum = Num_Day_Active_stage_sum + len(
                Active_Stage_info_lake_iyr
            )
        Min_stage_sum = Min_stage_sum + min(Stage_info_lake_iyr)
        Max_stage_sum = Max_stage_sum + max(Stage_info_lake_iyr)
        Ave_stage_sum = Ave_stage_sum + np.average(Stage_info_lake_iyr)
    Stage_Statis.loc[stage_idx, "#_Day_Active_stage"] = Num_Day_Active_stage_sum / nyear
    Stage_Statis.loc[stage_idx, "Min_stage"] = Min_stage_sum / nyear
    Stage_Statis.loc[stage_idx, "Max_stage"] = Max_stage_sum / nyear
    Stage_Statis.loc[stage_idx, "Ave_stage"] = Ave_stage_sum / nyear
    return Stage_Statis
| [
"matplotlib.pyplot.hist",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.array",
"copy.copy",
"pandas.date_range",
"pandas.to_datetime",
"pandas.read_sql_query",
"numpy.mean",
"os.path.exists",
"os.listdir",
"simpledbf.Dbf5",
"matplotlib.pyplot.xlabel",
"os.path.split",
"matplotli... | [((8636, 8676), 'os.path.join', 'os.path.join', (['OutputFolder', '"""RavenInput"""'], {}), "(OutputFolder, 'RavenInput')\n", (8648, 8676), False, 'import os\n'), ((8694, 8731), 'os.path.join', 'os.path.join', (['Raveinputsfolder', '"""obs"""'], {}), "(Raveinputsfolder, 'obs')\n", (8706, 8731), False, 'import os\n'), ((8813, 8864), 'shutil.rmtree', 'shutil.rmtree', (['Raveinputsfolder'], {'ignore_errors': '(True)'}), '(Raveinputsfolder, ignore_errors=True)\n', (8826, 8864), False, 'import shutil\n'), ((9493, 9526), 'simpledbf.Dbf5', 'Dbf5', (["(finalcatchpath[:-3] + 'dbf')"], {}), "(finalcatchpath[:-3] + 'dbf')\n", (9497, 9526), False, 'from simpledbf import Dbf5\n'), ((13595, 13620), 'sqlite3.connect', 'sqlite3.connect', (['CA_HYDAT'], {}), '(CA_HYDAT)\n', (13610, 13620), False, 'import sqlite3\n'), ((13786, 13838), 'pandas.read_sql_query', 'pd.read_sql_query', (['sqlstat', 'con'], {'params': '[Station_NM]'}), '(sqlstat, con, params=[Station_NM])\n', (13803, 13838), True, 'import pandas as pd\n'), ((13997, 14115), 'numpy.array', 'np.array', (["[-1.2345, Station_info['DRAINAGE_AREA_GROSS'].values[0], Station_info[\n 'DRAINAGE_AREA_EFFECT'].values[0]]"], {}), "([-1.2345, Station_info['DRAINAGE_AREA_GROSS'].values[0],\n Station_info['DRAINAGE_AREA_EFFECT'].values[0]])\n", (14005, 14115), True, 'import numpy as np\n'), ((14408, 14460), 'pandas.read_sql_query', 'pd.read_sql_query', (['sqlstat', 'con'], {'params': '[Station_NM]'}), '(sqlstat, con, params=[Station_NM])\n', (14425, 14460), True, 'import pandas as pd\n'), ((15284, 15337), 'pandas.date_range', 'pd.date_range', ([], {'start': 'Date_ini', 'end': 'Date_end', 'freq': '"""D"""'}), "(start=Date_ini, end=Date_end, freq='D')\n", (15297, 15337), True, 'import pandas as pd\n'), ((17701, 17734), 'urllib.request.urlopen', 'urllib.request.urlopen', (['urlstlist'], {}), '(urlstlist)\n', (17723, 17734), False, 'import urllib\n'), ((19010, 19043), 'urllib.request.urlopen', 'urllib.request.urlopen', 
(['urlstlist'], {}), '(urlstlist)\n', (19032, 19043), False, 'import urllib\n'), ((19689, 19742), 'pandas.date_range', 'pd.date_range', ([], {'start': 'Date_ini', 'end': 'Date_end', 'freq': '"""D"""'}), "(start=Date_ini, end=Date_end, freq='D')\n", (19702, 19742), True, 'import pandas as pd\n'), ((26169, 26218), 'os.path.join', 'os.path.join', (['outFolderraven', "(Model_Name + '.rvt')"], {}), "(outFolderraven, Model_Name + '.rvt')\n", (26181, 26218), False, 'import os\n'), ((32630, 32679), 'os.path.join', 'os.path.join', (['outFolderraven', "(Model_Name + '.rvt')"], {}), "(outFolderraven, Model_Name + '.rvt')\n", (32642, 32679), False, 'import os\n'), ((42799, 42842), 'os.path.join', 'os.path.join', (['Raveinputsfolder', '"""Lakes.rvh"""'], {}), "(Raveinputsfolder, 'Lakes.rvh')\n", (42811, 42842), False, 'import os\n'), ((56272, 56328), 'os.path.join', 'os.path.join', (['Raveinputsfolder', '"""channel_properties.rvp"""'], {}), "(Raveinputsfolder, 'channel_properties.rvp')\n", (56284, 56328), False, 'import os\n'), ((56388, 56439), 'os.path.join', 'os.path.join', (['Raveinputsfolder', "(Model_Name + '.rvh')"], {}), "(Raveinputsfolder, Model_Name + '.rvh')\n", (56400, 56439), False, 'import os\n'), ((56497, 56548), 'os.path.join', 'os.path.join', (['Raveinputsfolder', "(Model_Name + '.rvp')"], {}), "(Raveinputsfolder, Model_Name + '.rvp')\n", (56509, 56548), False, 'import os\n'), ((57337, 57356), 'copy.copy', 'copy.copy', (['ocatinfo'], {}), '(ocatinfo)\n', (57346, 57356), False, 'import copy\n'), ((57371, 57390), 'copy.copy', 'copy.copy', (['ocatinfo'], {}), '(ocatinfo)\n', (57380, 57390), False, 'import copy\n'), ((71198, 71228), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (71204, 71228), True, 'import matplotlib.pyplot as plt\n'), ((71233, 71269), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': '"""x-small"""'}), "('xtick', labelsize='x-small')\n", (71239, 71269), True, 'import 
matplotlib.pyplot as plt\n'), ((71274, 71310), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': '"""x-small"""'}), "('ytick', labelsize='x-small')\n", (71280, 71310), True, 'import matplotlib.pyplot as plt\n'), ((71321, 71353), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7.48031, 3)'}), '(figsize=(7.48031, 3))\n', (71331, 71353), True, 'import matplotlib.pyplot as plt\n'), ((71401, 71436), 'numpy.array', 'np.array', (["['b', 'k', 'g', 'y', 'c']"], {}), "(['b', 'k', 'g', 'y', 'c'])\n", (71409, 71436), True, 'import numpy as np\n'), ((71753, 71822), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'frameon': '(False)', 'ncol': '(2)', 'prop': "{'size': 6}"}), "(loc='upper left', frameon=False, ncol=2, prop={'size': 6})\n", (71763, 71822), True, 'import matplotlib.pyplot as plt\n'), ((71912, 71936), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Model Time"""'], {}), "('Model Time')\n", (71922, 71936), True, 'import matplotlib.pyplot as plt\n'), ((71941, 71971), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Discharge (m3/s)"""'], {}), "('Discharge (m3/s)')\n", (71951, 71971), True, 'import matplotlib.pyplot as plt\n'), ((71976, 72030), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outfilename'], {'bbox_inches': '"""tight"""', 'dpi': '(300)'}), "(outfilename, bbox_inches='tight', dpi=300)\n", (71987, 72030), True, 'import matplotlib.pyplot as plt\n'), ((72035, 72046), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (72044, 72046), True, 'import matplotlib.pyplot as plt\n'), ((75884, 75896), 'numpy.mean', 'np.mean', (['obs'], {}), '(obs)\n', (75891, 75896), True, 'import numpy as np\n'), ((76258, 76285), 'os.listdir', 'os.listdir', (['Path_rvt_Folder'], {}), '(Path_rvt_Folder)\n', (76268, 76285), False, 'import os\n'), ((76376, 76421), 'pandas.DataFrame', 'pd.DataFrame', (['Obs_rvt_NMS'], {'columns': "['Obs_NM']"}), "(Obs_rvt_NMS, columns=['Obs_NM'])\n", (76388, 76421), True, 'import pandas as 
pd\n'), ((80472, 80487), 'simpledbf.Dbf5', 'Dbf5', (['hyinfocsv'], {}), '(hyinfocsv)\n', (80476, 80487), False, 'from simpledbf import Dbf5\n'), ((80554, 80606), 'pandas.read_csv', 'pd.read_csv', (['Path_ReservoirStages'], {'sep': '""","""', 'header': '(0)'}), "(Path_ReservoirStages, sep=',', header=0)\n", (80565, 80606), True, 'import pandas as pd\n'), ((80625, 80682), 'pandas.read_csv', 'pd.read_csv', (['Path_ReservoirMassBalance'], {'sep': '""","""', 'header': '(0)'}), "(Path_ReservoirMassBalance, sep=',', header=0)\n", (80636, 80682), True, 'import pandas as pd\n'), ((80715, 80784), 'pandas.to_datetime', 'pd.to_datetime', (["(Res_Stage_info['date'] + ' ' + Res_Stage_info['hour'])"], {}), "(Res_Stage_info['date'] + ' ' + Res_Stage_info['hour'])\n", (80729, 80784), True, 'import pandas as pd\n'), ((80884, 80947), 'pandas.to_datetime', 'pd.to_datetime', (["(Res_MB_info['date'] + ' ' + Res_MB_info['hour'])"], {}), "(Res_MB_info['date'] + ' ' + Res_MB_info['hour'])\n", (80898, 80947), True, 'import pandas as pd\n'), ((81898, 81949), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data_stage', 'columns': 'stage_out_NM'}), '(data=data_stage, columns=stage_out_NM)\n', (81910, 81949), True, 'import pandas as pd\n'), ((82038, 82059), 'numpy.full', 'np.full', (['(50, 2)', '(0.0)'], {}), '((50, 2), 0.0)\n', (82045, 82059), True, 'import numpy as np\n'), ((82080, 82127), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data_evp', 'columns': 'Evp_out_NM'}), '(data=data_evp, columns=Evp_out_NM)\n', (82092, 82127), True, 'import pandas as pd\n'), ((8744, 8772), 'os.path.exists', 'os.path.exists', (['OutputFolder'], {}), '(OutputFolder)\n', (8758, 8772), False, 'import os\n'), ((8782, 8807), 'os.makedirs', 'os.makedirs', (['OutputFolder'], {}), '(OutputFolder)\n', (8793, 8807), False, 'import os\n'), ((9092, 9124), 'os.path.exists', 'os.path.exists', (['Raveinputsfolder'], {}), '(Raveinputsfolder)\n', (9106, 9124), False, 'import os\n'), ((9134, 9163), 'os.makedirs', 
'os.makedirs', (['Raveinputsfolder'], {}), '(Raveinputsfolder)\n', (9145, 9163), False, 'import os\n'), ((9175, 9201), 'os.path.exists', 'os.path.exists', (['Obs_Folder'], {}), '(Obs_Folder)\n', (9189, 9201), False, 'import os\n'), ((9211, 9234), 'os.makedirs', 'os.makedirs', (['Obs_Folder'], {}), '(Obs_Folder)\n', (9222, 9234), False, 'import os\n'), ((9335, 9389), 'os.path.join', 'os.path.join', (['Raveinputsfolder', '"""GriddedForcings2.txt"""'], {}), "(Raveinputsfolder, 'GriddedForcings2.txt')\n", (9347, 9389), False, 'import os\n'), ((14238, 14252), 'numpy.nanmax', 'np.nanmax', (['DAS'], {}), '(DAS)\n', (14247, 14252), True, 'import numpy as np\n'), ((72149, 72179), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (72155, 72179), True, 'import matplotlib.pyplot as plt\n'), ((72188, 72224), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': '"""x-small"""'}), "('xtick', labelsize='x-small')\n", (72194, 72224), True, 'import matplotlib.pyplot as plt\n'), ((72233, 72269), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': '"""x-small"""'}), "('ytick', labelsize='x-small')\n", (72239, 72269), True, 'import matplotlib.pyplot as plt\n'), ((72284, 72310), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(3, 3)'}), '(figsize=(3, 3))\n', (72294, 72310), True, 'import matplotlib.pyplot as plt\n'), ((72923, 72940), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (72931, 72940), True, 'import numpy as np\n'), ((73055, 73091), 'matplotlib.pyplot.hist', 'plt.hist', (['results[:, 2]'], {'bins': '"""auto"""'}), "(results[:, 2], bins='auto')\n", (73063, 73091), True, 'import matplotlib.pyplot as plt\n'), ((73100, 73201), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./Figures/' + basename + scenario[i] + '_errhist.pdf')"], {'bbox_inches': '"""tight"""', 'dpi': '(300)'}), "('./Figures/' + basename + scenario[i] + '_errhist.pdf',\n bbox_inches='tight', 
dpi=300)\n", (73111, 73201), True, 'import matplotlib.pyplot as plt\n'), ((73253, 73264), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (73262, 73264), True, 'import matplotlib.pyplot as plt\n'), ((73289, 73319), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (73295, 73319), True, 'import matplotlib.pyplot as plt\n'), ((73328, 73364), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': '"""x-small"""'}), "('xtick', labelsize='x-small')\n", (73334, 73364), True, 'import matplotlib.pyplot as plt\n'), ((73373, 73409), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': '"""x-small"""'}), "('ytick', labelsize='x-small')\n", (73379, 73409), True, 'import matplotlib.pyplot as plt\n'), ((73424, 73450), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(3, 3)'}), '(figsize=(3, 3))\n', (73434, 73450), True, 'import matplotlib.pyplot as plt\n'), ((73664, 73765), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./Figures/' + basename + scenario[i] + '_errt1t2.pdf')"], {'bbox_inches': '"""tight"""', 'dpi': '(300)'}), "('./Figures/' + basename + scenario[i] + '_errt1t2.pdf',\n bbox_inches='tight', dpi=300)\n", (73675, 73765), True, 'import matplotlib.pyplot as plt\n'), ((73817, 73828), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (73826, 73828), True, 'import matplotlib.pyplot as plt\n'), ((73854, 73884), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (73860, 73884), True, 'import matplotlib.pyplot as plt\n'), ((73893, 73929), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': '"""x-small"""'}), "('xtick', labelsize='x-small')\n", (73899, 73929), True, 'import matplotlib.pyplot as plt\n'), ((73938, 73974), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': '"""x-small"""'}), "('ytick', labelsize='x-small')\n", (73944, 73974), True, 'import matplotlib.pyplot as 
plt\n'), ((73989, 74015), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(3, 3)'}), '(figsize=(3, 3))\n', (73999, 74015), True, 'import matplotlib.pyplot as plt\n'), ((74094, 74194), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./Figures/' + basename + scenario[i] + '_erracf.pdf')"], {'bbox_inches': '"""tight"""', 'dpi': '(300)'}), "('./Figures/' + basename + scenario[i] + '_erracf.pdf',\n bbox_inches='tight', dpi=300)\n", (74105, 74194), True, 'import matplotlib.pyplot as plt\n'), ((74246, 74257), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (74255, 74257), True, 'import matplotlib.pyplot as plt\n'), ((74941, 74958), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (74949, 74958), True, 'import numpy as np\n'), ((75018, 75048), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (75024, 75048), True, 'import matplotlib.pyplot as plt\n'), ((75057, 75093), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': '"""x-small"""'}), "('xtick', labelsize='x-small')\n", (75063, 75093), True, 'import matplotlib.pyplot as plt\n'), ((75102, 75138), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': '"""x-small"""'}), "('ytick', labelsize='x-small')\n", (75108, 75138), True, 'import matplotlib.pyplot as plt\n'), ((75153, 75185), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7.48031, 3)'}), '(figsize=(7.48031, 3))\n', (75163, 75185), True, 'import matplotlib.pyplot as plt\n'), ((75241, 75276), 'numpy.array', 'np.array', (["['b', 'k', 'g', 'y', 'c']"], {}), "(['b', 'k', 'g', 'y', 'c'])\n", (75249, 75276), True, 'import numpy as np\n'), ((75659, 75761), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./Figures/' + basename + scenario[i] + '_errqutiv.pdf')"], {'bbox_inches': '"""tight"""', 'dpi': '(300)'}), "('./Figures/' + basename + scenario[i] + '_errqutiv.pdf',\n bbox_inches='tight', dpi=300)\n", (75670, 75761), True, 'import 
matplotlib.pyplot as plt\n'), ((75813, 75824), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (75822, 75824), True, 'import matplotlib.pyplot as plt\n'), ((76643, 76680), 'os.path.join', 'os.path.join', (['Path_rvt_Folder', 'obs_nm'], {}), '(Path_rvt_Folder, obs_nm)\n', (76655, 76680), False, 'import os\n'), ((85721, 85778), 'os.path.join', 'os.path.join', (['Output_Folder', '"""Lake_Wat_Ave_Depth_Vol.csv"""'], {}), "(Output_Folder, 'Lake_Wat_Ave_Depth_Vol.csv')\n", (85733, 85778), False, 'import os\n'), ((88059, 88122), 'os.path.join', 'os.path.join', (['Output_Folder', '"""Lake_Stage_Yearly_statistics.csv"""'], {}), "(Output_Folder, 'Lake_Stage_Yearly_statistics.csv')\n", (88071, 88122), False, 'import os\n'), ((88168, 88228), 'os.path.join', 'os.path.join', (['Output_Folder', '"""Lake_MB_Yearly_statistics.csv"""'], {}), "(Output_Folder, 'Lake_MB_Yearly_statistics.csv')\n", (88180, 88228), False, 'import os\n'), ((11976, 12015), 'os.path.join', 'os.path.join', (['Obs_Folder', '"""obsinfo.csv"""'], {}), "(Obs_Folder, 'obsinfo.csv')\n", (11988, 12015), False, 'import os\n'), ((73014, 73025), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (73023, 73025), True, 'import matplotlib.pyplot as plt\n'), ((75981, 75996), 'numpy.sum', 'np.sum', (['diffsim'], {}), '(diffsim)\n', (75987, 75996), True, 'import numpy as np\n'), ((75999, 76018), 'numpy.sum', 'np.sum', (['diffobsmean'], {}), '(diffobsmean)\n', (76005, 76018), True, 'import numpy as np\n'), ((78031, 78084), 'pandas.read_csv', 'pd.read_csv', (['Path_Hydrographs_output_file[j]'], {'sep': '""","""'}), "(Path_Hydrographs_output_file[j], sep=',')\n", (78042, 78084), True, 'import pandas as pd\n'), ((15629, 15708), 'pandas.to_datetime', 'pd.to_datetime', (["{'year': [row['YEAR']], 'month': [row['MONTH']], 'day': [iday]}"], {}), "({'year': [row['YEAR']], 'month': [row['MONTH']], 'day': [iday]})\n", (15643, 15708), True, 'import pandas as pd\n'), ((20126, 20211), 'pandas.to_datetime', 
'pd.to_datetime', (["{'year': [date[0:4]], 'month': [date[5:7]], 'day': [date[8:10]]}"], {}), "({'year': [date[0:4]], 'month': [date[5:7]], 'day': [date[8:10]]}\n )\n", (20140, 20211), True, 'import pandas as pd\n'), ((26246, 26277), 'os.path.split', 'os.path.split', (['outObsfileFolder'], {}), '(outObsfileFolder)\n', (26259, 26277), False, 'import os\n'), ((89207, 89238), 'numpy.average', 'np.average', (['Stage_info_lake_iyr'], {}), '(Stage_info_lake_iyr)\n', (89217, 89238), True, 'import numpy as np\n'), ((78590, 78659), 'pandas.to_datetime', 'pd.to_datetime', (["(i_simresult[colnm_Date] + ' ' + i_simresult[colnm_hr])"], {}), "(i_simresult[colnm_Date] + ' ' + i_simresult[colnm_hr])\n", (78604, 78659), True, 'import pandas as pd\n'), ((78932, 79001), 'pandas.to_datetime', 'pd.to_datetime', (["(i_simresult[colnm_Date] + ' ' + i_simresult[colnm_hr])"], {}), "(i_simresult[colnm_Date] + ' ' + i_simresult[colnm_hr])\n", (78946, 79001), True, 'import pandas as pd\n')] |
import matplotlib.pyplot as plt
import numpy as np
from kino.steps import Paw
from kino.geometry import Trajectory
class Steps:
    @staticmethod
    def overlay_on_speed_trace(paw: Paw, ax: plt.Axes):
        """
        Draw the paw's normalized speed trace on *ax* and highlight each
        swing phase: filled dots at swing starts, hollow dots at swing
        ends, and a thicker line segment over each swing interval.
        """
        col = paw.trajectory.color
        spd = paw.normalized_speed

        # Base speed trace plus the detection threshold as a dashed line.
        ax.plot(spd, color=col, lw=0.5)
        ax.axhline(paw.speed_th, zorder=-1, color="k", ls="--", lw=0.5)

        # Shared styling for the start/end markers.
        marker = dict(s=25, ec=col, lw=1, alpha=0.5, zorder=100)
        ax.scatter(paw.swings_start, spd[paw.swings_start], color=col, **marker)
        ax.scatter(paw.swings_end, spd[paw.swings_end], color="white", **marker)

        # Re-draw the trace thicker over each swing (inclusive of endpoints).
        for onset, offset in zip(paw.swings_start, paw.swings_end):
            seg = np.arange(onset, offset + 1)
            ax.plot(seg, spd[onset : offset + 1], lw=2, color=col)

    @staticmethod
    def sticks_and_balls(
        paw: Paw, ax: plt.Axes, trajectory: Trajectory = None
    ):
        """
        Balls-and-sticks (o---o) plot of the swing phases in XY space:
        a filled dot at each swing start, a hollow dot at each swing end,
        and a stick joining each start/end pair. Uses the paw's own
        trajectory unless an explicit one is supplied.
        """
        traj = trajectory if trajectory is not None else paw.trajectory
        col = traj.color
        x, y = traj.x, traj.y

        # Filled markers at swing starts, hollow markers at swing ends.
        marker = dict(s=25, ec=col, lw=1, zorder=100)
        ax.scatter(x[paw.swings_start], y[paw.swings_start], color=col, **marker)
        ax.scatter(x[paw.swings_end], y[paw.swings_end], color="white", **marker)

        # One stick per swing, connecting its start and end positions.
        for onset, offset in zip(paw.swings_start, paw.swings_end):
            ax.plot([x[onset], x[offset]], [y[onset], y[offset]], lw=2, color=col)
| [
"numpy.arange"
] | [((1096, 1122), 'numpy.arange', 'np.arange', (['start', '(stop + 1)'], {}), '(start, stop + 1)\n', (1105, 1122), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
run_doe.py generated by WhatsOpt.

Runs a 50-case Latin Hypercube Sampling (LHS) design of experiments on the
modified Branin analysis, records every case in a SQLite file, and (unless
--batch is given) scatter-plots each output against each design variable.
"""
# DO NOT EDIT unless you know what you are doing
# analysis_id: 49

import numpy as np
# import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt

from openmdao.api import Problem, SqliteRecorder, CaseReader
from whatsopt.smt_doe_driver import SmtDoeDriver

from mod_branin import ModBranin

from optparse import OptionParser
parser = OptionParser()
parser.add_option("-b", "--batch",
                  action="store_true", dest="batch", default=False,
                  help="do not plot anything")
(options, args) = parser.parse_args()

# Problem setup: modified Branin model driven by an LHS DOE of 50 cases.
pb = Problem(ModBranin())
pb.driver = SmtDoeDriver(sampling_method='LHS', n_cases=50)

# Attach one SQLite recorder to the driver, the model and the nonlinear
# solver so all case data ends up in a single file.
case_recorder_filename = 'mod_branin_doe.sqlite'
recorder = SqliteRecorder(case_recorder_filename)
pb.driver.add_recorder(recorder)
pb.model.add_recorder(recorder)
pb.model.nonlinear_solver.add_recorder(recorder)

# Design variables, objective and constraint of the problem.
pb.model.add_design_var('x1', lower=-5, upper=10)
pb.model.add_design_var('x2', lower=0, upper=15)
pb.model.add_objective('f')
pb.model.add_constraint('g', upper=0.)

pb.setup()
pb.run_driver()

# In batch mode stop after the DOE run -- no plotting.
if options.batch:
    exit(0)

# Read all recorded cases back and rebuild dense (n, 1) arrays per variable.
reader = CaseReader(case_recorder_filename)
cases = reader.system_cases.list_cases()
n = len(cases)
data = {'inputs': {}, 'outputs': {} }
data['inputs']['x1'] = np.zeros((n,)+(1,))
data['inputs']['x2'] = np.zeros((n,)+(1,))
data['outputs']['f'] = np.zeros((n,)+(1,))
data['outputs']['g'] = np.zeros((n,)+(1,))

for i, case_id in enumerate(cases):
    case = reader.system_cases.get_case(case_id)
    data['inputs']['x1'][i,:] = case.inputs['x1']
    data['inputs']['x2'][i,:] = case.inputs['x2']
    data['outputs']['f'][i,:] = case.outputs['f']
    data['outputs']['g'][i,:] = case.outputs['g']

# 2x2 scatter matrix: objective f (top row) and constraint g (bottom row)
# against x1 (left column) and x2 (right column).
# NOTE(review): the name `input` shadows the builtin; left as generated.
output = data['outputs']['f'].reshape(-1)
input = data['inputs']['x1'].reshape(-1)
plt.subplot(2, 2, 1)
plt.plot(input[0::1], output[0::1], '.')
plt.ylabel('f')
plt.xlabel('x1')

input = data['inputs']['x2'].reshape(-1)
plt.subplot(2, 2, 2)
plt.plot(input[0::1], output[0::1], '.')
plt.xlabel('x2')

output = data['outputs']['g'].reshape(-1)
input = data['inputs']['x1'].reshape(-1)
plt.subplot(2, 2, 3)
plt.plot(input[0::1], output[0::1], '.')
plt.ylabel('g')
plt.xlabel('x1')

input = data['inputs']['x2'].reshape(-1)
plt.subplot(2, 2, 4)
plt.plot(input[0::1], output[0::1], '.')
plt.xlabel('x2')

plt.show()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"openmdao.api.SqliteRecorder",
"optparse.OptionParser",
"mod_branin.ModBranin",
"whatsopt.smt_doe_driver.SmtDoeDriver",
"numpy.zeros",
"matplotlib.pyplot.subplot",
"openmdao.api.CaseReader",
"matplotlib.pyplot.show... | [((420, 434), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (432, 434), False, 'from optparse import OptionParser\n'), ((662, 709), 'whatsopt.smt_doe_driver.SmtDoeDriver', 'SmtDoeDriver', ([], {'sampling_method': '"""LHS"""', 'n_cases': '(50)'}), "(sampling_method='LHS', n_cases=50)\n", (674, 709), False, 'from whatsopt.smt_doe_driver import SmtDoeDriver\n'), ((778, 816), 'openmdao.api.SqliteRecorder', 'SqliteRecorder', (['case_recorder_filename'], {}), '(case_recorder_filename)\n', (792, 816), False, 'from openmdao.api import Problem, SqliteRecorder, CaseReader\n'), ((1180, 1214), 'openmdao.api.CaseReader', 'CaseReader', (['case_recorder_filename'], {}), '(case_recorder_filename)\n', (1190, 1214), False, 'from openmdao.api import Problem, SqliteRecorder, CaseReader\n'), ((1333, 1354), 'numpy.zeros', 'np.zeros', (['((n,) + (1,))'], {}), '((n,) + (1,))\n', (1341, 1354), True, 'import numpy as np\n'), ((1376, 1397), 'numpy.zeros', 'np.zeros', (['((n,) + (1,))'], {}), '((n,) + (1,))\n', (1384, 1397), True, 'import numpy as np\n'), ((1420, 1441), 'numpy.zeros', 'np.zeros', (['((n,) + (1,))'], {}), '((n,) + (1,))\n', (1428, 1441), True, 'import numpy as np\n'), ((1463, 1484), 'numpy.zeros', 'np.zeros', (['((n,) + (1,))'], {}), '((n,) + (1,))\n', (1471, 1484), True, 'import numpy as np\n'), ((1861, 1881), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (1872, 1881), True, 'import matplotlib.pyplot as plt\n'), ((1882, 1922), 'matplotlib.pyplot.plot', 'plt.plot', (['input[0::1]', 'output[0::1]', '"""."""'], {}), "(input[0::1], output[0::1], '.')\n", (1890, 1922), True, 'import matplotlib.pyplot as plt\n'), ((1923, 1938), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""f"""'], {}), "('f')\n", (1933, 1938), True, 'import matplotlib.pyplot as plt\n'), ((1939, 1955), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x1"""'], {}), "('x1')\n", (1949, 1955), True, 'import matplotlib.pyplot as plt\n'), 
((1998, 2018), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (2009, 2018), True, 'import matplotlib.pyplot as plt\n'), ((2019, 2059), 'matplotlib.pyplot.plot', 'plt.plot', (['input[0::1]', 'output[0::1]', '"""."""'], {}), "(input[0::1], output[0::1], '.')\n", (2027, 2059), True, 'import matplotlib.pyplot as plt\n'), ((2060, 2076), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x2"""'], {}), "('x2')\n", (2070, 2076), True, 'import matplotlib.pyplot as plt\n'), ((2163, 2183), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (2174, 2183), True, 'import matplotlib.pyplot as plt\n'), ((2184, 2224), 'matplotlib.pyplot.plot', 'plt.plot', (['input[0::1]', 'output[0::1]', '"""."""'], {}), "(input[0::1], output[0::1], '.')\n", (2192, 2224), True, 'import matplotlib.pyplot as plt\n'), ((2225, 2240), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""g"""'], {}), "('g')\n", (2235, 2240), True, 'import matplotlib.pyplot as plt\n'), ((2241, 2257), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x1"""'], {}), "('x1')\n", (2251, 2257), True, 'import matplotlib.pyplot as plt\n'), ((2300, 2320), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (2311, 2320), True, 'import matplotlib.pyplot as plt\n'), ((2321, 2361), 'matplotlib.pyplot.plot', 'plt.plot', (['input[0::1]', 'output[0::1]', '"""."""'], {}), "(input[0::1], output[0::1], '.')\n", (2329, 2361), True, 'import matplotlib.pyplot as plt\n'), ((2362, 2378), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x2"""'], {}), "('x2')\n", (2372, 2378), True, 'import matplotlib.pyplot as plt\n'), ((2380, 2390), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2388, 2390), True, 'import matplotlib.pyplot as plt\n'), ((637, 648), 'mod_branin.ModBranin', 'ModBranin', ([], {}), '()\n', (646, 648), False, 'from mod_branin import ModBranin\n')] |
import numpy as np
# REPL-style demo of a weighted sum with bias — presumably the classic
# AND-gate perceptron example (w·x + b > 0 decides the output); TODO confirm.
x = np.array([0, 1])      # input signals
w = np.array([0.5, 0.5])  # weights
b = -0.7                  # bias
w * b                     # element-wise product (bare expression, result discarded)
np.sum(w*x)               # weighted input sum
np.sum(w*x) + b           # weighted sum plus bias; here 0.5 - 0.7 = -0.2
"numpy.array",
"numpy.sum"
] | [((24, 40), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (32, 40), True, 'import numpy as np\n'), ((46, 66), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (54, 66), True, 'import numpy as np\n'), ((85, 98), 'numpy.sum', 'np.sum', (['(w * x)'], {}), '(w * x)\n', (91, 98), True, 'import numpy as np\n'), ((98, 111), 'numpy.sum', 'np.sum', (['(w * x)'], {}), '(w * x)\n', (104, 111), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""A collection of combination methods for clustering
"""
# Author: <NAME> <<EMAIL>>
# License: BSD 2 clause
import numpy as np
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted
from numpy.testing import assert_equal
from pyod.utils.utility import check_parameter
from .base import BaseAggregator
from .score_comb import majority_vote
OFFSET_FACTOR = 1000000
class ClustererEnsemble(BaseAggregator):
    """Clusterer Ensemble combines multiple base clustering estimators by
    alignment. See :cite:`zhou2006clusterer` for details.

    Parameters
    ----------
    base_estimators : list or numpy array of shape (n_estimators,)
        A list of base estimators. Estimators must have a `labels_`
        attribute once fitted. Sklearn clustering estimators are recommended.

    n_clusters : int, optional (default=8)
        The number of clusters.

    weights : numpy array of shape (n_estimators,)
        Estimator weights. May be used after the alignment.

    reference_idx : int in range [0, n_estimators-1], optional (default=0)
        The ith base estimator used as the reference for label alignment.

    pre_fitted : bool, optional (default=False)
        Whether the base estimators are trained. If True, `fit`
        process may be skipped.

    Attributes
    ----------
    labels_ : int
        The predicted label of the fitted data.
    """

    def __init__(self, base_estimators, n_clusters, weights=None,
                 reference_idx=0,
                 pre_fitted=False):
        super(ClustererEnsemble, self).__init__(
            base_estimators=base_estimators, pre_fitted=pre_fitted)

        # a meaningful ensemble needs at least two clusters
        check_parameter(n_clusters, low=2, param_name='n_clusters')
        self.n_clusters = n_clusters

        # the reference estimator must be a valid index into base_estimators
        check_parameter(reference_idx, low=0, high=self.n_base_estimators_ - 1,
                        include_left=True, include_right=True)
        self.reference_idx = reference_idx

        # set estimator weights
        self._set_weights(weights)

    def fit(self, X):
        """Fit estimators.

        Parameters
        ----------
        X : numpy array of shape (n_samples, n_features)
            The input samples.
        """
        # Validate inputs X
        X = check_array(X)

        # initialize the score matrix to store the results
        original_labels = np.zeros([X.shape[0], self.n_base_estimators_])

        if self.pre_fitted:
            print("Training Skipped")
        else:
            for clf in self.base_estimators:
                clf.fit(X)
                clf.fitted_ = True

        # collect raw labels from each fitted estimator, one column each
        for i, estimator in enumerate(self.base_estimators):
            check_is_fitted(estimator, ['labels_'])
            original_labels[:, i] = estimator.labels_
        self.original_labels_ = original_labels

        # get the aligned result
        self.labels_, self.aligned_labels_ = clusterer_ensemble_scores(
            original_labels,
            self.n_base_estimators_,
            n_clusters=self.n_clusters,
            weights=self.weights,
            return_results=True,
            reference_idx=self.reference_idx)

    def predict(self, X):
        """Predict the class labels for the provided data.

        Parameters
        ----------
        X : numpy array of shape (n_samples, n_features)
            The input samples.

        Returns
        -------
        labels : numpy array of shape (n_samples,)
            Class labels for each data sample.
        """
        # TODO: decide whether enable predict function for clustering
        # Bug fix: ``NotImplemented`` is a constant, not an exception class,
        # so ``raise NotImplemented(...)`` itself raised a confusing
        # TypeError. Use the proper ``NotImplementedError`` exception.
        raise NotImplementedError("predict function is currently disabled for"
                                  "clustering due to inconsistent behaviours.")

        # NOTE: the code below is unreachable while predict is disabled;
        # kept intact for when the function is re-enabled.
        # Validate inputs X
        X = check_array(X)

        # initialize the score matrix to store the results
        original_labels = np.zeros([X.shape[0], self.n_base_estimators_])

        for i, estimator in enumerate(self.base_estimators):
            check_is_fitted(estimator, ['labels_'])
            original_labels[:, i] = estimator.predict(X)

        # get the aligned result
        predicted_labels = clusterer_ensemble_scores(
            original_labels,
            self.n_base_estimators_,
            n_clusters=self.n_clusters,
            weights=self.weights,
            return_results=False,
            reference_idx=self.reference_idx)
        return predicted_labels

    def predict_proba(self, X):
        """Predict the class labels for the provided data.

        Parameters
        ----------
        X : numpy array of shape (n_samples, n_features)
            The input samples.

        Returns
        -------
        labels : numpy array of shape (n_samples,)
            Class labels for each data sample.
        """
        # Bug fix: raise NotImplementedError instead of calling the
        # non-callable ``NotImplemented`` constant (see ``predict``).
        raise NotImplementedError(
            "predict_proba function is currently disabled for"
            "clustering due to inconsistent behaviours.")

    def fit_predict(self, X, y=None):
        """Fit estimator and predict on X. y is optional for unsupervised
        methods.

        Parameters
        ----------
        X : numpy array of shape (n_samples, n_features)
            The input samples.

        y : numpy array of shape (n_samples,), optional (default=None)
            The ground truth of the input samples (labels).

        Returns
        -------
        labels : numpy array of shape (n_samples,)
            Cluster labels for each data sample.
        """
        self.fit(X)
        return self.labels_
def clusterer_ensemble_scores(original_labels, n_estimators, n_clusters,
                              weights=None, return_results=False,
                              reference_idx=0):
    """Align raw clustering results from base estimators.

    Unlike the :class:`ClustererEnsemble` class, this function operates
    directly on the label matrix produced by already-fitted estimators;
    no training or prediction happens here.

    Parameters
    ----------
    original_labels : numpy array of shape (n_samples, n_estimators)
        The raw output from base estimators.

    n_estimators : int
        The number of base estimators.

    n_clusters : int, optional (default=8)
        The number of clusters.

    weights : numpy array of shape (1, n_estimators)
        Estimators weights.

    return_results : bool, optional (default=False)
        If True, also return the aligned label matrix.

    reference_idx : int in range [0, n_estimators-1], optional (default=0)
        The ith base estimator used as the reference for label alignment.

    Returns
    -------
    aligned_labels : numpy array of shape (n_samples, n_estimators)
        The aligned label results by using reference_idx estimator as the
        reference.
    """
    original_labels = _validate_cluster_number(original_labels, n_clusters)

    alignment_mat = np.zeros([n_clusters, n_estimators])
    aligned_labels = np.copy(original_labels)

    # align each estimator's labels against the reference estimator
    for est_idx in range(n_estimators):
        overlap = _intersection_mat(original_labels, reference_idx, est_idx,
                                    n_clusters)
        mapping = _alignment(overlap, n_clusters, est_idx, aligned_labels,
                             OFFSET_FACTOR)
        alignment_mat[:, est_idx] = mapping[:, 1]

    # remove the temporary offset applied during alignment
    aligned_labels = aligned_labels - OFFSET_FACTOR

    if weights is None:
        # equal weights if not set
        weights = np.ones([1, n_estimators])
    else:
        assert_equal(original_labels.shape[1], weights.shape[1])

    labels_by_vote = majority_vote(aligned_labels, n_classes=n_clusters,
                                  weights=weights)

    if not return_results:
        return labels_by_vote.astype(int)
    return labels_by_vote.astype(int), aligned_labels.astype(int)
def _intersection_mat(result_mat, first_idx, second_idx, n_clusters):
"""Calculate the number of overlappings of second_idx based on first_idx.
alignment_mat[i,j] represents the number of labels == j in second_idx
when labels == i in the first idx.
In other words, we should do the alignment based on the max by first
assigning the most
Parameters
----------
result_mat
first_idx
second_idx
n_clusters
Returns
-------
"""
alignment_mat = np.zeros([n_clusters, n_clusters])
for i in range(n_clusters):
for j in range(n_clusters):
i_index = np.argwhere(result_mat[:, first_idx] == i)
j_index = np.argwhere(result_mat[:, second_idx] == j)
inter_ij = np.intersect1d(i_index, j_index)
alignment_mat[i, j] = len(inter_ij)
return alignment_mat
def _alignment(inter_mat, n_clusters, second_idx, result_mat_aligned,
               offset=OFFSET_FACTOR):
    """Greedily map estimator ``second_idx``'s cluster ids onto the
    reference ids using the co-occurrence matrix ``inter_mat``.

    Mutates both ``inter_mat`` (rows/columns are zapped to -1 once
    matched) and ``result_mat_aligned`` (relabelled in place, shifted by
    ``offset`` so already-renamed labels cannot collide with pending
    original labels). Returns the (old id, new id) mapping table.
    """
    # column 0: original cluster id; column 1: matched reference id
    index_mapping = np.zeros([n_clusters, 2])
    index_mapping[:, 0] = list(range(0, n_clusters))

    # loop until every row/column has been consumed (all entries are -1,
    # whose sum equals -n_clusters**2)
    while np.sum(inter_mat) > (-1 * n_clusters * n_clusters):
        # pick the pair with the largest remaining overlap
        max_i, max_j = np.unravel_index(inter_mat.argmax(), inter_mat.shape)
        index_mapping[max_i, 1] = max_j
        # invalidate the matched row and column so they cannot be reused
        inter_mat[max_i, :] = -1
        inter_mat[:, max_j] = -1
        # print('component 1 cluser', max_i, '==', 'component 2 cluser', max_j)
        # rename cluster max_j to max_i (+offset to avoid collisions with
        # labels that have not been renamed yet)
        result_mat_aligned[
            np.where(result_mat_aligned[:, second_idx] == max_j), second_idx] \
            = max_i + offset
    return index_mapping
def _validate_cluster_number(original_results, n_clusters):
    """Validate that every estimator formed exactly ``n_clusters`` clusters.

    Parameters
    ----------
    original_results : numpy array of shape (n_samples, n_estimators)
        Raw labels from the base estimators, one column per estimator.

    n_clusters : int
        The expected number of clusters.

    Returns
    -------
    original_results : numpy array of shape (n_samples, n_estimators)
        The validated (array-checked) input.

    Raises
    ------
    ValueError
        If any estimator produced a number of distinct labels different
        from ``n_clusters``.
    """
    original_results = check_array(original_results)

    for i in range(original_results.shape[1]):
        values, counts = np.unique(original_results[:, i], return_counts=True)
        if len(values) != n_clusters:
            # Bug fix: the original code built a RuntimeError but never
            # raised it, silently accepting inconsistent clusterings.
            raise ValueError(
                'Estimator {0} formed {1} clusters; expected n_clusters={2}'
                .format(i, len(values), n_clusters))
    return original_results
| [
"sklearn.utils.validation.check_is_fitted",
"numpy.copy",
"numpy.intersect1d",
"numpy.testing.assert_equal",
"numpy.ones",
"numpy.unique",
"numpy.where",
"pyod.utils.utility.check_parameter",
"numpy.sum",
"numpy.zeros",
"numpy.argwhere",
"sklearn.utils.check_array"
] | [((6861, 6897), 'numpy.zeros', 'np.zeros', (['[n_clusters, n_estimators]'], {}), '([n_clusters, n_estimators])\n', (6869, 6897), True, 'import numpy as np\n'), ((6919, 6943), 'numpy.copy', 'np.copy', (['original_labels'], {}), '(original_labels)\n', (6926, 6943), True, 'import numpy as np\n'), ((8288, 8322), 'numpy.zeros', 'np.zeros', (['[n_clusters, n_clusters]'], {}), '([n_clusters, n_clusters])\n', (8296, 8322), True, 'import numpy as np\n'), ((8782, 8807), 'numpy.zeros', 'np.zeros', (['[n_clusters, 2]'], {}), '([n_clusters, 2])\n', (8790, 8807), True, 'import numpy as np\n'), ((9632, 9661), 'sklearn.utils.check_array', 'check_array', (['original_results'], {}), '(original_results)\n', (9643, 9661), False, 'from sklearn.utils import check_array\n'), ((1701, 1760), 'pyod.utils.utility.check_parameter', 'check_parameter', (['n_clusters'], {'low': '(2)', 'param_name': '"""n_clusters"""'}), "(n_clusters, low=2, param_name='n_clusters')\n", (1716, 1760), False, 'from pyod.utils.utility import check_parameter\n'), ((1807, 1921), 'pyod.utils.utility.check_parameter', 'check_parameter', (['reference_idx'], {'low': '(0)', 'high': '(self.n_base_estimators_ - 1)', 'include_left': '(True)', 'include_right': '(True)'}), '(reference_idx, low=0, high=self.n_base_estimators_ - 1,\n include_left=True, include_right=True)\n', (1822, 1921), False, 'from pyod.utils.utility import check_parameter\n'), ((2283, 2297), 'sklearn.utils.check_array', 'check_array', (['X'], {}), '(X)\n', (2294, 2297), False, 'from sklearn.utils import check_array\n'), ((2384, 2431), 'numpy.zeros', 'np.zeros', (['[X.shape[0], self.n_base_estimators_]'], {}), '([X.shape[0], self.n_base_estimators_])\n', (2392, 2431), True, 'import numpy as np\n'), ((3778, 3792), 'sklearn.utils.check_array', 'check_array', (['X'], {}), '(X)\n', (3789, 3792), False, 'from sklearn.utils import check_array\n'), ((3879, 3926), 'numpy.zeros', 'np.zeros', (['[X.shape[0], self.n_base_estimators_]'], {}), '([X.shape[0], 
self.n_base_estimators_])\n', (3887, 3926), True, 'import numpy as np\n'), ((7369, 7425), 'numpy.testing.assert_equal', 'assert_equal', (['original_labels.shape[1]', 'weights.shape[1]'], {}), '(original_labels.shape[1], weights.shape[1])\n', (7381, 7425), False, 'from numpy.testing import assert_equal\n'), ((7486, 7512), 'numpy.ones', 'np.ones', (['[1, n_estimators]'], {}), '([1, n_estimators])\n', (7493, 7512), True, 'import numpy as np\n'), ((8872, 8889), 'numpy.sum', 'np.sum', (['inter_mat'], {}), '(inter_mat)\n', (8878, 8889), True, 'import numpy as np\n'), ((9736, 9789), 'numpy.unique', 'np.unique', (['original_results[:, i]'], {'return_counts': '(True)'}), '(original_results[:, i], return_counts=True)\n', (9745, 9789), True, 'import numpy as np\n'), ((2695, 2734), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['estimator', "['labels_']"], {}), "(estimator, ['labels_'])\n", (2710, 2734), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((4001, 4040), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['estimator', "['labels_']"], {}), "(estimator, ['labels_'])\n", (4016, 4040), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((8413, 8455), 'numpy.argwhere', 'np.argwhere', (['(result_mat[:, first_idx] == i)'], {}), '(result_mat[:, first_idx] == i)\n', (8424, 8455), True, 'import numpy as np\n'), ((8478, 8521), 'numpy.argwhere', 'np.argwhere', (['(result_mat[:, second_idx] == j)'], {}), '(result_mat[:, second_idx] == j)\n', (8489, 8521), True, 'import numpy as np\n'), ((8545, 8577), 'numpy.intersect1d', 'np.intersect1d', (['i_index', 'j_index'], {}), '(i_index, j_index)\n', (8559, 8577), True, 'import numpy as np\n'), ((9228, 9280), 'numpy.where', 'np.where', (['(result_mat_aligned[:, second_idx] == max_j)'], {}), '(result_mat_aligned[:, second_idx] == max_j)\n', (9236, 9280), True, 'import numpy as np\n')] |
from openmdao.api import ExplicitComponent
import numpy as np
class SellarDis1(ExplicitComponent):
    """
    Component containing Discipline 1 of the Sellar problem.
    """

    def __init__(self, derivative_method='full_analytic', **kwargs):
        super(SellarDis1, self).__init__(**kwargs)
        self.derivative_method = derivative_method

    def setup(self):
        # Counters tracking how often compute / compute_partials run
        self.num_compute = 0
        self.num_compute_partials = 0

        # Global design variable
        self.add_input('z', val=np.zeros(2))
        # Local design variable
        self.add_input('x', val=0.)
        # Coupling parameter
        self.add_input('y2', val=0.)
        # Coupling output
        self.add_output('y1', val=0.)

        partials_method = ('exact'
                           if self.derivative_method == 'full_analytic'
                           else 'fd')
        self.declare_partials('*', '*', method=partials_method)

    def compute(self, inputs, outputs):
        """
        Evaluates the equation
        y1 = z1**2 + z2 + x - 0.2*y2
        """
        z = inputs['z']
        outputs['y1'] = z[0] ** 2 + z[1] + inputs['x'] - 0.2 * inputs['y2']
        self.num_compute += 1

    def compute_partials(self, inputs, partials):
        """
        Jacobian for Sellar discipline 1.
        """
        partials['y1', 'y2'] = -0.2
        partials['y1', 'z'] = np.array([[2.0 * inputs['z'][0], 1.0]])
        partials['y1', 'x'] = 1.0
        self.num_compute_partials += 1
class SellarDis2(ExplicitComponent):
    """
    Component containing Discipline 2 of the Sellar problem.
    """

    def __init__(self, derivative_method='full_analytic', **kwargs):
        super(SellarDis2, self).__init__(**kwargs)
        self.derivative_method = derivative_method

    def setup(self):
        # Attributes to count number of compute and compute_partials performed
        self.num_compute = 0
        self.num_compute_partials = 0

        # Global Design Variable
        self.add_input('z', val=np.zeros(2))
        # Coupling parameter
        self.add_input('y1', val=0.)
        # Coupling output
        self.add_output('y2', val=0.)

        if self.derivative_method == 'full_analytic':
            self.declare_partials('*', '*', method='exact')
        else:
            self.declare_partials('*', '*', method='fd')

    def compute(self, inputs, outputs):
        """
        Evaluates the equation
        y2 = y1**(.5) + z1 + z2
        """
        z1 = inputs['z'][0]
        z2 = inputs['z'][1]
        y1 = inputs['y1']

        # Note: this may cause some issues. However, y1 is constrained to be
        # above 3.16, so lets just let it converge, and the optimizer will
        # throw it out
        # NOTE(review): ``y1 *= -1`` modifies the object returned by
        # ``inputs['y1']`` in place if it is a live view — confirm intended.
        if y1.real < 0.0:
            y1 *= -1

        outputs['y2'] = y1 ** .5 + z1 + z2
        # Bug fix: the counter was declared in setup() but never updated,
        # unlike SellarDis1 which increments it on every evaluation.
        self.num_compute += 1

    def compute_partials(self, inputs, J):
        """
        Jacobian for Sellar discipline 2.
        """
        y1 = inputs['y1']
        if y1.real < 0.0:
            y1 *= -1
        if y1.real < 1e-8:
            y1 = 1e-8

        J['y2', 'y1'] = .5 * y1 ** -.5
        J['y2', 'z'] = np.array([[1.0, 1.0]])
        # Bug fix: keep the partials counter in sync with SellarDis1.
        self.num_compute_partials += 1
class SellarConComp1(ExplicitComponent):
    """
    First constraint component of the Sellar problem: con1 = 3.16 - y1.
    """

    def __init__(self, derivative_method='full_analytic', **kwargs):
        super(SellarConComp1, self).__init__(**kwargs)
        self.derivative_method = derivative_method

    def setup(self):
        # Parameter
        self.add_input('y1', val=0.)
        # Constraint
        self.add_output('con1', val=0.)

        partials_method = ('exact'
                           if self.derivative_method == 'full_analytic'
                           else 'fd')
        self.declare_partials('*', '*', method=partials_method)

    def compute(self, inputs, outputs):
        """Evaluates con1 = 3.16 - y1."""
        outputs['con1'] = 3.16 - inputs['y1']

    def compute_partials(self, inputs, J):
        """Jacobian of con1 with respect to y1."""
        J['con1', 'y1'] = -1.
class SellarConComp2(ExplicitComponent):
    """
    Second constraint component of the Sellar problem: con2 = y2 - 24.0.
    """

    def __init__(self, derivative_method='full_analytic', **kwargs):
        super(SellarConComp2, self).__init__(**kwargs)
        self.derivative_method = derivative_method

    def setup(self):
        # Parameter
        self.add_input('y2', val=0.)
        # Constraint
        self.add_output('con2', val=0.)

        partials_method = ('exact'
                           if self.derivative_method == 'full_analytic'
                           else 'fd')
        self.declare_partials('*', '*', method=partials_method)

    def compute(self, inputs, outputs):
        """Evaluates con2 = y2 - 24.0."""
        outputs['con2'] = inputs['y2'] - 24.0

    def compute_partials(self, inputs, J):
        """Jacobian of con2 with respect to y2."""
        J['con2', 'y2'] = 1.
class SellarConsistencyConstraintY2(ExplicitComponent):
    """ExplicitComponent that computes the y2 consistency constraint for IDF."""

    def __init__(self, derivative_method='full_analytic', **kwargs):
        super(SellarConsistencyConstraintY2, self).__init__(**kwargs)
        self.derivative_method = derivative_method

    def setup(self):
        # y2 consistency design variable
        self.add_input('y2_t', val=0.)
        # y2
        self.add_input('y2', val=0.)
        # y2 consistency constraint
        self.add_output('y2_c_constr', val=0.)

        # Bug fix: the original condition was
        #   self.derivative_method == ('derivative_free' or 'full_analytic')
        # but ('derivative_free' or 'full_analytic') evaluates to
        # 'derivative_free', so 'full_analytic' never selected exact
        # partials. Test membership in the intended pair instead.
        if self.derivative_method in ('derivative_free', 'full_analytic'):
            self.declare_partials('*', '*', method='exact')
        else:
            self.declare_partials('*', '*', method='fd')

    def compute(self, inputs, outputs):
        """Evaluates the equation
        y2_c_constr = y2_t - y2"""
        y2_t = inputs['y2_t']
        y2 = inputs['y2']
        outputs['y2_c_constr'] = y2_t - y2

    def compute_partials(self, inputs, partials):
        """ Jacobian for y2 consistency constraint."""
        partials['y2_c_constr', 'y2_t'] = 1.
        partials['y2_c_constr', 'y2'] = -1.
class SellarObjective(ExplicitComponent):
    """
    Component containing the objective of the Sellar problem.
    """
    def __init__(self, derivative_method='full_analytic', **kwargs):
        super(SellarObjective, self).__init__(**kwargs)
        # selects exact vs finite-difference partials in setup()
        self.derivative_method = derivative_method
    def setup(self):
        # Global Design Variable
        self.add_input('z', val=np.zeros(2))
        # Local Design Variable
        self.add_input('x', val=0.)
        # Coupling parameter
        self.add_input('y1', val=0.)
        self.add_input('y2', val=0.)
        # Output (objective)
        self.add_output('obj', val=0.)
        if self.derivative_method == 'full_analytic':
            self.declare_partials('*', '*', method='exact')
        else:
            self.declare_partials('*', '*', method='fd')
        # obj_hist maps evaluation index -> objective value
        self.obj_hist = {}
        # number of objective evaluations performed so far
        self.num_iter = 0
        # First index is one because of init
        # NOTE(review): appended to on every compute_partials call —
        # presumably marking derivative evaluations as major iterations;
        # confirm against the driver that consumes it.
        self.major_iterations = [1]
    def compute(self, inputs, outputs):
        """
        Evaluates the equation
        obj = x**2 + z2 + y1 + exp(-y2)
        """
        z2 = inputs['z'][1]
        x = inputs['x']
        y1 = inputs['y1']
        y2 = inputs['y2']
        outputs['obj'] = x ** 2. + z2 + y1 + np.exp(-y2)
        # record this evaluation in the history before advancing the counter
        self.obj_hist[self.num_iter] = outputs['obj'][0]
        self.num_iter += 1
    def compute_partials(self, inputs, J):
        """
        Jacobian for objective of the Sellar problem.
        """
        J['obj', 'z'] = np.array([0., 1.])  # d(obj)/dz1 = 0, d(obj)/dz2 = 1
        J['obj', 'x'] = 2. * inputs['x']
        J['obj', 'y1'] = 1.
        J['obj', 'y2'] = -np.exp(-inputs['y2'])
        # mark the current evaluation count as a major iteration
        self.major_iterations.append(self.num_iter)
| [
"numpy.exp",
"numpy.array",
"numpy.zeros"
] | [((1468, 1507), 'numpy.array', 'np.array', (["[[2.0 * inputs['z'][0], 1.0]]"], {}), "([[2.0 * inputs['z'][0], 1.0]])\n", (1476, 1507), True, 'import numpy as np\n'), ((3185, 3207), 'numpy.array', 'np.array', (['[[1.0, 1.0]]'], {}), '([[1.0, 1.0]])\n', (3193, 3207), True, 'import numpy as np\n'), ((7835, 7855), 'numpy.array', 'np.array', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (7843, 7855), True, 'import numpy as np\n'), ((7593, 7604), 'numpy.exp', 'np.exp', (['(-y2)'], {}), '(-y2)\n', (7599, 7604), True, 'import numpy as np\n'), ((7949, 7970), 'numpy.exp', 'np.exp', (["(-inputs['y2'])"], {}), "(-inputs['y2'])\n", (7955, 7970), True, 'import numpy as np\n'), ((562, 573), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (570, 573), True, 'import numpy as np\n'), ((2081, 2092), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2089, 2092), True, 'import numpy as np\n'), ((6729, 6740), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (6737, 6740), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from scipy.stats import multinomial
from ..utils.array import one_hot
from .categorical import CategoricalDist
# Seed TensorFlow's RNG for reproducible sampling; TF1 and TF2 expose
# different seeding APIs.
# NOTE(review): lexicographic version compare breaks for versions >= '10';
# acceptable for the tf 1.x / 2.x split targeted here — confirm.
if tf.__version__ >= '2.0':
    tf.random.set_seed(11)
else:
    tf.set_random_seed(11)
# Fixed random fixture: 7 samples, 5 features, 3 classes.
rnd = np.random.RandomState(13)
x_np = rnd.randn(7, 5)
y_np = rnd.randint(3, size=7)
y_onehot_np = one_hot(y_np, 3)
# Symbolic inputs: features, integer labels, one-hot labels.
x = keras.Input([5], dtype='float32')
y = keras.Input([1], dtype='int32')
y_onehot = keras.Input([3], dtype='float32')
# Linear head producing class logits, plus its softmax probabilities.
logits = keras.layers.Dense(3)(x)
proba = keras.layers.Lambda(K.softmax)(logits)
# Distribution under test and a sampling op built on the same logits.
dist = CategoricalDist(logits)
sample = keras.layers.Lambda(lambda args: dist.sample())(logits)
# test against scipy implementation
proba_np = keras.Model(x, proba).predict(x_np)
dists_np = [multinomial(n=1, p=p) for p in proba_np] # cannot broadcast
def test_sample():
    """Samples are one-hot: shape (7, 3) with each row summing to one."""
    sampled = keras.Model(x, sample).predict(x_np)
    assert sampled.shape == (7, 3)
    np.testing.assert_array_almost_equal(sampled.sum(axis=1), np.ones(7))
def test_log_proba():
    """log_proba with integer labels matches scipy's multinomial logpmf."""
    log_proba_op = keras.layers.Lambda(lambda args: dist.log_proba(y))(logits)
    actual = keras.Model([x, y], log_proba_op).predict([x_np, y_np])
    expected = np.stack([d.logpmf(a) for d, a in zip(dists_np, y_onehot_np)])
    np.testing.assert_array_almost_equal(actual, expected)
def test_log_proba_onehot():
    """log_proba with one-hot labels matches scipy's multinomial logpmf."""
    log_proba_op = keras.layers.Lambda(
        lambda args: dist.log_proba(y_onehot))(logits)
    actual = keras.Model([x, y_onehot],
                        log_proba_op).predict([x_np, y_onehot_np])
    expected = np.stack([d.logpmf(a) for d, a in zip(dists_np, y_onehot_np)])
    np.testing.assert_array_almost_equal(actual, expected)
def test_entropy():
    """Distribution entropy matches scipy's per-sample entropy."""
    entropy_op = keras.layers.Lambda(lambda args: dist.entropy())(logits)
    actual = keras.Model(x, entropy_op).predict(x_np)
    expected = np.stack([d.entropy() for d in dists_np])
    np.testing.assert_array_almost_equal(actual, expected)
def test_cross_entropy():
    """Placeholder: cross-entropy is not yet covered."""
    # TODO: test this without implementing the same thing in numpy
    pass
def test_kl_divergence():
    """Placeholder: KL divergence is not yet covered."""
    # TODO: test this without implementing the same thing in numpy
    pass
def test_proba_ratio():
    """Placeholder: probability ratio is not yet covered."""
    # TODO: test this without implementing the same thing in numpy
    pass
| [
"numpy.testing.assert_array_almost_equal",
"tensorflow.random.set_seed",
"numpy.ones",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Input",
"tensorflow.keras.Model",
"scipy.stats.multinomial",
"tensorflow.set_random_seed",
"numpy.random.RandomState"
] | [((322, 347), 'numpy.random.RandomState', 'np.random.RandomState', (['(13)'], {}), '(13)\n', (343, 347), True, 'import numpy as np\n'), ((438, 471), 'tensorflow.keras.Input', 'keras.Input', (['[5]'], {'dtype': '"""float32"""'}), "([5], dtype='float32')\n", (449, 471), False, 'from tensorflow import keras\n'), ((476, 507), 'tensorflow.keras.Input', 'keras.Input', (['[1]'], {'dtype': '"""int32"""'}), "([1], dtype='int32')\n", (487, 507), False, 'from tensorflow import keras\n'), ((519, 552), 'tensorflow.keras.Input', 'keras.Input', (['[3]'], {'dtype': '"""float32"""'}), "([3], dtype='float32')\n", (530, 552), False, 'from tensorflow import keras\n'), ((260, 282), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(11)'], {}), '(11)\n', (278, 282), True, 'import tensorflow as tf\n'), ((293, 315), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(11)'], {}), '(11)\n', (311, 315), True, 'import tensorflow as tf\n'), ((563, 584), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(3)'], {}), '(3)\n', (581, 584), False, 'from tensorflow import keras\n'), ((596, 626), 'tensorflow.keras.layers.Lambda', 'keras.layers.Lambda', (['K.softmax'], {}), '(K.softmax)\n', (615, 626), False, 'from tensorflow import keras\n'), ((828, 849), 'scipy.stats.multinomial', 'multinomial', ([], {'n': '(1)', 'p': 'p'}), '(n=1, p=p)\n', (839, 849), False, 'from scipy.stats import multinomial\n'), ((1548, 1602), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (1584, 1602), True, 'import numpy as np\n'), ((1869, 1923), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (1905, 1923), True, 'import numpy as np\n'), ((2123, 2177), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (2159, 2177), True, 'import numpy as np\n'), 
((780, 801), 'tensorflow.keras.Model', 'keras.Model', (['x', 'proba'], {}), '(x, proba)\n', (791, 801), False, 'from tensorflow import keras\n'), ((1237, 1247), 'numpy.ones', 'np.ones', (['(7)'], {}), '(7)\n', (1244, 1247), True, 'import numpy as np\n'), ((1104, 1126), 'tensorflow.keras.Model', 'keras.Model', (['x', 'sample'], {}), '(x, sample)\n', (1115, 1126), False, 'from tensorflow import keras\n'), ((1496, 1520), 'tensorflow.keras.Model', 'keras.Model', (['[x, y]', 'out'], {}), '([x, y], out)\n', (1507, 1520), False, 'from tensorflow import keras\n'), ((1803, 1834), 'tensorflow.keras.Model', 'keras.Model', (['[x, y_onehot]', 'out'], {}), '([x, y_onehot], out)\n', (1814, 1834), False, 'from tensorflow import keras\n'), ((2084, 2103), 'tensorflow.keras.Model', 'keras.Model', (['x', 'out'], {}), '(x, out)\n', (2095, 2103), False, 'from tensorflow import keras\n')] |
#!/usr/bin/python
import sys
import textadapter
import unittest
from .generate import (generate_dataset, IntIter,
MissingValuesIter, FixedWidthIter)
import numpy as np
from numpy.testing import assert_array_equal
import gzip
import os
import io
from six import StringIO
class TestTextAdapter(unittest.TestCase):
num_records = 100000
def assert_equality(self, left, right):
try:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
self.assert_array_equal(left, right)
else:
self.assertTrue(left == right)
except AssertionError:
raise AssertionError('FAIL: {0} != {1}'.format(left, right))
# Basic parsing tests
def test_string_parsing(self):
data = StringIO('1,2,3\n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S5', 1:'S5', 2:'S5'})
assert_array_equal(adapter[:], np.array([('1', '2', '3')], dtype='S5,S5,S5'))
data = io.StringIO(u'1,2,3\n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S5', 1:'S5', 2:'S5'})
assert_array_equal(adapter[:], np.array([('1', '2', '3')], dtype='S5,S5,S5'))
data = io.BytesIO(b'1,2,3\n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S5', 1:'S5', 2:'S5'})
assert_array_equal(adapter[:], np.array([('1', '2', '3')], dtype='S5,S5,S5'))
# basic utf_8 tests
def test_utf8_parsing(self):
# test single byte character
data = io.BytesIO(u'1,2,\u0033'.encode('utf_8'))
adapter = textadapter.text_adapter(data, field_names=False)
expected = np.array([('1', '2', '3')], dtype='u8,u8,u8')
assert_array_equal(adapter[:], expected)
# test multibyte character
data = io.BytesIO(u'1,2,\u2092'.encode('utf_8'))
adapter = textadapter.text_adapter(data, field_names=False)
expected = np.array([('1', '2', u'\u2092')], dtype='u8,u8,O')
assert_array_equal(adapter[:], expected)
def test_no_whitespace_stripping(self):
data = StringIO('1 ,2 ,3 \n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S3', 1:'S3', 2:'S3'})
assert_array_equal(adapter[:], np.array([('1 ', '2 ', '3 ')], dtype='S3,S3,S3'))
data = StringIO(' 1, 2, 3\n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S3', 1:'S3', 2:'S3'})
assert_array_equal(adapter[:], np.array([(' 1', ' 2', ' 3')], dtype='S3,S3,S3'))
data = StringIO(' 1 , 2 , 3 \n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S5', 1:'S5', 2:'S5'})
assert_array_equal(adapter[:], np.array([(' 1 ', ' 2 ', ' 3 ')], dtype='S5,S5,S5'))
data = StringIO('\t1\t,\t2\t,\t3\t\n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S3', 1:'S3', 2:'S3'})
assert_array_equal(adapter[:], np.array([('\t1\t', '\t2\t', '\t3\t')], dtype='S3,S3,S3'))
def test_quoted_whitespace(self):
data = StringIO('"1 ","2 ","3 "\n')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'S3', 1:'S3', 2:'S3'})
assert_array_equal(adapter[:], np.array([('1 ', '2 ', '3 ')], dtype='S3,S3,S3'))
data = StringIO('"\t1\t"\t"\t2\t"\t"\t3\t"\n')
adapter = textadapter.text_adapter(data, field_names=False, delimiter='\t')
adapter.set_field_types({0:'S3', 1:'S3', 2:'S3'})
assert_array_equal(adapter[:], np.array([('\t1\t', '\t2\t', '\t3\t')], dtype='S3,S3,S3'))
    def test_fixed_simple(self):
        """Fixed-width parsing (3 characters per field) of integer columns."""
        # TODO: fix this test on 32-bit and on Windows
        if tuple.__itemsize__ == 4:
            # This test does not work on 32-bit, so we skip it
            return
        if sys.platform == 'win32':
            # This test does not work on Windows
            return
        # three rows of three 3-character-wide integer fields
        data = StringIO(" 1 2 3\n 4 5 67\n890123 4")
        adapter = textadapter.FixedWidthTextAdapter(data, 3, infer_types=False, field_names=False)
        adapter.set_field_types({0:'i', 1:'i', 2:'i'})
        control = np.array([(1, 2, 3), (4, 5, 67), (890, 123, 4)], dtype='i,i,i')
        assert_array_equal(adapter[:], control)
def test_spaces_around_numeric_values(self):
data = StringIO(' 1 , -2 , 3.3 , -4.4 \n 5 , -6 , 7.7 , -8.8 ')
adapter = textadapter.text_adapter(data, field_names=False)
adapter.set_field_types({0:'u4', 1:'i8', 2:'f4', 3:'f8'})
array = adapter[:]
control = np.array([(1,-2,3.3,-4.4), (5,-6,7.7,-8.8)], dtype='u4,i8,f4,f8')
assert_array_equal(array, control)
    def test_slicing(self):
        """Indexing/slicing of the adapter: records increase by 5 per row
        (row i holds (5i, 5i+1, ..., 5i+4) — assumes IntIter emits
        consecutive integers; TODO confirm against generate.IntIter)."""
        data = StringIO()
        generate_dataset(data, IntIter(), ',', self.num_records)
        adapter = textadapter.text_adapter(data, field_names=False)
        adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
        # single-record indexing: first and last record
        assert_array_equal(adapter[0], np.array([(0, 1, 2, 3, 4)], dtype='u4,u4,u4,u4,u4'))
        expected_values = [((self.num_records-1)*5)+x for x in range(5)]
        self.assert_equality(adapter[self.num_records-1].item(), tuple(expected_values))
        #adapter.create_index()
        #self.assert_equality(adapter[-1].item(), tuple(expected_values))
        # single-field indexing by field name
        self.assert_equality(adapter['f0'][0].item(), (0,))
        self.assert_equality(adapter['f4'][1].item(), (9,))
        #self.assert_equality(adapter[self.num_records-1]['f4'], (self.num_records*5)-1)
        # full slice: every record present, values advancing by 5
        array = adapter[:]
        record = [x for x in range(0, 5)]
        self.assert_equality(array.size, self.num_records)
        for i in range(0, self.num_records):
            self.assert_equality(array[i].item(), tuple(record))
            record = [x+5 for x in record]
        # slice excluding the last record
        array = adapter[:-1]
        record = [x for x in range(0, 5)]
        self.assert_equality(array.size, self.num_records-1)
        for i in range(0, self.num_records-1):
            self.assert_equality(array[i].item(), tuple(record))
            record = [x+5 for x in record]
        # bounded slice of the first ten records
        array = adapter[0:10]
        self.assert_equality(array.size, 10)
        record = [x for x in range(0, 5)]
        for i in range(0, 10):
            self.assert_equality(array[i].item(), tuple(record))
            record = [x+5 for x in record]
        # slice skipping the first record
        array = adapter[1:]
        self.assert_equality(array.size, self.num_records-1)
        record = [x for x in range(5, 10)]
        for i in range(0, self.num_records-1):
            self.assert_equality(array[i].item(), tuple(record))
            record = [x+5 for x in record]
        # strided slice: every other record, so values advance by 10
        array = adapter[0:10:2]
        self.assert_equality(array.size, 5)
        record = [x for x in range(0, 5)]
        for i in range(0, 5):
            self.assert_equality(array[i].item(), tuple(record))
            record = [x+10 for x in record]
        # field selection via fancy indexing on field names
        array = adapter[['f0', 'f4']][:]
        record = [0, 4]
        self.assert_equality(array.size, self.num_records)
        for i in range(0, self.num_records):
            self.assert_equality(array[i].item(), tuple(record))
            record = [x+5 for x in record]
        # field selection via the field_filter attribute (index or name)
        adapter.field_filter = [0, 'f4']
        array = adapter[:]
        record = [0, 4]
        self.assert_equality(array.size, self.num_records)
        for i in range(0, self.num_records):
            self.assert_equality(array[i].item(), tuple(record))
            record = [x+5 for x in record]
        # clearing the filter restores all five fields
        adapter.field_filter = None
        array = adapter[:]
        record = [0, 1, 2, 3, 4]
        self.assert_equality(array.size, self.num_records)
        for i in range(0, self.num_records):
            self.assert_equality(array[i].item(), tuple(record))
            record = [x+5 for x in record]
        # out-of-range index and slice must raise AdapterIndexError
        try:
            adapter[self.num_records]
        except textadapter.AdapterIndexError:
            pass
        else:
            self.fail('AdaperIndexError not thrown')
        try:
            adapter[0:self.num_records+1]
        except textadapter.AdapterIndexError:
            pass
        else:
            self.fail('AdaperIndexError not thrown')
def test_converters(self):
    # Converter callables registered per field (by index or by name) are
    # applied to each raw string value before it is stored in the array.
    data = StringIO()
    generate_dataset(data, IntIter(), ',', self.num_records)
    adapter = textadapter.text_adapter(data, delimiter=',', field_names=False)
    #adapter.set_field_types({0:'u4', 1:'u4', 2:'u4', 3:'u4', 4:'u4'})
    def increment(input_str):
        # Parse the raw field and add one.
        return int(input_str) + 1
    def double(input_str):
        # Parse the raw field and double it.
        return int(input_str) + int(input_str)
    if sys.platform == 'win32' and tuple.__itemsize__ == 8:
        # TODO: there are problems below on 64-bit Windows, I get
        # OverflowError: can't convert negative value to unsigned PY_LONG_LONG
        return
    adapter.set_converter(0, increment)
    adapter.set_converter('f1', double)
    array = adapter[:]
    self.assert_equality(array.size, self.num_records)
    # Generated row i is (i*5, i*5+1, ..., i*5+4); with the converters,
    # field 0 becomes i*5 + 1 and field 1 becomes (i*5 + 1) * 2 = 10*i + 2.
    record = [1, 2, 2, 3, 4]
    for i in range(0, self.num_records):
        self.assert_equality(array[i].item(), tuple(record))
        record[0] += 5
        # Next row's doubled field: 10*(i+1) + 2.
        record[1] = (10 * (i+1)) + 2
        record[2] += 5
        record[3] += 5
        record[4] += 5
def test_missing_fill_values(self):
    # Strings registered as "missing" are replaced by fill values: explicit
    # ones from set_fill_values(), or a type default (0) when types are
    # inferred and no fill value was given.
    data = StringIO()
    generate_dataset(data, MissingValuesIter(), ',', self.num_records)
    adapter = textadapter.text_adapter(data, delimiter=',', field_names=False, infer_types=False)
    # Fields can be addressed by name ('f0') or index interchangeably.
    adapter.set_field_types({'f0':'u4', 1:'u4', 2:'u4', 3:'u4', 'f4':'u4'})
    adapter.set_missing_values({0:['NA', 'NaN'], 'f4':['xx','inf']})
    adapter.set_fill_values({0:99, 4:999})
    array = adapter[:]
    self.assert_equality(array.size, self.num_records)
    record = [x for x in range(0, 5)]
    for i in range(0, self.num_records):
        # NOTE(review): rows where i % 4 is 0 or 1 apparently carry the
        # missing-value strings in fields 0 and 4 -- inferred from the
        # expected values here; confirm against MissingValuesIter.
        if i % 4 == 0 or i % 4 == 1:
            record[0] = 99
            record[4] = 999
        else:
            record[0] = record[1] - 1
            record[4] = record[3] + 1
        self.assert_equality(array[i].item(), tuple(record))
        record[1] += 5
        record[2] += 5
        record[3] += 5
    data.seek(0)
    # With inferred types and no explicit fill values, missing fields fall
    # back to 0.
    adapter = textadapter.text_adapter(data, delimiter=',', field_names=False, infer_types=True)
    adapter.set_missing_values({0:['NA', 'NaN'], 4:['xx','inf']})
    array = adapter[:]
    self.assert_equality(array.size, self.num_records)
    record = [x for x in range(0, 5)]
    for i in range(0, self.num_records):
        if i % 4 == 0 or i % 4 == 1:
            record[0] = 0
            record[4] = 0
        else:
            record[0] = record[1] - 1
            record[4] = record[3] + 1
        self.assert_equality(array[i].item(), tuple(record))
        record[1] += 5
        record[2] += 5
        record[3] += 5
    # Test missing field
    data = StringIO('1,2,3\n4,5\n7,8,9')
    adapter = textadapter.text_adapter(data, field_names=False)
    adapter.field_types = {0:'O', 1:'O', 2:'O'}
    adapter.set_fill_values({0:np.nan, 1:np.nan, 2:np.nan})
    array = adapter[:]
    # NumPy assert_array_equal no longer supports mixed O/nan types
    expected = [('1','2','3'),('4','5',np.nan),('7','8','9')]
    self.assert_equality(array.tolist(), expected)
def test_fixed_width(self):
    # Fixed-width parsing: field boundaries are column widths (here
    # 2,3,4,5,6 chars) instead of a delimiter character.
    data = StringIO()
    generate_dataset(data, FixedWidthIter(), '', self.num_records)
    adapter = textadapter.FixedWidthTextAdapter(data, [2,3,4,5,6], field_names=False, infer_types=False)
    adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
    array = adapter[:]
    self.assert_equality(array.size, self.num_records)
    record = [0, 0, 0, 0, 0]
    for i in range(0, self.num_records):
        self.assert_equality(array[i].item(), tuple(record))
        record = [x+1 for x in record]
        # Each field's value wraps to 0 once it would no longer fit in its
        # column width (2 digits, 3 digits, ...).
        if record[0] == 100:
            record[0] = 0
        if record[1] == 1000:
            record[1] = 0
        if record[2] == 10000:
            record[2] = 0
        if record[3] == 100000:
            record[3] = 0
        if record[4] == 1000000:
            record[4] = 0
    # Test skipping blank lines
    data = StringIO(' 1 2 3\n\n 4 5 6')
    adapter = textadapter.text_adapter(data, parser='fixed_width',
        field_widths=[2,2,2], field_names=False)
    array = adapter[:]
    assert_array_equal(array, np.array([(1,2,3), (4,5,6)],
        dtype=[('f0','<u8'),('f1','<u8'),('f2','<u8')]))
    # Test comment lines
    data = StringIO('# 1 2 3\n 1 2 3\n# foo\n 4 5 6')
    adapter = textadapter.text_adapter(data, parser='fixed_width',
        field_widths=[2,2,2], field_names=False)
    array = adapter[:]
    assert_array_equal(array, np.array([(1,2,3), (4,5,6)],
        dtype=[('f0','<u8'),('f1','<u8'),('f2','<u8')]))
    # Test field names line
    data = StringIO(' a b c\n 1 2 3')
    adapter = textadapter.text_adapter(data, parser='fixed_width',
        field_widths=[2,2,2], field_names=True)
    array = adapter[:]
    assert_array_equal(array, np.array([(1,2,3)],
        dtype=[('a','<u8'),('b','<u8'),('c','<u8')]))
    # Test field names line as comment line
    data = StringIO('# a b c\n 1 2 3')
    adapter = textadapter.text_adapter(data, parser='fixed_width',
        field_widths=[2,2,2], field_names=True)
    array = adapter[:]
    assert_array_equal(array, np.array([(1,2,3)],
        dtype=[('a','<u8'),('b','<u8'),('c','<u8')]))
    # Test incomplete field names line: unnamed fields fall back to 'fN'.
    data = StringIO(' a\n 1 2 3')
    adapter = textadapter.text_adapter(data, parser='fixed_width',
        field_widths=[2,2,2], field_names=True)
    array = adapter[:]
    assert_array_equal(array, np.array([(1,2,3)],
        dtype=[('a','<u8'),('f1','<u8'),('f2','<u8')]))
def test_regex(self):
    """Regex parsing: each capture group becomes one field.

    Covers blank-line skipping, comment lines, field-name lines
    (complete, comment-style, incomplete, and non-matching).
    """
    data = StringIO()
    generate_dataset(data, IntIter(), ',', self.num_records)
    adapter = textadapter.RegexTextAdapter(data, '([0-9]*),([0-9]*),([0-9]*),([0-9]*),([0-9]*)\n', field_names=False, infer_types=False)
    adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
    array = adapter[:]
    self.assert_equality(array.size, self.num_records)
    record = [x for x in range(0, 5)]
    for i in range(0, self.num_records):
        self.assert_equality(array[i].item(), tuple(record))
        record = [x+5 for x in record]
    # Test skipping blank lines
    data = StringIO('1 2 3\n\n4 5 6')
    adapter = textadapter.text_adapter(data, parser='regex',
        regex_string='([0-9]) ([0-9]) ([0-9])', field_names=False)
    array = adapter[:]
    assert_array_equal(array, np.array([(1,2,3), (4,5,6)],
        dtype=[('f0','<u8'),('f1','<u8'),('f2','<u8')]))
    # Test comment lines
    data = StringIO('#1 2 3\n1 2 3\n# foo\n4 5 6')
    adapter = textadapter.text_adapter(data, parser='regex',
        regex_string='([0-9]) ([0-9]) ([0-9])', field_names=False)
    array = adapter[:]
    assert_array_equal(array, np.array([(1,2,3), (4,5,6)],
        dtype=[('f0','<u8'),('f1','<u8'),('f2','<u8')]))
    # Test field names line
    data = StringIO('a b c\n4 5 6')
    adapter = textadapter.text_adapter(data, parser='regex',
        regex_string='([0-9]) ([0-9]) ([0-9])', field_names=True)
    array = adapter[:]
    assert_array_equal(array, np.array([(4,5,6)],
        dtype=[('a','<u8'),('b','<u8'),('c','<u8')]))
    # Test field names line as comment line
    data = StringIO('#a b c\n4 5 6')
    adapter = textadapter.text_adapter(data, parser='regex',
        regex_string='([0-9]) ([0-9]) ([0-9])', field_names=True)
    array = adapter[:]
    assert_array_equal(array, np.array([(4,5,6)],
        dtype=[('a','<u8'),('b','<u8'),('c','<u8')]))
    # Test incomplete field names line
    data = StringIO('a b\n4 5 6')
    adapter = textadapter.text_adapter(data, parser='regex',
        regex_string='([0-9]) ([0-9]) ([0-9])', field_names=True)
    array = adapter[:]
    assert_array_equal(array, np.array([(4,5,6)],
        dtype=[('a','<u8'),('b','<u8'),('f2','<u8')]))
    # Test field names line that doesn't match regex
    data = StringIO('a b c\n1 2 3 4 5 6')
    # BUG FIX: raw string for the pattern -- '\s' in a plain string literal
    # is an invalid escape sequence (SyntaxWarning since Python 3.12, slated
    # to become an error). The runtime string is byte-identical.
    adapter = textadapter.text_adapter(data, parser='regex',
        regex_string=r'([0-9\s]+) ([0-9\s]+) ([0-9\s]+)', field_names=True)
    array = adapter[:]
    assert_array_equal(array, np.array([('1 2', '3 4', '5 6')],
        dtype=[('a','O'),('b','O'),('c','O')]))
def test_index(self):
    """Random record access via an index, in memory and persisted to disk.

    Builds an index over a generated csv dataset, spot-checks access at
    several offsets, then repeats with the index written to 'test.idx' and
    with that file loaded back by a second adapter.
    """
    if sys.platform == 'win32':
        # TODO: this test fails on Windows because of file lock problems
        return
    num_records = 100000
    expected_values = [((num_records-1)*5) + x for x in range(5)]
    data = StringIO()
    generate_dataset(data, IntIter(), ',', num_records)
    # test explicit index building
    adapter = textadapter.text_adapter(data, delimiter=',', field_names=False, infer_types=False)
    adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
    adapter.create_index()
    self.assert_equality(adapter[0].item(), tuple([(0*5) + x for x in range(5)]))
    self.assert_equality(adapter[10].item(), tuple([(10*5) + x for x in range(5)]))
    self.assert_equality(adapter[100].item(), tuple([(100*5) + x for x in range(5)]))
    self.assert_equality(adapter[1000].item(), tuple([(1000*5) + x for x in range(5)]))
    self.assert_equality(adapter[10000].item(), tuple([(10000*5) + x for x in range(5)]))
    self.assert_equality(adapter[num_records - 1].item(), tuple([((num_records - 1)*5) + x for x in range(5)]))
    #self.assert_equality(adapter[-1].item(), tuple(expected_values))
    # test implicitly creating disk index on the fly
    if os.path.exists('test.idx'):
        os.remove('test.idx')
    data.seek(0)
    adapter = textadapter.text_adapter(data, delimiter=',', field_names=False, infer_types=False, index_name='test.idx')
    adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
    adapter.to_array()
    self.assert_equality(adapter[0].item(), tuple([(0*5) + x for x in range(5)]))
    self.assert_equality(adapter[10].item(), tuple([(10*5) + x for x in range(5)]))
    self.assert_equality(adapter[100].item(), tuple([(100*5) + x for x in range(5)]))
    self.assert_equality(adapter[1000].item(), tuple([(1000*5) + x for x in range(5)]))
    self.assert_equality(adapter[10000].item(), tuple([(10000*5) + x for x in range(5)]))
    self.assert_equality(adapter[num_records - 1].item(), tuple([((num_records - 1)*5) + x for x in range(5)]))
    #self.assert_equality(adapter[-1].item(), tuple(expected_values))
    adapter.close()
    # test loading disk index
    data.seek(0)
    adapter2 = textadapter.text_adapter(data, delimiter=',', field_names=False, infer_types=False, index_name='test.idx')
    adapter2.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
    self.assert_equality(adapter2[0].item(), tuple([(0*5) + x for x in range(5)]))
    self.assert_equality(adapter2[10].item(), tuple([(10*5) + x for x in range(5)]))
    self.assert_equality(adapter2[100].item(), tuple([(100*5) + x for x in range(5)]))
    self.assert_equality(adapter2[1000].item(), tuple([(1000*5) + x for x in range(5)]))
    self.assert_equality(adapter2[10000].item(), tuple([(10000*5) + x for x in range(5)]))
    self.assert_equality(adapter2[num_records - 1].item(), tuple([((num_records - 1)*5) + x for x in range(5)]))
    #self.assert_equality(adapter2[-1].item(), tuple(expected_values))
    # BUG FIX: close adapter2 -- the adapter that still holds 'test.idx'
    # open. Previously this line re-closed the already-closed `adapter`,
    # leaking adapter2's handle before os.remove().
    adapter2.close()
    os.remove('test.idx')
def test_gzip_index(self):
    # Indexed random access must also work through the gzip decompression
    # layer; 1M records forces seeks deep into the compressed stream.
    num_records = 1000000
    data = StringIO()
    generate_dataset(data, IntIter(), ',', num_records)
    # NOTE(review): the version checks below were hard-wired to the
    # Python 3 branch; the dead `else` branches are kept as-is.
    #if sys.version > '3':
    if True:
        dataz = io.BytesIO()
    else:
        dataz = StringIO()
    gzip_output = gzip.GzipFile(fileobj=dataz, mode='wb')
    #if sys.version > '3':
    if True:
        gzip_output.write(data.getvalue().encode('utf8'))
    else:
        gzip_output.write(data.getvalue())
    gzip_output.close()
    dataz.seek(0)
    # test explicit index building
    adapter = textadapter.text_adapter(dataz, compression='gzip', delimiter=',', field_names=False, infer_types=False)
    adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
    adapter.create_index()
    self.assert_equality(adapter[0].item(), tuple([(0*5) + x for x in range(5)]))
    self.assert_equality(adapter[10].item(), tuple([(10*5) + x for x in range(5)]))
    self.assert_equality(adapter[100].item(), tuple([(100*5) + x for x in range(5)]))
    self.assert_equality(adapter[1000].item(), tuple([(1000*5) + x for x in range(5)]))
    self.assert_equality(adapter[10000].item(), tuple([(10000*5) + x for x in range(5)]))
    self.assert_equality(adapter[100000].item(), tuple([(100000*5) + x for x in range(5)]))
    self.assert_equality(adapter[num_records - 1].item(), tuple([((num_records - 1)*5) + x for x in range(5)]))
    #self.assert_equality(adapter[-1].item(), tuple(expected_values))
    # test 'trouble' records that have caused crashes in the past
    self.assert_equality(adapter[290000].item(), tuple([(290000*5) + x for x in range(5)]))
    self.assert_equality(adapter[818000].item(), tuple([(818000*5) + x for x in range(5)]))
    # test implicitly creating disk index on the fly
    # JNB: not implemented yet
    '''adapter = textadapter.text_adapter(dataz, compression='gzip', delimiter=',', field_names=False, infer_types=False, indexing=True, index_filename='test.idx')
    adapter.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
    adapter.to_array()
    self.assert_equality(adapter[0].item(), tuple([(0*5) + x for x in range(5)]))
    self.assert_equality(adapter[10].item(), tuple([(10*5) + x for x in range(5)]))
    self.assert_equality(adapter[100].item(), tuple([(100*5) + x for x in range(5)]))
    self.assert_equality(adapter[1000].item(), tuple([(1000*5) + x for x in range(5)]))
    self.assert_equality(adapter[10000].item(), tuple([(10000*5) + x for x in range(5)]))
    self.assert_equality(adapter[100000].item(), tuple([(100000*5) + x for x in range(5)]))
    self.assert_equality(adapter[num_records - 1].item(), tuple([((num_records - 1)*5) + x for x in range(5)]))
    #self.assert_equality(adapter[-1].item(), tuple(expected_values))
    # test 'trouble' records that have caused crashes in the past
    self.assert_equality(adapter[290000].item(), tuple([(290000*5) + x for x in range(5)]))
    self.assert_equality(adapter[818000].item(), tuple([(818000*5) + x for x in range(5)]))
    # test loading disk index
    adapter2 = textadapter.text_adapter(dataz, compression='gzip', delimiter=',', field_names=False, infer_types=False, indexing=True, index_filename='test.idx')
    adapter2.set_field_types({0:'u4',1:'u4',2:'u4',3:'u4',4:'u4'})
    self.assert_equality(adapter2[0].item(), tuple([(0*5) + x for x in range(5)]))
    self.assert_equality(adapter2[10].item(), tuple([(10*5) + x for x in range(5)]))
    self.assert_equality(adapter2[100].item(), tuple([(100*5) + x for x in range(5)]))
    self.assert_equality(adapter2[1000].item(), tuple([(1000*5) + x for x in range(5)]))
    self.assert_equality(adapter2[10000].item(), tuple([(10000*5) + x for x in range(5)]))
    self.assert_equality(adapter2[100000].item(), tuple([(100000*5) + x for x in range(5)]))
    self.assert_equality(adapter2[num_records - 1].item(), tuple([((num_records - 1)*5) + x for x in range(5)]))
    #self.assert_equality(adapter[-1].item(), tuple(expected_values))
    # test 'trouble' records that have caused crashes in the past
    self.assert_equality(adapter2[290000].item(), tuple([(290000*5) + x for x in range(5)]))
    self.assert_equality(adapter2[818000].item(), tuple([(818000*5) + x for x in range(5)]))
    os.remove('test.idx')'''
def test_header_footer(self):
    """header=N skips N leading lines; field_names=True reads names after them."""
    data = StringIO('0,1,2,3,4\n5,6,7,8,9\n10,11,12,13,14')
    # One header line skipped: the last two rows remain as data.
    adapter = textadapter.text_adapter(data, header=1, field_names=False)
    adapter.field_types = dict(zip(range(5), ['u4'] * 5))
    assert_array_equal(
        adapter[:],
        np.array([(5, 6, 7, 8, 9), (10, 11, 12, 13, 14)], dtype='u4,u4,u4,u4,u4'))
    # Two header lines skipped: only the final row remains.
    data.seek(0)
    adapter = textadapter.text_adapter(data, header=2, field_names=False)
    adapter.field_types = dict(zip(range(5), ['u4'] * 5))
    assert_array_equal(
        adapter[:],
        np.array([(10, 11, 12, 13, 14)], dtype='u4,u4,u4,u4,u4'))
    # One header line skipped, then the next line supplies the field names.
    data.seek(0)
    adapter = textadapter.text_adapter(data, header=1, field_names=True)
    adapter.field_types = dict(zip(range(5), ['u4'] * 5))
    assert_array_equal(
        adapter[:],
        np.array([(10, 11, 12, 13, 14)],
                 dtype=[('5', 'u4'), ('6', 'u4'), ('7', 'u4'), ('8', 'u4'), ('9', 'u4')]))
def test_delimiter(self):
    """Delimiter sniffing for ',', ' ', '\\t', and 'x'; plus delimiter=None."""
    # Each sample uses a different separator; the adapter must detect it.
    for sample in ('1,2,3\n', '1 2 3\n', '1\t2\t3\n', '1x2x3\n'):
        adapter = textadapter.text_adapter(StringIO(sample), field_names=False)
        self.assert_equality(adapter[0].item(), (1, 2, 3))
    # Single-field data with delimiter handling explicitly disabled.
    data = StringIO('aaa\nbbb\nccc')
    array = textadapter.text_adapter(data, field_names=False, delimiter=None)[:]
    assert_array_equal(array, np.array([('aaa',), ('bbb',), ('ccc',)], dtype=[('f0', 'O')]))
def test_auto_type_inference(self):
    """Inference widens each column's type; slices infer from rows read."""
    csv_text = '0,1,2,3,4\n5.5,6,7,8,9\n10,11,12,13,14a\n15,16,xxx,18,19'
    adapter = textadapter.text_adapter(StringIO(csv_text), field_names=False, infer_types=True)
    full = adapter.to_array()
    # Whole-file read: the widest value seen decides each column's dtype.
    expected_dtypes = {'f0': 'float64', 'f1': 'uint64', 'f2': 'O',
                       'f3': 'uint64', 'f4': 'O'}
    for field, dtype_name in expected_dtypes.items():
        self.assert_equality(full.dtype.fields[field][0], np.dtype(dtype_name))
    # Sliced reads: only the rows actually read feed the inference.
    adapter = textadapter.text_adapter(StringIO(csv_text), field_names=False, infer_types=True)
    self.assert_equality(adapter[0].dtype.fields['f0'][0], np.dtype('uint64'))
    self.assert_equality(adapter[1:3].dtype.fields['f0'][0], np.dtype('float64'))
    self.assert_equality(adapter[3].dtype.fields['f4'][0], np.dtype('uint64'))
    self.assert_equality(adapter[:].dtype.fields['f3'][0], np.dtype('uint64'))
    self.assert_equality(adapter[-1].dtype.fields['f2'][0], np.dtype('O'))
    self.assert_equality(adapter[2].dtype.fields['f4'][0], np.dtype('O'))
def test_64bit_ints(self):
    """Values at the extremes of the signed/unsigned 64-bit range parse intact."""
    int64_max = 2 ** 63 - 1
    uint64_max = 2 ** 64 - 1
    data = StringIO('%d,%d,%d' % (int64_max, -int64_max, uint64_max))
    adapter = textadapter.text_adapter(data, delimiter=',', field_names=False, infer_types=False)
    adapter.set_field_types({0: 'i8', 1: 'i8', 2: 'u8'})
    array = adapter.to_array()
    self.assert_equality(array[0].item(), (int64_max, -int64_max, uint64_max))
def test_adapter_factory(self):
    """text_adapter() returns a parser-specific adapter, rejecting unknown names."""
    source = StringIO("1,2,3")
    csv_adapter = textadapter.text_adapter(
        source, "csv", delimiter=',', field_names=False, infer_types=False)
    self.assertTrue(isinstance(csv_adapter, textadapter.CSVTextAdapter))
    # An unrecognized parser name must raise AdapterException.
    self.assertRaises(
        textadapter.AdapterException, textadapter.text_adapter, source, "foobar")
def test_field_names(self):
    """Field-name handling: extra columns, duplicate names, explicit lists."""
    # Data columns beyond the named header fields are ignored.
    source = StringIO('f0,f1\n0,1,2\n3,4,5')
    adapter = textadapter.text_adapter(source, 'csv', delimiter=',', field_names=True)
    result = adapter.to_array()
    self.assert_equality(result.dtype.names, ('f0', 'f1'))
    self.assert_equality(result[0].item(), (0, 1))
    self.assert_equality(result[1].item(), (3, 4))
    # Duplicate header names get a numeric suffix appended.
    source = StringIO('f0,field,field\n0,1,2\n3,4,5')
    adapter = textadapter.text_adapter(source, 'csv', delimiter=',', field_names=True, infer_types=False)
    adapter.set_field_types({0: 'u4', 1: 'u4', 2: 'u4'})
    result = adapter.to_array()
    self.assert_equality(result.dtype.names, ('f0', 'field', 'field1'))
    # Field names can also be supplied directly as a list.
    source = StringIO('0,1,2\n3,4,5')
    adapter = textadapter.text_adapter(source, field_names=['a', 'b', 'c'], infer_types=False)
    adapter.field_types = {0: 'u4', 1: 'u4', 2: 'u4'}
    result = adapter[:]
    self.assertTrue(result.dtype.names == ('a', 'b', 'c'))
    assert_array_equal(
        result,
        np.array([(0, 1, 2), (3, 4, 5)], dtype=[('a', 'u4'), ('b', 'u4'), ('c', 'u4')]))
def test_float_conversion(self):
    """Floats parse with sign, decimal point, and E/e exponent notation."""
    data = StringIO('10,1.333,-1.23,10.0E+2,999.9e-2')
    adapter = textadapter.text_adapter(data, field_names=False, infer_types=False)
    adapter.set_field_types(dict.fromkeys(range(5), 'f8'))
    row = adapter[0]
    # Exact tuple comparison is avoided; compare each float approximately.
    expected = (10.0, 1.333, -1.23, 1000.0, 9.999)
    for index, value in enumerate(expected):
        self.assertAlmostEqual(row[0][index], value)
def test_generators(self):
    """An adapter can consume records produced by a Python generator."""
    def csv_rows(count):
        # Yields 'b,b+1,b+2,b+3,b+4' with b = row_index * 5.
        for row_index in range(count):
            base = row_index * 5
            yield ','.join(str(base + offset) for offset in range(5))
    adapter = textadapter.text_adapter(csv_rows(self.num_records), field_names=False)
    array = adapter[:]
    self.assert_equality(array.size, self.num_records)
    for i in range(self.num_records):
        expected = tuple(i * 5 + offset for offset in range(5))
        self.assert_equality(array[i].item(), expected)
def test_comments(self):
    """'#' lines are comments by default; comment=None disables that."""
    stream = StringIO('1,2,3\n#4,5,6')
    result = textadapter.text_adapter(stream, field_names=False)[:]
    self.assert_equality(result.size, 1)
    self.assert_equality(result[0].item(), (1, 2, 3))
    # With comment handling off, the '#' line is a data record and column 0
    # is read as a string field.
    stream = StringIO('1,2,3\n#4,5,6')
    result = textadapter.text_adapter(stream, field_names=False, comment=None)[:]
    self.assert_equality(result.size, 2)
    self.assert_equality(result[0].item(), ('1', 2, 3))
    self.assert_equality(result[1].item(), ('#4', 5, 6))
def test_escapechar(self):
    # Backslash is the default escape: it is dropped and the next character
    # is taken literally, so '2\\2' in the source parses as 22.
    data = StringIO('1,2\\2,3\n4,5\\5\\5,6')
    array = textadapter.text_adapter(data, field_names=False)[:]
    assert_array_equal(array,
        np.array([(1,22,3), (4,555,6)], dtype='u8,u8,u8'))
    # Leading/trailing escape characters are simply dropped.
    data = StringIO('\\1,2,3\n4,5,6\\')
    array = textadapter.text_adapter(data, field_names=False)[:]
    assert_array_equal(array,
        np.array([(1,2,3), (4,5,6)], dtype='u8,u8,u8'))
    # An escaped delimiter does not split the field.
    data = StringIO('a,b\\,b,c\na,b\\,b\\,b,c')
    array = textadapter.text_adapter(data, field_names=False)[:]
    assert_array_equal(array,
        np.array([('a', 'b,b', 'c'), ('a', 'b,b,b', 'c')], dtype='O,O,O'))
    # The escape character can be overridden (here: 'x').
    data = StringIO('a,bx,b,c\na,bx,bx,b,c')
    array = textadapter.text_adapter(data, field_names=False, escape='x')[:]
    assert_array_equal(array,
        np.array([('a', 'b,b', 'c'), ('a', 'b,b,b', 'c')], dtype='O,O,O'))
# NOTE(review): disabled test kept as a bare string literal; it depends on
# pandas. Consider @unittest.skipUnless(pandas_available, ...) instead of
# string-quoting the body.
'''def test_dataframe_output(self):
    try:
        import pandas
    except ImportError:
        return
    # Test filling blank lines with fill values if output is dataframe
    data = StringIO('1,2,3\n\n4,5,6')
    adapter = textadapter.text_adapter(data, field_names=False)
    adapter.field_types = {0:'O', 1:'O', 2:'O'}
    adapter.set_fill_values({0:np.nan, 1:np.nan, 2:np.nan})
    df = adapter.to_dataframe()'''
def test_csv(self):
    """Blank lines in CSV input are skipped entirely."""
    adapter = textadapter.text_adapter(StringIO('1,2,3\n\n4,5,6'), field_names=False)
    expected = np.array([(1, 2, 3), (4, 5, 6)],
                        dtype=[('f0', '<u8'), ('f1', '<u8'), ('f2', '<u8')])
    assert_array_equal(adapter[:], expected)
def test_json(self):
    # JSON parsing: each top-level object becomes a record; keys become
    # field names, numbers map to 'u8' and strings to 'O'.
    # Test json number
    data = StringIO('{"id":123}')
    adapter = textadapter.text_adapter(data, parser='json')
    array = adapter[:]
    assert_array_equal(array, np.array([(123,)], dtype=[('id', 'u8')]))
    # Test json string
    data = StringIO('{"id":"xxx"}')
    adapter = textadapter.text_adapter(data, parser='json')
    array = adapter[:]
    assert_array_equal(array, np.array([('xxx',)], dtype=[('id', 'O')]))
    # Test multiple values
    data = StringIO('{"id":123, "name":"xxx"}')
    adapter = textadapter.text_adapter(data, parser='json')
    array = adapter[:]
    assert_array_equal(array, np.array([(123, 'xxx',)], dtype=[('id', 'u8'), ('name', 'O')]))
    # Test multiple records
    data = StringIO('[{"id":123, "name":"xxx"}, {"id":456, "name":"yyy"}]')
    adapter = textadapter.text_adapter(data, parser='json')
    array = adapter[:]
    assert_array_equal(array, np.array([(123, 'xxx',), (456, 'yyy')], dtype=[('id', 'u8'), ('name', 'O')]))
    # Test multiple objects separated by newlines
    data = StringIO('{"id":123, "name":"xxx"}\n{"id":456, "name":"yyy"}')
    adapter = textadapter.text_adapter(data, parser='json')
    array = adapter[:]
    assert_array_equal(array, np.array([(123, 'xxx',), (456, 'yyy')], dtype=[('id', 'u8'), ('name', 'O')]))
    # Trailing newline after a single object is tolerated.
    data = StringIO('{"id":123, "name":"xxx"}\n')
    adapter = textadapter.text_adapter(data, parser='json')
    array = adapter[:]
    assert_array_equal(array, np.array([(123, 'xxx',)], dtype=[('id', 'u8'), ('name', 'O')]))
    # JNB: broken; should be really be supporting the following json inputs?
    '''
    # Test subarrays
    data = StringIO('{"id":123, "names":["xxx","yyy","zzz"]}')
    adapter = textadapter.text_adapter(data, parser='json')
    array = adapter[:]
    assert_array_equal(array, np.array([(123, 'xxx', 'yyy', 'zzz',)],
        dtype=[('f0', 'u8'), ('f1', 'O'), ('f2', 'O'), ('f3', 'O')]))
    # Test subobjects
    data = StringIO('{"id":123, "names":{"a":"xxx", "b":"yyy", "c":"zzz"}}')
    adapter = textadapter.text_adapter(data, parser='json')
    array = adapter[:]
    assert_array_equal(array, np.array([(123, 'xxx', 'yyy', 'zzz',)],
        dtype=[('f0', 'u8'), ('f1', 'O'), ('f2', 'O'), ('f3', 'O')]))
    '''
    # Test ranges
    data = StringIO('{"id": 1, "name": "www"}\n'
        '{"id": 2, "name": "xxx"}\n'
        '{"id": 3, "name": "yyy"}\n'
        '{"id": 4, "name": "zzz"}')
    adapter = textadapter.text_adapter(data, parser='json')
    array = adapter[2:4]
    assert_array_equal(array, np.array([(3, 'yyy'), (4, 'zzz')],
        dtype=[('id', 'u8'), ('name', 'O')]))
    # Test column order: fields keep JSON appearance order, not sorted order.
    data = StringIO('{"xxx": 1, "aaa": 2}\n')
    adapter = textadapter.text_adapter(data, parser='json')
    array = adapter[:]
    assert_array_equal(array, np.array([(1, 2)],
        dtype=[('xxx', 'u8'), ('aaa', 'u8')]))
    # Test field filter: only the listed fields appear in the output.
    data = StringIO('{"id": 1, "name": "www"}\n'
        '{"id": 2, "name": "xxx"}\n'
        '{"id": 3, "name": "yyy"}\n'
        '{"id": 4, "name": "zzz"}')
    adapter = textadapter.text_adapter(data, parser='json')
    adapter.field_filter = ['name']
    array = adapter[:]
    assert_array_equal(array, np.array([('www',), ('xxx',), ('yyy',), ('zzz',)],
        dtype=[('name', 'O')]))
def test_stepping(self):
    """Slices with a step read every Nth record."""
    text = '0,1\n2,3\n4,5\n6,7\n8,9\n10,11\n12,13\n14,15\n16,17\n18,19'
    adapter = textadapter.text_adapter(StringIO(text), field_names=False)
    every_second = np.array([(0, 1), (4, 5), (8, 9), (12, 13), (16, 17)], dtype='u8,u8')
    assert_array_equal(adapter[::2], every_second)
    every_third = np.array([(0, 1), (6, 7), (12, 13), (18, 19)], dtype='u8,u8')
    assert_array_equal(adapter[::3], every_third)
def test_num_records(self):
    """num_records caps how many records the adapter exposes."""
    text = '0,1\n2,3\n4,5\n6,7\n8,9\n10,11\n12,13\n14,15\n16,17\n18,19'
    adapter = textadapter.text_adapter(StringIO(text), field_names=False, num_records=2)
    assert_array_equal(adapter[:], np.array([(0, 1), (2, 3)], dtype='u8,u8'))
def run(verbosity=1, num_records=100000):
    """Run the TestTextAdapter suite against datasets of *num_records* rows.

    Returns the unittest result object. Raises ValueError for dataset sizes
    too small for the generated-data tests to be meaningful.
    """
    if num_records < 10:
        raise ValueError('number of records for generated datasets must be at least 10')
    # The test class reads its dataset size from this class attribute.
    TestTextAdapter.num_records = num_records
    loader = unittest.TestLoader()
    suite = loader.loadTestsFromTestCase(TestTextAdapter)
    runner = unittest.TextTestRunner(verbosity=verbosity)
    return runner.run(suite)
if __name__ == '__main__':
    # Execute the suite with default verbosity and dataset size when run
    # as a script.
    run()
| [
"textadapter.FixedWidthTextAdapter",
"os.path.exists",
"numpy.dtype",
"unittest.TestLoader",
"textadapter.RegexTextAdapter",
"io.BytesIO",
"textadapter.text_adapter",
"numpy.array",
"gzip.GzipFile",
"six.StringIO",
"io.StringIO",
"unittest.TextTestRunner",
"numpy.testing.assert_array_equal",... | [((798, 817), 'six.StringIO', 'StringIO', (['"""1,2,3\n"""'], {}), "('1,2,3\\n')\n", (806, 817), False, 'from six import StringIO\n'), ((836, 885), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (860, 885), False, 'import textadapter\n'), ((1046, 1069), 'io.StringIO', 'io.StringIO', (['u"""1,2,3\n"""'], {}), "(u'1,2,3\\n')\n", (1057, 1069), False, 'import io\n'), ((1088, 1137), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (1112, 1137), False, 'import textadapter\n'), ((1298, 1320), 'io.BytesIO', 'io.BytesIO', (["b'1,2,3\\n'"], {}), "(b'1,2,3\\n')\n", (1308, 1320), False, 'import io\n'), ((1339, 1388), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (1363, 1388), False, 'import textadapter\n'), ((1703, 1752), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (1727, 1752), False, 'import textadapter\n'), ((1772, 1817), 'numpy.array', 'np.array', (["[('1', '2', '3')]"], {'dtype': '"""u8,u8,u8"""'}), "([('1', '2', '3')], dtype='u8,u8,u8')\n", (1780, 1817), True, 'import numpy as np\n'), ((1826, 1866), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['adapter[:]', 'expected'], {}), '(adapter[:], expected)\n', (1844, 1866), False, 'from numpy.testing import assert_array_equal\n'), ((1978, 2027), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (2002, 2027), False, 'import textadapter\n'), ((2047, 2092), 'numpy.array', 'np.array', (["[('1', '2', u'ₒ')]"], {'dtype': '"""u8,u8,O"""'}), "([('1', '2', u'ₒ')], dtype='u8,u8,O')\n", (2055, 2092), True, 'import numpy as np\n'), ((2106, 2146), 'numpy.testing.assert_array_equal', 
'assert_array_equal', (['adapter[:]', 'expected'], {}), '(adapter[:], expected)\n', (2124, 2146), False, 'from numpy.testing import assert_array_equal\n'), ((2207, 2232), 'six.StringIO', 'StringIO', (['"""1 ,2 ,3 \n"""'], {}), "('1 ,2 ,3 \\n')\n", (2215, 2232), False, 'from six import StringIO\n'), ((2251, 2300), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (2275, 2300), False, 'import textadapter\n'), ((2467, 2492), 'six.StringIO', 'StringIO', (['""" 1, 2, 3\n"""'], {}), "(' 1, 2, 3\\n')\n", (2475, 2492), False, 'from six import StringIO\n'), ((2511, 2560), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (2535, 2560), False, 'import textadapter\n'), ((2727, 2758), 'six.StringIO', 'StringIO', (['""" 1 , 2 , 3 \n"""'], {}), "(' 1 , 2 , 3 \\n')\n", (2735, 2758), False, 'from six import StringIO\n'), ((2777, 2826), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (2801, 2826), False, 'import textadapter\n'), ((2999, 3030), 'six.StringIO', 'StringIO', (['"""\t1\t,\t2\t,\t3\t\n"""'], {}), "('\\t1\\t,\\t2\\t,\\t3\\t\\n')\n", (3007, 3030), False, 'from six import StringIO\n'), ((3049, 3098), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (3073, 3098), False, 'import textadapter\n'), ((3309, 3340), 'six.StringIO', 'StringIO', (['""""1 ","2 ","3 \\"\n"""'], {}), '(\'"1 ","2 ","3 "\\n\')\n', (3317, 3340), False, 'from six import StringIO\n'), ((3359, 3408), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (3383, 3408), False, 'import textadapter\n'), ((3575, 3614), 'six.StringIO', 'StringIO', (['""""\t1\t"\t"\t2\t"\t"\t3\t\\"\n"""'], {}), '(\'"\\t1\\t"\\t"\\t2\\t"\\t"\\t3\\t"\\n\')\n', 
(3583, 3614), False, 'from six import StringIO\n'), ((3633, 3698), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)', 'delimiter': '"""\t"""'}), "(data, field_names=False, delimiter='\\t')\n", (3657, 3698), False, 'import textadapter\n'), ((4181, 4226), 'six.StringIO', 'StringIO', (['""" 1 2 3\n 4 5 67\n890123 4"""'], {}), '(""" 1 2 3\n 4 5 67\n890123 4""")\n', (4189, 4226), False, 'from six import StringIO\n'), ((4243, 4328), 'textadapter.FixedWidthTextAdapter', 'textadapter.FixedWidthTextAdapter', (['data', '(3)'], {'infer_types': '(False)', 'field_names': '(False)'}), '(data, 3, infer_types=False, field_names=False\n )\n', (4276, 4328), False, 'import textadapter\n'), ((4398, 4461), 'numpy.array', 'np.array', (['[(1, 2, 3), (4, 5, 67), (890, 123, 4)]'], {'dtype': '"""i,i,i"""'}), "([(1, 2, 3), (4, 5, 67), (890, 123, 4)], dtype='i,i,i')\n", (4406, 4461), True, 'import numpy as np\n'), ((4470, 4509), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['adapter[:]', 'control'], {}), '(adapter[:], control)\n', (4488, 4509), False, 'from numpy.testing import assert_array_equal\n'), ((4575, 4639), 'six.StringIO', 'StringIO', (['""" 1 , -2 , 3.3 , -4.4 \n 5 , -6 , 7.7 , -8.8 """'], {}), '(""" 1 , -2 , 3.3 , -4.4 \n 5 , -6 , 7.7 , -8.8 """)\n', (4583, 4639), False, 'from six import StringIO\n'), ((4655, 4704), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (4679, 4704), False, 'import textadapter\n'), ((4817, 4888), 'numpy.array', 'np.array', (['[(1, -2, 3.3, -4.4), (5, -6, 7.7, -8.8)]'], {'dtype': '"""u4,i8,f4,f8"""'}), "([(1, -2, 3.3, -4.4), (5, -6, 7.7, -8.8)], dtype='u4,i8,f4,f8')\n", (4825, 4888), True, 'import numpy as np\n'), ((4891, 4925), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['array', 'control'], {}), '(array, control)\n', (4909, 4925), False, 'from numpy.testing import assert_array_equal\n'), ((4970, 4980), 
'six.StringIO', 'StringIO', ([], {}), '()\n', (4978, 4980), False, 'from six import StringIO\n'), ((5064, 5113), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (5088, 5113), False, 'import textadapter\n'), ((8437, 8447), 'six.StringIO', 'StringIO', ([], {}), '()\n', (8445, 8447), False, 'from six import StringIO\n'), ((8531, 8595), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'delimiter': '""","""', 'field_names': '(False)'}), "(data, delimiter=',', field_names=False)\n", (8555, 8595), False, 'import textadapter\n'), ((9587, 9597), 'six.StringIO', 'StringIO', ([], {}), '()\n', (9595, 9597), False, 'from six import StringIO\n'), ((9692, 9779), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'delimiter': '""","""', 'field_names': '(False)', 'infer_types': '(False)'}), "(data, delimiter=',', field_names=False,\n infer_types=False)\n", (9716, 9779), False, 'import textadapter\n'), ((10544, 10630), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'delimiter': '""","""', 'field_names': '(False)', 'infer_types': '(True)'}), "(data, delimiter=',', field_names=False,\n infer_types=True)\n", (10568, 10630), False, 'import textadapter\n'), ((11267, 11296), 'six.StringIO', 'StringIO', (['"""1,2,3\n4,5\n7,8,9"""'], {}), "('1,2,3\\n4,5\\n7,8,9')\n", (11275, 11296), False, 'from six import StringIO\n'), ((11315, 11364), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (11339, 11364), False, 'import textadapter\n'), ((11750, 11760), 'six.StringIO', 'StringIO', ([], {}), '()\n', (11758, 11760), False, 'from six import StringIO\n'), ((11850, 11948), 'textadapter.FixedWidthTextAdapter', 'textadapter.FixedWidthTextAdapter', (['data', '[2, 3, 4, 5, 6]'], {'field_names': '(False)', 'infer_types': '(False)'}), '(data, [2, 3, 4, 5, 6], field_names=False,\n 
infer_types=False)\n', (11883, 11948), False, 'import textadapter\n'), ((12663, 12691), 'six.StringIO', 'StringIO', (['""" 1 2 3\n\n 4 5 6"""'], {}), "(' 1 2 3\\n\\n 4 5 6')\n", (12671, 12691), False, 'from six import StringIO\n'), ((12710, 12809), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'parser': '"""fixed_width"""', 'field_widths': '[2, 2, 2]', 'field_names': '(False)'}), "(data, parser='fixed_width', field_widths=[2, 2, 2],\n field_names=False)\n", (12734, 12809), False, 'import textadapter\n'), ((13012, 13055), 'six.StringIO', 'StringIO', (['"""# 1 2 3\n 1 2 3\n# foo\n 4 5 6"""'], {}), '("""# 1 2 3\n 1 2 3\n# foo\n 4 5 6""")\n', (13020, 13055), False, 'from six import StringIO\n'), ((13073, 13172), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'parser': '"""fixed_width"""', 'field_widths': '[2, 2, 2]', 'field_names': '(False)'}), "(data, parser='fixed_width', field_widths=[2, 2, 2],\n field_names=False)\n", (13097, 13172), False, 'import textadapter\n'), ((13378, 13404), 'six.StringIO', 'StringIO', (['""" a b c\n 1 2 3"""'], {}), "(' a b c\\n 1 2 3')\n", (13386, 13404), False, 'from six import StringIO\n'), ((13423, 13521), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'parser': '"""fixed_width"""', 'field_widths': '[2, 2, 2]', 'field_names': '(True)'}), "(data, parser='fixed_width', field_widths=[2, 2, 2],\n field_names=True)\n", (13447, 13521), False, 'import textadapter\n'), ((13731, 13758), 'six.StringIO', 'StringIO', (['"""# a b c\n 1 2 3"""'], {}), "('# a b c\\n 1 2 3')\n", (13739, 13758), False, 'from six import StringIO\n'), ((13777, 13875), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'parser': '"""fixed_width"""', 'field_widths': '[2, 2, 2]', 'field_names': '(True)'}), "(data, parser='fixed_width', field_widths=[2, 2, 2],\n field_names=True)\n", (13801, 13875), False, 'import textadapter\n'), ((14080, 14102), 'six.StringIO', 'StringIO', (['""" a\n 1 2 
3"""'], {}), "(' a\\n 1 2 3')\n", (14088, 14102), False, 'from six import StringIO\n'), ((14121, 14219), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'parser': '"""fixed_width"""', 'field_widths': '[2, 2, 2]', 'field_names': '(True)'}), "(data, parser='fixed_width', field_widths=[2, 2, 2],\n field_names=True)\n", (14145, 14219), False, 'import textadapter\n'), ((14409, 14419), 'six.StringIO', 'StringIO', ([], {}), '()\n', (14417, 14419), False, 'from six import StringIO\n'), ((14503, 14633), 'textadapter.RegexTextAdapter', 'textadapter.RegexTextAdapter', (['data', '"""([0-9]*),([0-9]*),([0-9]*),([0-9]*),([0-9]*)\n"""'], {'field_names': '(False)', 'infer_types': '(False)'}), "(data,\n '([0-9]*),([0-9]*),([0-9]*),([0-9]*),([0-9]*)\\n', field_names=False,\n infer_types=False)\n", (14531, 14633), False, 'import textadapter\n'), ((15032, 15058), 'six.StringIO', 'StringIO', (['"""1 2 3\n\n4 5 6"""'], {}), "('1 2 3\\n\\n4 5 6')\n", (15040, 15058), False, 'from six import StringIO\n'), ((15077, 15187), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'parser': '"""regex"""', 'regex_string': '"""([0-9]) ([0-9]) ([0-9])"""', 'field_names': '(False)'}), "(data, parser='regex', regex_string=\n '([0-9]) ([0-9]) ([0-9])', field_names=False)\n", (15101, 15187), False, 'import textadapter\n'), ((15391, 15431), 'six.StringIO', 'StringIO', (['"""#1 2 3\n1 2 3\n# foo\n4 5 6"""'], {}), '("""#1 2 3\n1 2 3\n# foo\n4 5 6""")\n', (15399, 15431), False, 'from six import StringIO\n'), ((15449, 15559), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'parser': '"""regex"""', 'regex_string': '"""([0-9]) ([0-9]) ([0-9])"""', 'field_names': '(False)'}), "(data, parser='regex', regex_string=\n '([0-9]) ([0-9]) ([0-9])', field_names=False)\n", (15473, 15559), False, 'import textadapter\n'), ((15766, 15790), 'six.StringIO', 'StringIO', (['"""a b c\n4 5 6"""'], {}), "('a b c\\n4 5 6')\n", (15774, 15790), False, 'from six import 
StringIO\n'), ((15809, 15918), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'parser': '"""regex"""', 'regex_string': '"""([0-9]) ([0-9]) ([0-9])"""', 'field_names': '(True)'}), "(data, parser='regex', regex_string=\n '([0-9]) ([0-9]) ([0-9])', field_names=True)\n", (15833, 15918), False, 'import textadapter\n'), ((16129, 16154), 'six.StringIO', 'StringIO', (['"""#a b c\n4 5 6"""'], {}), "('#a b c\\n4 5 6')\n", (16137, 16154), False, 'from six import StringIO\n'), ((16173, 16282), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'parser': '"""regex"""', 'regex_string': '"""([0-9]) ([0-9]) ([0-9])"""', 'field_names': '(True)'}), "(data, parser='regex', regex_string=\n '([0-9]) ([0-9]) ([0-9])', field_names=True)\n", (16197, 16282), False, 'import textadapter\n'), ((16488, 16510), 'six.StringIO', 'StringIO', (['"""a b\n4 5 6"""'], {}), "('a b\\n4 5 6')\n", (16496, 16510), False, 'from six import StringIO\n'), ((16529, 16638), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'parser': '"""regex"""', 'regex_string': '"""([0-9]) ([0-9]) ([0-9])"""', 'field_names': '(True)'}), "(data, parser='regex', regex_string=\n '([0-9]) ([0-9]) ([0-9])', field_names=True)\n", (16553, 16638), False, 'import textadapter\n'), ((16859, 16894), 'six.StringIO', 'StringIO', (['"""a b c\n1 2 3 4 5 6"""'], {}), '("""a b c\n1 2 3 4 5 6""")\n', (16867, 16894), False, 'from six import StringIO\n'), ((16910, 17033), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'parser': '"""regex"""', 'regex_string': '"""([0-9\\\\s]+) ([0-9\\\\s]+) ([0-9\\\\s]+)"""', 'field_names': '(True)'}), "(data, parser='regex', regex_string=\n '([0-9\\\\s]+) ([0-9\\\\s]+) ([0-9\\\\s]+)', field_names=True)\n", (16934, 17033), False, 'import textadapter\n'), ((17460, 17470), 'six.StringIO', 'StringIO', ([], {}), '()\n', (17468, 17470), False, 'from six import StringIO\n'), ((17589, 17676), 'textadapter.text_adapter', 
'textadapter.text_adapter', (['data'], {'delimiter': '""","""', 'field_names': '(False)', 'infer_types': '(False)'}), "(data, delimiter=',', field_names=False,\n infer_types=False)\n", (17613, 17676), False, 'import textadapter\n'), ((18484, 18510), 'os.path.exists', 'os.path.exists', (['"""test.idx"""'], {}), "('test.idx')\n", (18498, 18510), False, 'import os\n'), ((18585, 18695), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'delimiter': '""","""', 'field_names': '(False)', 'infer_types': '(False)', 'index_name': '"""test.idx"""'}), "(data, delimiter=',', field_names=False,\n infer_types=False, index_name='test.idx')\n", (18609, 18695), False, 'import textadapter\n'), ((19530, 19640), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'delimiter': '""","""', 'field_names': '(False)', 'infer_types': '(False)', 'index_name': '"""test.idx"""'}), "(data, delimiter=',', field_names=False,\n infer_types=False, index_name='test.idx')\n", (19554, 19640), False, 'import textadapter\n'), ((20390, 20411), 'os.remove', 'os.remove', (['"""test.idx"""'], {}), "('test.idx')\n", (20399, 20411), False, 'import os\n'), ((20490, 20500), 'six.StringIO', 'StringIO', ([], {}), '()\n', (20498, 20500), False, 'from six import StringIO\n'), ((20710, 20749), 'gzip.GzipFile', 'gzip.GzipFile', ([], {'fileobj': 'dataz', 'mode': '"""wb"""'}), "(fileobj=dataz, mode='wb')\n", (20723, 20749), False, 'import gzip\n'), ((21029, 21137), 'textadapter.text_adapter', 'textadapter.text_adapter', (['dataz'], {'compression': '"""gzip"""', 'delimiter': '""","""', 'field_names': '(False)', 'infer_types': '(False)'}), "(dataz, compression='gzip', delimiter=',',\n field_names=False, infer_types=False)\n", (21053, 21137), False, 'import textadapter\n'), ((24959, 25009), 'six.StringIO', 'StringIO', (['"""0,1,2,3,4\n5,6,7,8,9\n10,11,12,13,14"""'], {}), '("""0,1,2,3,4\n5,6,7,8,9\n10,11,12,13,14""")\n', (24967, 25009), False, 'from six import StringIO\n'), ((25026, 25085), 
'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'header': '(1)', 'field_names': '(False)'}), '(data, header=1, field_names=False)\n', (25050, 25085), False, 'import textadapter\n'), ((25304, 25363), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'header': '(2)', 'field_names': '(False)'}), '(data, header=2, field_names=False)\n', (25328, 25363), False, 'import textadapter\n'), ((25569, 25627), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'header': '(1)', 'field_names': '(True)'}), '(data, header=1, field_names=True)\n', (25593, 25627), False, 'import textadapter\n'), ((25880, 25899), 'six.StringIO', 'StringIO', (['"""1,2,3\n"""'], {}), "('1,2,3\\n')\n", (25888, 25899), False, 'from six import StringIO\n'), ((25918, 25967), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (25942, 25967), False, 'import textadapter\n'), ((26041, 26060), 'six.StringIO', 'StringIO', (['"""1 2 3\n"""'], {}), "('1 2 3\\n')\n", (26049, 26060), False, 'from six import StringIO\n'), ((26079, 26128), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (26103, 26128), False, 'import textadapter\n'), ((26202, 26223), 'six.StringIO', 'StringIO', (['"""1\t2\t3\n"""'], {}), "('1\\t2\\t3\\n')\n", (26210, 26223), False, 'from six import StringIO\n'), ((26242, 26291), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (26266, 26291), False, 'import textadapter\n'), ((26365, 26384), 'six.StringIO', 'StringIO', (['"""1x2x3\n"""'], {}), "('1x2x3\\n')\n", (26373, 26384), False, 'from six import StringIO\n'), ((26403, 26452), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (26427, 26452), False, 'import textadapter\n'), ((26579, 26604), 
'six.StringIO', 'StringIO', (['"""aaa\nbbb\nccc"""'], {}), "('aaa\\nbbb\\nccc')\n", (26587, 26604), False, 'from six import StringIO\n'), ((26843, 26912), 'six.StringIO', 'StringIO', (['"""0,1,2,3,4\n5.5,6,7,8,9\n10,11,12,13,14a\n15,16,xxx,18,19"""'], {}), '("""0,1,2,3,4\n5.5,6,7,8,9\n10,11,12,13,14a\n15,16,xxx,18,19""")\n', (26851, 26912), False, 'from six import StringIO\n'), ((26930, 26997), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)', 'infer_types': '(True)'}), '(data, field_names=False, infer_types=True)\n', (26954, 26997), False, 'import textadapter\n'), ((27430, 27499), 'six.StringIO', 'StringIO', (['"""0,1,2,3,4\n5.5,6,7,8,9\n10,11,12,13,14a\n15,16,xxx,18,19"""'], {}), '("""0,1,2,3,4\n5.5,6,7,8,9\n10,11,12,13,14a\n15,16,xxx,18,19""")\n', (27438, 27499), False, 'from six import StringIO\n'), ((27517, 27584), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)', 'infer_types': '(True)'}), '(data, field_names=False, infer_types=True)\n', (27541, 27584), False, 'import textadapter\n'), ((28218, 28305), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'delimiter': '""","""', 'field_names': '(False)', 'infer_types': '(False)'}), "(data, delimiter=',', field_names=False,\n infer_types=False)\n", (28242, 28305), False, 'import textadapter\n'), ((28533, 28550), 'six.StringIO', 'StringIO', (['"""1,2,3"""'], {}), "('1,2,3')\n", (28541, 28550), False, 'from six import StringIO\n'), ((28569, 28663), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data', '"""csv"""'], {'delimiter': '""","""', 'field_names': '(False)', 'infer_types': '(False)'}), "(data, 'csv', delimiter=',', field_names=False,\n infer_types=False)\n", (28593, 28663), False, 'import textadapter\n'), ((28924, 28957), 'six.StringIO', 'StringIO', (['"""f0,f1\n0,1,2\n3,4,5"""'], {}), '("""f0,f1\n0,1,2\n3,4,5""")\n', (28932, 28957), False, 'from six import StringIO\n'), ((28974, 29044), 
'textadapter.text_adapter', 'textadapter.text_adapter', (['data', '"""csv"""'], {'delimiter': '""","""', 'field_names': '(True)'}), "(data, 'csv', delimiter=',', field_names=True)\n", (28998, 29044), False, 'import textadapter\n'), ((29305, 29347), 'six.StringIO', 'StringIO', (['"""f0,field,field\n0,1,2\n3,4,5"""'], {}), '("""f0,field,field\n0,1,2\n3,4,5""")\n', (29313, 29347), False, 'from six import StringIO\n'), ((29364, 29457), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data', '"""csv"""'], {'delimiter': '""","""', 'field_names': '(True)', 'infer_types': '(False)'}), "(data, 'csv', delimiter=',', field_names=True,\n infer_types=False)\n", (29388, 29457), False, 'import textadapter\n'), ((29674, 29698), 'six.StringIO', 'StringIO', (['"""0,1,2\n3,4,5"""'], {}), "('0,1,2\\n3,4,5')\n", (29682, 29698), False, 'from six import StringIO\n'), ((29717, 29795), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': "['a', 'b', 'c']", 'infer_types': '(False)'}), "(data, field_names=['a', 'b', 'c'], infer_types=False)\n", (29741, 29795), False, 'import textadapter\n'), ((30104, 30147), 'six.StringIO', 'StringIO', (['"""10,1.333,-1.23,10.0E+2,999.9e-2"""'], {}), "('10,1.333,-1.23,10.0E+2,999.9e-2')\n", (30112, 30147), False, 'from six import StringIO\n'), ((30166, 30234), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)', 'infer_types': '(False)'}), '(data, field_names=False, infer_types=False)\n', (30190, 30234), False, 'import textadapter\n'), ((31374, 31399), 'six.StringIO', 'StringIO', (['"""1,2,3\n#4,5,6"""'], {}), "('1,2,3\\n#4,5,6')\n", (31382, 31399), False, 'from six import StringIO\n'), ((31418, 31467), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (31442, 31467), False, 'import textadapter\n'), ((31610, 31635), 'six.StringIO', 'StringIO', (['"""1,2,3\n#4,5,6"""'], {}), "('1,2,3\\n#4,5,6')\n", 
(31618, 31635), False, 'from six import StringIO\n'), ((31654, 31717), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)', 'comment': 'None'}), '(data, field_names=False, comment=None)\n', (31678, 31717), False, 'import textadapter\n'), ((31951, 31987), 'six.StringIO', 'StringIO', (['"""1,2\\\\2,3\n4,5\\\\5\\\\5,6"""'], {}), '("""1,2\\\\2,3\n4,5\\\\5\\\\5,6""")\n', (31959, 31987), False, 'from six import StringIO\n'), ((32167, 32195), 'six.StringIO', 'StringIO', (['"""\\\\1,2,3\n4,5,6\\\\"""'], {}), "('\\\\1,2,3\\n4,5,6\\\\')\n", (32175, 32195), False, 'from six import StringIO\n'), ((32375, 32414), 'six.StringIO', 'StringIO', (['"""a,b\\\\,b,c\na,b\\\\,b\\\\,b,c"""'], {}), '("""a,b\\\\,b,c\na,b\\\\,b\\\\,b,c""")\n', (32383, 32414), False, 'from six import StringIO\n'), ((32610, 32646), 'six.StringIO', 'StringIO', (['"""a,bx,b,c\na,bx,bx,b,c"""'], {}), '("""a,bx,b,c\na,bx,bx,b,c""")\n', (32618, 32646), False, 'from six import StringIO\n'), ((33383, 33409), 'six.StringIO', 'StringIO', (['"""1,2,3\n\n4,5,6"""'], {}), "('1,2,3\\n\\n4,5,6')\n", (33391, 33409), False, 'from six import StringIO\n'), ((33428, 33477), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (33452, 33477), False, 'import textadapter\n'), ((33697, 33719), 'six.StringIO', 'StringIO', (['"""{"id":123}"""'], {}), '(\'{"id":123}\')\n', (33705, 33719), False, 'from six import StringIO\n'), ((33738, 33783), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'parser': '"""json"""'}), "(data, parser='json')\n", (33762, 33783), False, 'import textadapter\n'), ((33930, 33954), 'six.StringIO', 'StringIO', (['"""{"id":"xxx"}"""'], {}), '(\'{"id":"xxx"}\')\n', (33938, 33954), False, 'from six import StringIO\n'), ((33973, 34018), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'parser': '"""json"""'}), "(data, parser='json')\n", (33997, 34018), False, 
'import textadapter\n'), ((34170, 34206), 'six.StringIO', 'StringIO', (['"""{"id":123, "name":"xxx"}"""'], {}), '(\'{"id":123, "name":"xxx"}\')\n', (34178, 34206), False, 'from six import StringIO\n'), ((34225, 34270), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'parser': '"""json"""'}), "(data, parser='json')\n", (34249, 34270), False, 'import textadapter\n'), ((34444, 34508), 'six.StringIO', 'StringIO', (['"""[{"id":123, "name":"xxx"}, {"id":456, "name":"yyy"}]"""'], {}), '(\'[{"id":123, "name":"xxx"}, {"id":456, "name":"yyy"}]\')\n', (34452, 34508), False, 'from six import StringIO\n'), ((34527, 34572), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'parser': '"""json"""'}), "(data, parser='json')\n", (34551, 34572), False, 'import textadapter\n'), ((34782, 34847), 'six.StringIO', 'StringIO', (['"""{"id":123, "name":"xxx"}\n{"id":456, "name":"yyy"}"""'], {}), '("""{"id":123, "name":"xxx"}\n{"id":456, "name":"yyy"}""")\n', (34790, 34847), False, 'from six import StringIO\n'), ((34863, 34908), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'parser': '"""json"""'}), "(data, parser='json')\n", (34887, 34908), False, 'import textadapter\n'), ((35064, 35102), 'six.StringIO', 'StringIO', (['"""{"id":123, "name":"xxx"}\n"""'], {}), '(\'{"id":123, "name":"xxx"}\\n\')\n', (35072, 35102), False, 'from six import StringIO\n'), ((35121, 35166), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'parser': '"""json"""'}), "(data, parser='json')\n", (35145, 35166), False, 'import textadapter\n'), ((36114, 36239), 'six.StringIO', 'StringIO', (['"""{"id": 1, "name": "www"}\n{"id": 2, "name": "xxx"}\n{"id": 3, "name": "yyy"}\n{"id": 4, "name": "zzz"}"""'], {}), '(\n """{"id": 1, "name": "www"}\n{"id": 2, "name": "xxx"}\n{"id": 3, "name": "yyy"}\n{"id": 4, "name": "zzz"}"""\n )\n', (36122, 36239), False, 'from six import StringIO\n'), ((36355, 36400), 'textadapter.text_adapter', 
'textadapter.text_adapter', (['data'], {'parser': '"""json"""'}), "(data, parser='json')\n", (36379, 36400), False, 'import textadapter\n'), ((36593, 36627), 'six.StringIO', 'StringIO', (['"""{"xxx": 1, "aaa": 2}\n"""'], {}), '(\'{"xxx": 1, "aaa": 2}\\n\')\n', (36601, 36627), False, 'from six import StringIO\n'), ((36646, 36691), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'parser': '"""json"""'}), "(data, parser='json')\n", (36670, 36691), False, 'import textadapter\n'), ((36867, 36992), 'six.StringIO', 'StringIO', (['"""{"id": 1, "name": "www"}\n{"id": 2, "name": "xxx"}\n{"id": 3, "name": "yyy"}\n{"id": 4, "name": "zzz"}"""'], {}), '(\n """{"id": 1, "name": "www"}\n{"id": 2, "name": "xxx"}\n{"id": 3, "name": "yyy"}\n{"id": 4, "name": "zzz"}"""\n )\n', (36875, 36992), False, 'from six import StringIO\n'), ((37108, 37153), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'parser': '"""json"""'}), "(data, parser='json')\n", (37132, 37153), False, 'import textadapter\n'), ((37387, 37452), 'six.StringIO', 'StringIO', (['"""0,1\n2,3\n4,5\n6,7\n8,9\n10,11\n12,13\n14,15\n16,17\n18,19"""'], {}), '("""0,1\n2,3\n4,5\n6,7\n8,9\n10,11\n12,13\n14,15\n16,17\n18,19""")\n', (37395, 37452), False, 'from six import StringIO\n'), ((37476, 37525), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (37500, 37525), False, 'import textadapter\n'), ((37781, 37846), 'six.StringIO', 'StringIO', (['"""0,1\n2,3\n4,5\n6,7\n8,9\n10,11\n12,13\n14,15\n16,17\n18,19"""'], {}), '("""0,1\n2,3\n4,5\n6,7\n8,9\n10,11\n12,13\n14,15\n16,17\n18,19""")\n', (37789, 37846), False, 'from six import StringIO\n'), ((37870, 37934), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)', 'num_records': '(2)'}), '(data, field_names=False, num_records=2)\n', (37894, 37934), False, 'import textadapter\n'), ((983, 1028), 'numpy.array', 'np.array', (["[('1', '2', 
'3')]"], {'dtype': '"""S5,S5,S5"""'}), "([('1', '2', '3')], dtype='S5,S5,S5')\n", (991, 1028), True, 'import numpy as np\n'), ((1235, 1280), 'numpy.array', 'np.array', (["[('1', '2', '3')]"], {'dtype': '"""S5,S5,S5"""'}), "([('1', '2', '3')], dtype='S5,S5,S5')\n", (1243, 1280), True, 'import numpy as np\n'), ((1486, 1531), 'numpy.array', 'np.array', (["[('1', '2', '3')]"], {'dtype': '"""S5,S5,S5"""'}), "([('1', '2', '3')], dtype='S5,S5,S5')\n", (1494, 1531), True, 'import numpy as np\n'), ((2398, 2449), 'numpy.array', 'np.array', (["[('1 ', '2 ', '3 ')]"], {'dtype': '"""S3,S3,S3"""'}), "([('1 ', '2 ', '3 ')], dtype='S3,S3,S3')\n", (2406, 2449), True, 'import numpy as np\n'), ((2658, 2709), 'numpy.array', 'np.array', (["[(' 1', ' 2', ' 3')]"], {'dtype': '"""S3,S3,S3"""'}), "([(' 1', ' 2', ' 3')], dtype='S3,S3,S3')\n", (2666, 2709), True, 'import numpy as np\n'), ((2924, 2981), 'numpy.array', 'np.array', (["[(' 1 ', ' 2 ', ' 3 ')]"], {'dtype': '"""S5,S5,S5"""'}), "([(' 1 ', ' 2 ', ' 3 ')], dtype='S5,S5,S5')\n", (2932, 2981), True, 'import numpy as np\n'), ((3196, 3253), 'numpy.array', 'np.array', (["[('\\t1\\t', '\\t2\\t', '\\t3\\t')]"], {'dtype': '"""S3,S3,S3"""'}), "([('\\t1\\t', '\\t2\\t', '\\t3\\t')], dtype='S3,S3,S3')\n", (3204, 3253), True, 'import numpy as np\n'), ((3506, 3557), 'numpy.array', 'np.array', (["[('1 ', '2 ', '3 ')]"], {'dtype': '"""S3,S3,S3"""'}), "([('1 ', '2 ', '3 ')], dtype='S3,S3,S3')\n", (3514, 3557), True, 'import numpy as np\n'), ((3796, 3853), 'numpy.array', 'np.array', (["[('\\t1\\t', '\\t2\\t', '\\t3\\t')]"], {'dtype': '"""S3,S3,S3"""'}), "([('\\t1\\t', '\\t2\\t', '\\t3\\t')], dtype='S3,S3,S3')\n", (3804, 3853), True, 'import numpy as np\n'), ((5224, 5275), 'numpy.array', 'np.array', (['[(0, 1, 2, 3, 4)]'], {'dtype': '"""u4,u4,u4,u4,u4"""'}), "([(0, 1, 2, 3, 4)], dtype='u4,u4,u4,u4,u4')\n", (5232, 5275), True, 'import numpy as np\n'), ((12877, 12966), 'numpy.array', 'np.array', (['[(1, 2, 3), (4, 5, 6)]'], {'dtype': "[('f0', '<u8'), 
('f1', '<u8'), ('f2', '<u8')]"}), "([(1, 2, 3), (4, 5, 6)], dtype=[('f0', '<u8'), ('f1', '<u8'), ('f2',\n '<u8')])\n", (12885, 12966), True, 'import numpy as np\n'), ((13240, 13329), 'numpy.array', 'np.array', (['[(1, 2, 3), (4, 5, 6)]'], {'dtype': "[('f0', '<u8'), ('f1', '<u8'), ('f2', '<u8')]"}), "([(1, 2, 3), (4, 5, 6)], dtype=[('f0', '<u8'), ('f1', '<u8'), ('f2',\n '<u8')])\n", (13248, 13329), True, 'import numpy as np\n'), ((13589, 13660), 'numpy.array', 'np.array', (['[(1, 2, 3)]'], {'dtype': "[('a', '<u8'), ('b', '<u8'), ('c', '<u8')]"}), "([(1, 2, 3)], dtype=[('a', '<u8'), ('b', '<u8'), ('c', '<u8')])\n", (13597, 13660), True, 'import numpy as np\n'), ((13943, 14014), 'numpy.array', 'np.array', (['[(1, 2, 3)]'], {'dtype': "[('a', '<u8'), ('b', '<u8'), ('c', '<u8')]"}), "([(1, 2, 3)], dtype=[('a', '<u8'), ('b', '<u8'), ('c', '<u8')])\n", (13951, 14014), True, 'import numpy as np\n'), ((14287, 14360), 'numpy.array', 'np.array', (['[(1, 2, 3)]'], {'dtype': "[('a', '<u8'), ('f1', '<u8'), ('f2', '<u8')]"}), "([(1, 2, 3)], dtype=[('a', '<u8'), ('f1', '<u8'), ('f2', '<u8')])\n", (14295, 14360), True, 'import numpy as np\n'), ((15256, 15345), 'numpy.array', 'np.array', (['[(1, 2, 3), (4, 5, 6)]'], {'dtype': "[('f0', '<u8'), ('f1', '<u8'), ('f2', '<u8')]"}), "([(1, 2, 3), (4, 5, 6)], dtype=[('f0', '<u8'), ('f1', '<u8'), ('f2',\n '<u8')])\n", (15264, 15345), True, 'import numpy as np\n'), ((15628, 15717), 'numpy.array', 'np.array', (['[(1, 2, 3), (4, 5, 6)]'], {'dtype': "[('f0', '<u8'), ('f1', '<u8'), ('f2', '<u8')]"}), "([(1, 2, 3), (4, 5, 6)], dtype=[('f0', '<u8'), ('f1', '<u8'), ('f2',\n '<u8')])\n", (15636, 15717), True, 'import numpy as np\n'), ((15987, 16058), 'numpy.array', 'np.array', (['[(4, 5, 6)]'], {'dtype': "[('a', '<u8'), ('b', '<u8'), ('c', '<u8')]"}), "([(4, 5, 6)], dtype=[('a', '<u8'), ('b', '<u8'), ('c', '<u8')])\n", (15995, 16058), True, 'import numpy as np\n'), ((16351, 16422), 'numpy.array', 'np.array', (['[(4, 5, 6)]'], {'dtype': "[('a', '<u8'), 
('b', '<u8'), ('c', '<u8')]"}), "([(4, 5, 6)], dtype=[('a', '<u8'), ('b', '<u8'), ('c', '<u8')])\n", (16359, 16422), True, 'import numpy as np\n'), ((16707, 16779), 'numpy.array', 'np.array', (['[(4, 5, 6)]'], {'dtype': "[('a', '<u8'), ('b', '<u8'), ('f2', '<u8')]"}), "([(4, 5, 6)], dtype=[('a', '<u8'), ('b', '<u8'), ('f2', '<u8')])\n", (16715, 16779), True, 'import numpy as np\n'), ((17099, 17176), 'numpy.array', 'np.array', (["[('1 2', '3 4', '5 6')]"], {'dtype': "[('a', 'O'), ('b', 'O'), ('c', 'O')]"}), "([('1 2', '3 4', '5 6')], dtype=[('a', 'O'), ('b', 'O'), ('c', 'O')])\n", (17107, 17176), True, 'import numpy as np\n'), ((18524, 18545), 'os.remove', 'os.remove', (['"""test.idx"""'], {}), "('test.idx')\n", (18533, 18545), False, 'import os\n'), ((20630, 20642), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (20640, 20642), False, 'import io\n'), ((20677, 20687), 'six.StringIO', 'StringIO', ([], {}), '()\n', (20685, 20687), False, 'from six import StringIO\n'), ((25185, 25258), 'numpy.array', 'np.array', (['[(5, 6, 7, 8, 9), (10, 11, 12, 13, 14)]'], {'dtype': '"""u4,u4,u4,u4,u4"""'}), "([(5, 6, 7, 8, 9), (10, 11, 12, 13, 14)], dtype='u4,u4,u4,u4,u4')\n", (25193, 25258), True, 'import numpy as np\n'), ((25463, 25519), 'numpy.array', 'np.array', (['[(10, 11, 12, 13, 14)]'], {'dtype': '"""u4,u4,u4,u4,u4"""'}), "([(10, 11, 12, 13, 14)], dtype='u4,u4,u4,u4,u4')\n", (25471, 25519), True, 'import numpy as np\n'), ((25727, 25836), 'numpy.array', 'np.array', (['[(10, 11, 12, 13, 14)]'], {'dtype': "[('5', 'u4'), ('6', 'u4'), ('7', 'u4'), ('8', 'u4'), ('9', 'u4')]"}), "([(10, 11, 12, 13, 14)], dtype=[('5', 'u4'), ('6', 'u4'), ('7',\n 'u4'), ('8', 'u4'), ('9', 'u4')])\n", (25735, 25836), True, 'import numpy as np\n'), ((26621, 26686), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)', 'delimiter': 'None'}), '(data, field_names=False, delimiter=None)\n', (26645, 26686), False, 'import textadapter\n'), ((26724, 26785), 'numpy.array', 
'np.array', (["[('aaa',), ('bbb',), ('ccc',)]"], {'dtype': "[('f0', 'O')]"}), "([('aaa',), ('bbb',), ('ccc',)], dtype=[('f0', 'O')])\n", (26732, 26785), True, 'import numpy as np\n'), ((27091, 27110), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (27099, 27110), True, 'import numpy as np\n'), ((27170, 27188), 'numpy.dtype', 'np.dtype', (['"""uint64"""'], {}), "('uint64')\n", (27178, 27188), True, 'import numpy as np\n'), ((27248, 27261), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (27256, 27261), True, 'import numpy as np\n'), ((27321, 27339), 'numpy.dtype', 'np.dtype', (['"""uint64"""'], {}), "('uint64')\n", (27329, 27339), True, 'import numpy as np\n'), ((27399, 27412), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (27407, 27412), True, 'import numpy as np\n'), ((27648, 27666), 'numpy.dtype', 'np.dtype', (['"""uint64"""'], {}), "('uint64')\n", (27656, 27666), True, 'import numpy as np\n'), ((27733, 27752), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (27741, 27752), True, 'import numpy as np\n'), ((27817, 27835), 'numpy.dtype', 'np.dtype', (['"""uint64"""'], {}), "('uint64')\n", (27825, 27835), True, 'import numpy as np\n'), ((27900, 27918), 'numpy.dtype', 'np.dtype', (['"""uint64"""'], {}), "('uint64')\n", (27908, 27918), True, 'import numpy as np\n'), ((27984, 27997), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (27992, 27997), True, 'import numpy as np\n'), ((28062, 28075), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (28070, 28075), True, 'import numpy as np\n'), ((29974, 30053), 'numpy.array', 'np.array', (['[(0, 1, 2), (3, 4, 5)]'], {'dtype': "[('a', 'u4'), ('b', 'u4'), ('c', 'u4')]"}), "([(0, 1, 2), (3, 4, 5)], dtype=[('a', 'u4'), ('b', 'u4'), ('c', 'u4')])\n", (29982, 30053), True, 'import numpy as np\n'), ((32001, 32050), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (32025, 
32050), False, 'import textadapter\n'), ((32100, 32153), 'numpy.array', 'np.array', (['[(1, 22, 3), (4, 555, 6)]'], {'dtype': '"""u8,u8,u8"""'}), "([(1, 22, 3), (4, 555, 6)], dtype='u8,u8,u8')\n", (32108, 32153), True, 'import numpy as np\n'), ((32212, 32261), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (32236, 32261), False, 'import textadapter\n'), ((32311, 32361), 'numpy.array', 'np.array', (['[(1, 2, 3), (4, 5, 6)]'], {'dtype': '"""u8,u8,u8"""'}), "([(1, 2, 3), (4, 5, 6)], dtype='u8,u8,u8')\n", (32319, 32361), True, 'import numpy as np\n'), ((32428, 32477), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)'}), '(data, field_names=False)\n', (32452, 32477), False, 'import textadapter\n'), ((32527, 32592), 'numpy.array', 'np.array', (["[('a', 'b,b', 'c'), ('a', 'b,b,b', 'c')]"], {'dtype': '"""O,O,O"""'}), "([('a', 'b,b', 'c'), ('a', 'b,b,b', 'c')], dtype='O,O,O')\n", (32535, 32592), True, 'import numpy as np\n'), ((32660, 32721), 'textadapter.text_adapter', 'textadapter.text_adapter', (['data'], {'field_names': '(False)', 'escape': '"""x"""'}), "(data, field_names=False, escape='x')\n", (32684, 32721), False, 'import textadapter\n'), ((32771, 32836), 'numpy.array', 'np.array', (["[('a', 'b,b', 'c'), ('a', 'b,b,b', 'c')]"], {'dtype': '"""O,O,O"""'}), "([('a', 'b,b', 'c'), ('a', 'b,b,b', 'c')], dtype='O,O,O')\n", (32779, 32836), True, 'import numpy as np\n'), ((33539, 33628), 'numpy.array', 'np.array', (['[(1, 2, 3), (4, 5, 6)]'], {'dtype': "[('f0', '<u8'), ('f1', '<u8'), ('f2', '<u8')]"}), "([(1, 2, 3), (4, 5, 6)], dtype=[('f0', '<u8'), ('f1', '<u8'), ('f2',\n '<u8')])\n", (33547, 33628), True, 'import numpy as np\n'), ((33845, 33885), 'numpy.array', 'np.array', (['[(123,)]'], {'dtype': "[('id', 'u8')]"}), "([(123,)], dtype=[('id', 'u8')])\n", (33853, 33885), True, 'import numpy as np\n'), ((34080, 34121), 'numpy.array', 'np.array', 
(["[('xxx',)]"], {'dtype': "[('id', 'O')]"}), "([('xxx',)], dtype=[('id', 'O')])\n", (34088, 34121), True, 'import numpy as np\n'), ((34332, 34393), 'numpy.array', 'np.array', (["[(123, 'xxx')]"], {'dtype': "[('id', 'u8'), ('name', 'O')]"}), "([(123, 'xxx')], dtype=[('id', 'u8'), ('name', 'O')])\n", (34340, 34393), True, 'import numpy as np\n'), ((34634, 34709), 'numpy.array', 'np.array', (["[(123, 'xxx'), (456, 'yyy')]"], {'dtype': "[('id', 'u8'), ('name', 'O')]"}), "([(123, 'xxx'), (456, 'yyy')], dtype=[('id', 'u8'), ('name', 'O')])\n", (34642, 34709), True, 'import numpy as np\n'), ((34970, 35045), 'numpy.array', 'np.array', (["[(123, 'xxx'), (456, 'yyy')]"], {'dtype': "[('id', 'u8'), ('name', 'O')]"}), "([(123, 'xxx'), (456, 'yyy')], dtype=[('id', 'u8'), ('name', 'O')])\n", (34978, 35045), True, 'import numpy as np\n'), ((35228, 35289), 'numpy.array', 'np.array', (["[(123, 'xxx')]"], {'dtype': "[('id', 'u8'), ('name', 'O')]"}), "([(123, 'xxx')], dtype=[('id', 'u8'), ('name', 'O')])\n", (35236, 35289), True, 'import numpy as np\n'), ((36464, 36535), 'numpy.array', 'np.array', (["[(3, 'yyy'), (4, 'zzz')]"], {'dtype': "[('id', 'u8'), ('name', 'O')]"}), "([(3, 'yyy'), (4, 'zzz')], dtype=[('id', 'u8'), ('name', 'O')])\n", (36472, 36535), True, 'import numpy as np\n'), ((36753, 36809), 'numpy.array', 'np.array', (['[(1, 2)]'], {'dtype': "[('xxx', 'u8'), ('aaa', 'u8')]"}), "([(1, 2)], dtype=[('xxx', 'u8'), ('aaa', 'u8')])\n", (36761, 36809), True, 'import numpy as np\n'), ((37255, 37328), 'numpy.array', 'np.array', (["[('www',), ('xxx',), ('yyy',), ('zzz',)]"], {'dtype': "[('name', 'O')]"}), "([('www',), ('xxx',), ('yyy',), ('zzz',)], dtype=[('name', 'O')])\n", (37263, 37328), True, 'import numpy as np\n'), ((37567, 37636), 'numpy.array', 'np.array', (['[(0, 1), (4, 5), (8, 9), (12, 13), (16, 17)]'], {'dtype': '"""u8,u8"""'}), "([(0, 1), (4, 5), (8, 9), (12, 13), (16, 17)], dtype='u8,u8')\n", (37575, 37636), True, 'import numpy as np\n'), ((37674, 37735), 
'numpy.array', 'np.array', (['[(0, 1), (6, 7), (12, 13), (18, 19)]'], {'dtype': '"""u8,u8"""'}), "([(0, 1), (6, 7), (12, 13), (18, 19)], dtype='u8,u8')\n", (37682, 37735), True, 'import numpy as np\n'), ((37974, 38015), 'numpy.array', 'np.array', (['[(0, 1), (2, 3)]'], {'dtype': '"""u8,u8"""'}), "([(0, 1), (2, 3)], dtype='u8,u8')\n", (37982, 38015), True, 'import numpy as np\n'), ((38233, 38254), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (38252, 38254), False, 'import unittest\n'), ((38305, 38349), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': 'verbosity'}), '(verbosity=verbosity)\n', (38328, 38349), False, 'import unittest\n')] |
"""
Test file formats.
"""
import wave
import numpy
import pytest
from pytest import approx
from scipy.io import wavfile
from scipy.fftpack import fft
from diapason import generate_wav
@pytest.mark.parametrize(('frequency', 'duration', 'rate'), [
    (440., 2., 44100),
    (220., 1., 48000),
    (880., 3., 16000),
])
def test_generate_wav(frequency, duration, rate):
    """
    Check a generated WAV file with two independent readers.

    The `wave` stdlib module verifies the container metadata (channels,
    frame rate, duration); `scipy` re-reads the samples and an FFT checks
    that the dominant spectral peak sits at the requested frequency.
    """
    # Container-level checks via the stdlib reader.
    bytesio = generate_wav(frequency, duration, rate)
    # `wave.open` supports the context-manager protocol (Python 3.4+);
    # using `with` closes the reader deterministically instead of leaking it.
    with wave.open(bytesio) as wav:
        assert wav.getnchannels() == 1
        assert wav.getframerate() == rate
        assert wav.getnframes() / wav.getframerate() == approx(duration)

    # Regenerate: the stream above was consumed by `wave.open`.
    bytesio = generate_wav(frequency, duration, rate)
    scipyrate, data = wavfile.read(bytesio)
    assert scipyrate == rate
    assert len(data) / rate == approx(duration)

    # The spectrum is symmetric for real input, so only the first half
    # (below the Nyquist frequency) is inspected.  The index of the
    # largest magnitude divided by the duration gives the peak in Hz.
    transformed = fft(data)
    absolute = abs(transformed[:len(transformed) // 2 - 1])
    assert numpy.argmax(absolute) / duration == approx(frequency)
| [
"pytest.approx",
"wave.open",
"numpy.argmax",
"pytest.mark.parametrize",
"diapason.generate_wav",
"scipy.io.wavfile.read",
"scipy.fftpack.fft"
] | [((190, 318), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('frequency', 'duration', 'rate')", '[(440.0, 2.0, 44100), (220.0, 1.0, 48000), (880.0, 3.0, 16000)]'], {}), "(('frequency', 'duration', 'rate'), [(440.0, 2.0, \n 44100), (220.0, 1.0, 48000), (880.0, 3.0, 16000)])\n", (213, 318), False, 'import pytest\n'), ((457, 496), 'diapason.generate_wav', 'generate_wav', (['frequency', 'duration', 'rate'], {}), '(frequency, duration, rate)\n', (469, 496), False, 'from diapason import generate_wav\n'), ((507, 525), 'wave.open', 'wave.open', (['bytesio'], {}), '(bytesio)\n', (516, 525), False, 'import wave\n'), ((708, 747), 'diapason.generate_wav', 'generate_wav', (['frequency', 'duration', 'rate'], {}), '(frequency, duration, rate)\n', (720, 747), False, 'from diapason import generate_wav\n'), ((770, 791), 'scipy.io.wavfile.read', 'wavfile.read', (['bytesio'], {}), '(bytesio)\n', (782, 791), False, 'from scipy.io import wavfile\n'), ((887, 896), 'scipy.fftpack.fft', 'fft', (['data'], {}), '(data)\n', (890, 896), False, 'from scipy.fftpack import fft\n'), ((651, 667), 'pytest.approx', 'approx', (['duration'], {}), '(duration)\n', (657, 667), False, 'from pytest import approx\n'), ((852, 868), 'pytest.approx', 'approx', (['duration'], {}), '(duration)\n', (858, 868), False, 'from pytest import approx\n'), ((1005, 1022), 'pytest.approx', 'approx', (['frequency'], {}), '(frequency)\n', (1011, 1022), False, 'from pytest import approx\n'), ((968, 990), 'numpy.argmax', 'numpy.argmax', (['absolute'], {}), '(absolute)\n', (980, 990), False, 'import numpy\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.