text
stringlengths 29
850k
|
|---|
import numpy as np
from lpf import lowpass_cosine as filt
import matplotlib.pyplot as plt
plt.ion()
def fwhm(v_array, vmin, vmax):
    """Estimate the full width at half maximum (FWHM) of each trace.

    Parameters
    ----------
    v_array : 2-D ndarray
        One trace per row; columns are samples across the sweep vmin..vmax.
    vmin, vmax : float
        Sweep limits, used to convert a sample-index width into physical units.

    Returns
    -------
    vlpf : 2-D ndarray
        The low-pass-filtered traces (sweep edges trimmed).
    widths : 1-D ndarray
        Estimated FWHM per trace; NaN where no width could be determined.
    """
    # Trim the sweep edges, which are dominated by settling/filter artifacts.
    v_array = v_array[:, 1000:-1000]
    steps = v_array.shape[1]
    # BUGFIX: Python 2 print statement converted to a function call.
    print(steps, vmax, vmin)
    vres = (vmax - vmin) / steps
    v = np.arange(vmin, vmax, vres) / 1.0e3  # kept for parity with original; unused
    tau = 1.0   # filter time constant passed to lowpass_cosine
    fc = 0.05   # filter cutoff frequency
    widths = np.zeros(v_array.shape[0])
    vlpf = np.zeros_like(v_array)
    for i in range(len(v_array)):
        # A trace whose nanmean is 0/NaN carries no usable signal.
        if not np.nanmean(v_array[i]):
            widths[i] = np.nan
        else:
            try:
                v_lpf = filt(v_array[i], tau, fc, fc / 3.)
                vlpf[i] = v_lpf
                maximum = np.nanmax(v_lpf)
                max_idx = np.nanargmax(v_lpf)
                minimum = np.nanmin(v_lpf)
                height = maximum - minimum
                # NOTE(review): the original uses height/2 as the half-max
                # *level*; strictly that should be minimum + height/2 unless
                # the traces are baseline-subtracted -- preserved as-is,
                # confirm against upstream data.
                half_max = height / 2.
                nearest = np.nanargmin(np.abs(v_lpf - half_max))
                if nearest > max_idx:
                    # Half-max point is right of the peak; partner is on the
                    # left (slice starts at 0, so the index is already global).
                    second_point = np.nanargmin(np.abs(v_lpf[:max_idx] - half_max))
                elif nearest < max_idx:
                    # BUGFIX: nanargmin over v_lpf[max_idx:] returns a
                    # slice-local index; offset by max_idx before comparing
                    # with the global index `nearest`.
                    second_point = max_idx + np.nanargmin(np.abs(v_lpf[max_idx:] - half_max))
                else:
                    # BUGFIX: previously `second_point` was left stale from a
                    # prior iteration (or undefined on the first) when the
                    # nearest half-max sample coincided with the peak.
                    second_point = nearest
                half_width_idxs = np.abs(nearest - second_point)
                if half_width_idxs > 1000:
                    # Implausibly wide -- treat as a failed fit.
                    widths[i] = np.nan
                else:
                    widths[i] = 2. * half_width_idxs * vres
            except ValueError:
                # nanmax/nanargmax raise ValueError on all-NaN input.
                widths[i] = np.nan
    return vlpf, widths
|
18. T.N. Romanuk, Y. Zhou, F.S. Valdovinos and N.D. Martinez (2017) Robustness Trade-Offs in Model Food Webs: Invasion Probability Decreases While Invasion Consequences Increase with Connectance. Advances in Ecological Research.
17. Valdovinos, F.S., Brosi, B.J., Briggs, H.M., Moisset de Espanés, P., Ramos-Jiliberto, R., Martinez, N.D. (2016). Niche partitioning due to adaptive foraging reverses effects of nestedness and connectance on pollination network stability. Ecology Letters, 19, 1277-1286.
16. Davies, N., Field, D., Gavaghan, D., Holbrook, S. J., Planes, S., Troyer, M., Bonsall, M., Claudet, J., Roderick, G., Schmitt, R.J., Zettler, L. A., Berteaux, V., Bossin, H. C., Cabasse, C., Collin, A., Deck, J., Dell, T., Dunne, J., Gates, R., Harfoot, M., Hench, J. L., Hopuare, M., Kirch, P., Kotoulas, G., Kosenkov, A., Kusenko, A., Leichter, J. J., Lenihan, H., Magoulas, A., Martinez, N., Meyer, C., Stoll, B., Swalla, B., Tartakovsky, D. M., Teavai, H. Turyshev, M. S., Valdovinos, F., Williams, R, Wood, Spencer, and IDEA Consortium. Simulating social-ecological systems: the Island Digital Ecosystem Avatars (IDEA) consortium. (2016) GigaScience, 5, 1.
15. Kuparinen, A., Boit, A., Valdovinos, F. S., Lassaux, H., Martinez, N. D. (2016) Fishing-induced life-history changes degrade and destabilize harvested ecosystems. Scientific Reports, 6:22245, DOI: 10.1038/srep22245.
13. Vázquez, D. P., Ramos‐Jiliberto, R., Urbani, P., & Valdovinos, F. S. (2015) A conceptual framework for studying the strength of plant–animal mutualistic interactions. Ecology Letters, 18, 385-400.
12. Smith-Ramírez C., Ramos-Jiliberto R., Valdovinos F.S., Martínez P., Castillo J.A., Armesto J.J. (2014) Decadal trends in the pollinator assemblage of Eucryphia cordifolia in Chilean rainforests. Oecologia, 176, 157-169.
11. Valdovinos, F.S., Moisset de Espanés, P., Flores J.D, Ramos-Jiliberto, R. (2013) Adaptive foraging allows the maintenance of biodiversity of pollination networks. Oikos 122: 907-917.
10. Ramos-Jiliberto, R., Valdovinos, F.S., Moisset de Espanés, P., Flores J.D. (2012) Topological plasticity increases robustness of mutualistic networks. Journal of Animal Ecology 81: 896-904.
9. Ramos-Jiliberto, R., Valdovinos, F.S., Arias, J., Alcaraz, C., García-Bertho, E. (2011) A network-based approach to the analysis of ontogenetic diet shift: an example with the endangered fish Aphanius iberus. Ecological Complexity 8: 123–129.
8. Ramos-Jiliberto, R., Domínguez, D., Espinoza, C., López, G., Valdovinos, F.S., Medel, R., Bustamante, R.O. (2010) Topology of Andean plant-pollinator networks along an altitudinal gradient. Ecological Complexity 7: 86-90.
7. Valdovinos, F.S., Ramos-Jiliberto, R., Urbani, P., Garay-Narváez, Dunne, J.A. (2010b) Consequences of adaptive behavior for the structure and dynamics of food webs. Ecology Letters 13: 1546–1559.
6. Valdovinos, F.S., Urbani, P, Ramos-Jiliberto, R. (2010a) Analysis of the consequences of individual adaptive behavior on population stability: The case of optimal foraging. Revista Chilena de Historia Natural 83: 207-218.
5. Ramos-Jiliberto, R., Oyanedel, J.P., Vega-Retter, C., Valdovinos, F.S. (2009b) Nested structure of plankton communities from Chilean freshwaters. Limnologica 39: 319-324.
4. Ramos-Jiliberto, R., Albornoz, A., Valdovinos, F.S., Smith-Ramírez, C., Arim, M., Armesto, J., Marquet, P. (2009a) A network analysis of plant-pollinator interactions in temperate rain forests of Chiloé Island, Chile. Oecologia. 160: 697-706.
3. Valdovinos, F.S., Ramos-Jiliberto, R., Flores, J.D., Espinoza, C., López, G. (2009b) Structure and dynamics of pollination networks: the role of alien plants. Oikos 118: 1190-1200.
2. Valdovinos, F.S., Chiappa, E., Simonetti, J.A. (2009a) Nestedness of bee assemblages in an endemic South American forest: the role of pine matrix and small fragments. Journal of Insect Conservation 13: 449-452.
1. Guerrero-Bosagna, C., Sabat, P., Valdovinos, F.S., Valladares, L. and Clark, S.J. Epigenetic and Phenotypic Changes Derived from a Continuous Pre and Post Natal Dietary Exposure to Environmental Estrogens in an Experimental Population of Mice (2008) BMC physiology, 8:10, doi: 10.1186/1472-6793-8-17. Available in http://www.biomedcentral.com/1472-6793/8/17.
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
# pyeq2 is a collection of equations expressed as Python classes
#
# Copyright (C) 2013 James R. Phillips
# 2548 Vera Cruz Drive
# Birmingham, AL 35235 USA
#
# email: zunzun@zunzun.com
#
# License: BSD-style (see LICENSE.txt in main source directory)
import sys, os
if os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..') not in sys.path:
sys.path.append(os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..'))
import pyeq2
import numpy
numpy.seterr(all= 'ignore')
import pyeq2.Model_2D_BaseClass
class Geometric_Modified(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Modified geometric model: y = a * x**(b/x)."""

    # Display metadata consumed by pyeq2's UIs and report generators.
    _baseName = "Geometric Modified"
    _HTML = 'y = a * x<sup>(b/x)</sup>'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    # x**(b/x) needs nonzero x of a single sign.
    independentData1CannotContainZeroFlag = True
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False
    independentData1CannotContainBothPositiveAndNegativeFlag = True

    def GetDataCacheFunctions(self):
        """Declare the cached data series (raw X) this model evaluates against."""
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model; on numeric failure return a huge-residual sentinel."""
        x_in = inDataCacheDictionary['X']  # only need to perform this dictionary look-up once
        a = inCoeffs[0]
        b = inCoeffs[1]

        try:
            temp = a * numpy.power(x_in, (b/x_in))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:  # NOTE(review): bare except is the library-wide numeric-failure guard
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ expression fragment for pyeq2's code generator."""
        s = "\ttemp = a * pow(x_in, (b/x_in));\n"
        return s
class PowerA_Modified(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Modified power model A: y = a * b**x."""

    # Display metadata consumed by pyeq2's UIs and report generators.
    _baseName = "Power A Modified"
    _HTML = 'y = a * b<sup>x</sup>'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    # No domain restrictions on the independent data.
    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def __init__(self, inFittingTarget = 'SSQABS', inExtendedVersionName = 'Default'):
        """Constrain b >= 0 so b**x stays real-valued for fractional x."""
        pyeq2.Model_2D_BaseClass.Model_2D_BaseClass.__init__(self, inFittingTarget, inExtendedVersionName)
        self.lowerCoefficientBounds = [None, 0.0]
        self.extendedVersionHandler.AppendAdditionalCoefficientBounds(self)

    def GetDataCacheFunctions(self):
        """Declare the cached data series (raw X) this model evaluates against."""
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model; on numeric failure return a huge-residual sentinel."""
        x_in = inDataCacheDictionary['X']  # only need to perform this dictionary look-up once
        a = inCoeffs[0]
        b = inCoeffs[1]

        try:
            temp = a * numpy.power(b, x_in)
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:  # NOTE(review): bare except is the library-wide numeric-failure guard
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ expression fragment for pyeq2's code generator."""
        s = "\ttemp = a * pow(b, x_in);\n"
        return s
class PowerA_Modified_Transform(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Modified power model A with linear transform: y = a * b**(c*x + d)."""

    # Display metadata consumed by pyeq2's UIs and report generators.
    _baseName = "Power A Modified Transform"
    _HTML = 'y = a * b<sup>cx + d</sup>'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c', 'd']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    # No domain restrictions on the independent data.
    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached data series (raw X) this model evaluates against."""
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model; on numeric failure return a huge-residual sentinel."""
        x_in = inDataCacheDictionary['X']  # only need to perform this dictionary look-up once
        a = inCoeffs[0]
        b = inCoeffs[1]
        c = inCoeffs[2]
        d = inCoeffs[3]

        try:
            temp = a * numpy.power(b, c * x_in + d)
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:  # NOTE(review): bare except is the library-wide numeric-failure guard
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ expression fragment for pyeq2's code generator."""
        s = "\ttemp = a * pow(b, c * x_in + d);\n"
        return s
class PowerB_Modified(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Modified power model B: y = a**ln(x)."""

    # Display metadata consumed by pyeq2's UIs and report generators.
    _baseName = "Power B Modified"
    _HTML = 'y = a<sup>ln(x)</sup>'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    # ln(x) requires strictly positive x.
    independentData1CannotContainZeroFlag = True
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = True
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached data series (log of X) this model evaluates against."""
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.LogX(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model; on numeric failure return a huge-residual sentinel."""
        x_LogX = inDataCacheDictionary['LogX']  # only need to perform this dictionary look-up once
        a = inCoeffs[0]

        try:
            temp = numpy.power(a, x_LogX)
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:  # NOTE(review): bare except is the library-wide numeric-failure guard
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ expression fragment for pyeq2's code generator."""
        s = "\ttemp = pow(a, log(x_in));\n"
        return s
class PowerB_Modified_Transform(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Modified power model B with linear transform: y = a**ln(b*x + c)."""

    # Display metadata consumed by pyeq2's UIs and report generators.
    _baseName = "Power B Modified Transform"
    _HTML = 'y = a<sup>ln(bx + c)</sup>'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    # No static domain restrictions: positivity of (b*x + c) depends on the
    # coefficients, so it is handled at evaluation time instead.
    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached data series (raw X) this model evaluates against."""
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model; on numeric failure return a huge-residual sentinel."""
        x_in = inDataCacheDictionary['X']  # only need to perform this dictionary look-up once
        a = inCoeffs[0]
        b = inCoeffs[1]
        c = inCoeffs[2]

        try:
            temp = numpy.power(a, numpy.log(b * x_in + c))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:  # NOTE(review): bare except is the library-wide numeric-failure guard
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ expression fragment for pyeq2's code generator."""
        s = "\ttemp = pow(a, log(b * x_in + c));\n"
        return s
class PowerC_Modified(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Modified power model C: y = (a + x)**b."""

    # Display metadata consumed by pyeq2's UIs and report generators.
    _baseName = "Power C Modified"
    _HTML = 'y = (a + x)<sup>b</sup>'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    # No domain restrictions on the independent data.
    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached data series (raw X) this model evaluates against."""
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model; on numeric failure return a huge-residual sentinel."""
        x_in = inDataCacheDictionary['X']  # only need to perform this dictionary look-up once
        a = inCoeffs[0]
        b = inCoeffs[1]

        try:
            temp = numpy.power(a + x_in, b)
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:  # NOTE(review): bare except is the library-wide numeric-failure guard
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ expression fragment for pyeq2's code generator."""
        s = "\ttemp = pow(a + x_in, b);\n"
        return s
class PowerC_Modified_Transform(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Modified power model C with linear transform: y = (a + b*x)**c."""

    # Display metadata consumed by pyeq2's UIs and report generators.
    _baseName = "Power C Modified Transform"
    _HTML = 'y = (a + bx)<sup>c</sup>'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    # No domain restrictions on the independent data.
    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached data series (raw X) this model evaluates against."""
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model; on numeric failure return a huge-residual sentinel."""
        x_in = inDataCacheDictionary['X']  # only need to perform this dictionary look-up once
        a = inCoeffs[0]
        b = inCoeffs[1]
        c = inCoeffs[2]

        try:
            temp = numpy.power(a + b * x_in, c)
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:  # NOTE(review): bare except is the library-wide numeric-failure guard
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ expression fragment for pyeq2's code generator."""
        s = "\ttemp = pow(a + b * x_in, c);\n"
        return s
class PowerLawExponentialCutoff(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Power law with exponential cutoff: p(k) = C * k**(-T) * exp(-k/K)."""

    # Display metadata consumed by pyeq2's UIs and report generators.
    _baseName = "Power Law With Exponential Cutoff"
    _HTML = 'p(k) = C * k<sup>(-T)</sup> * exp(-k/K)'
    _leftSideHTML = 'p(k)'
    _coefficientDesignators = ['C', 'T', 'K']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    # k**(-T) with fractional T requires non-negative k.
    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = True
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached data series (raw X) this model evaluates against."""
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model; on numeric failure return a huge-residual sentinel."""
        x_in = inDataCacheDictionary['X']  # only need to perform this dictionary look-up once
        C = inCoeffs[0]
        T = inCoeffs[1]
        K = inCoeffs[2]

        try:
            temp = C * numpy.power(x_in, -1.0 * T) * numpy.exp(-1.0 * x_in / K)
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:  # NOTE(review): bare except is the library-wide numeric-failure guard
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ expression fragment for pyeq2's code generator."""
        s = "\ttemp = C * pow(x_in, -1.0 * T) * exp(-1.0 * x_in / K);\n"
        return s
class PowerRoot(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Root model: y = a**(1.0/x)."""

    # Display metadata consumed by pyeq2's UIs and report generators.
    _baseName = "Root"
    _HTML = 'y = a<sup>(1.0/x)</sup>'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    # 1/x requires nonzero x.
    independentData1CannotContainZeroFlag = True
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached data series (X**-1) this model evaluates against."""
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[-1.0]), [-1.0]])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model; on numeric failure return a huge-residual sentinel."""
        PowX_Neg1 = inDataCacheDictionary['PowX_-1.0']  # only need to perform this dictionary look-up once
        a = inCoeffs[0]

        try:
            temp = numpy.power(a, PowX_Neg1)
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:  # NOTE(review): bare except is the library-wide numeric-failure guard
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ expression fragment for pyeq2's code generator."""
        s = "\ttemp = pow(a, (1.0/x_in));\n"
        return s
class SimplePower(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Simple power model: y = x**a."""

    # Display metadata consumed by pyeq2's UIs and report generators.
    _baseName = "Simple Power"
    _HTML = 'y = x<sup>a</sup>'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = False
    autoGenerateGrowthAndDecayForms = True

    # x**a with fractional a requires non-negative x.
    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = True
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached data series (raw X) this model evaluates against."""
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model; on numeric failure return a huge-residual sentinel."""
        x_in = inDataCacheDictionary['X']  # only need to perform this dictionary look-up once
        a = inCoeffs[0]

        try:
            temp = numpy.power(x_in, a)
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:  # NOTE(review): bare except is the library-wide numeric-failure guard
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ expression fragment for pyeq2's code generator."""
        s = "\ttemp = pow(x_in, a);\n"
        return s
class StandardGeometric(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Standard geometric model: y = a * x**(b*x)."""

    # Display metadata consumed by pyeq2's UIs and report generators.
    _baseName = "Standard Geometric"
    _HTML = 'y = a * x<sup>bx</sup>'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    # x**(b*x) requires non-negative x.
    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = True
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached data series (raw X) this model evaluates against."""
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model; on numeric failure return a huge-residual sentinel."""
        x_in = inDataCacheDictionary['X']  # only need to perform this dictionary look-up once
        a = inCoeffs[0]
        b = inCoeffs[1]

        try:
            temp = a * numpy.power(x_in, (b*x_in))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:  # NOTE(review): bare except is the library-wide numeric-failure guard
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ expression fragment for pyeq2's code generator."""
        s = "\ttemp = a * pow(x_in, (b*x_in));\n"
        return s
class StandardPower(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Standard power model: y = a * x**b."""

    # Display metadata consumed by pyeq2's UIs and report generators.
    _baseName = "Standard Power"
    _HTML = 'y = a * x<sup>b</sup>'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = False
    autoGenerateGrowthAndDecayForms = True

    # x**b with fractional b requires non-negative x.
    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = True
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached data series (raw X) this model evaluates against."""
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model; on numeric failure return a huge-residual sentinel."""
        x_in = inDataCacheDictionary['X']  # only need to perform this dictionary look-up once
        a = inCoeffs[0]
        b = inCoeffs[1]

        try:
            temp = a * numpy.power(x_in, b)
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:  # NOTE(review): bare except is the library-wide numeric-failure guard
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ expression fragment for pyeq2's code generator."""
        s = "\ttemp = a * pow(x_in, b);\n"
        return s
class XShiftedPower(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """X-shifted power model: y = a * (x - b)**c."""

    # Display metadata consumed by pyeq2's UIs and report generators.
    _baseName = "X Shifted Power"
    _HTML = 'y = a * (x-b)<sup>c</sup>'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    # No static domain restrictions: sign of (x - b) depends on the shift
    # coefficient, so it is handled at evaluation time instead.
    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached data series (raw X) this model evaluates against."""
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model; on numeric failure return a huge-residual sentinel."""
        x_in = inDataCacheDictionary['X']  # only need to perform this dictionary look-up once
        a = inCoeffs[0]
        b = inCoeffs[1]
        c = inCoeffs[2]

        try:
            temp = a * numpy.power((x_in-b), c)
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:  # NOTE(review): bare except is the library-wide numeric-failure guard
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ expression fragment for pyeq2's code generator."""
        s = "\ttemp = a * pow((x_in-b), c);\n"
        return s
|
Jose Mourinho and Arsene Wenger could be set for dramatic returns to football management in the coming weeks as they are already in talks with a handful of clubs, it has emerged.
Mourinho is billed to hold talks with Inter Milan director Beppe Marotta over becoming manager of the Italian club for a second time.
The sacked Manchester United boss is still adored by Inter fans having delivered the treble in 2010.
Mourinho, 56, was dismissed from his post at Old Trafford on December 18 and is looking at his route back into management, holding talks with Inter on Thursday, according to the Mirror.
He has already turned down the chance to take over Benfica and is understood to be content with assessing his options until the summer.
Inter would welcome Mourinho back with open arms and his legacy lives on after he delivered the Serie A, Coppa Italia and Champions League nine years ago.
Antonio Conte is also a potential candidate with current boss, Luciano Spalletti, under mounting pressure.
Inter are third in the Italian top flight but remain a massive 20 points adrift of leaders Juventus.
There is a feeling that Mourinho could help close that gap, one echoed by former Inter chairman, Massimo Moratti.
“Would I like Mourinho back at Inter? Yes, I’m very fond of him,” he said.
“However, we’ll give time to Spalletti to prove his worth. Diego Simeone? I love him too, but Mou is special,” he added.
Wenger, meanwhile, is said to have four offers on the table for him already, even though none of those have come from Premier League sides.
Reports suggest that the Frenchman is not willing to take a job in the English top flight as it would put him in too close contact with Arsenal.
Paris Saint-Germain are one of the quartet of clubs eyeing his services. They are thought to view him as a potential director of football.
There is also a national team, who have made him an offer to be their new coach.
And two other ‘leading European clubs’ have made proposals to Wenger as they bid to make him their manager.
Bayern Munich, AC Milan and Real Madrid have all previously been credited with an interest in appointing him.
|
from setuptools import setup, find_packages
from sys import version_info
def install_requires():
    """Return the runtime dependency list.

    The singledispatch backport is added only on interpreters older
    than Python 3.4, where it is not in the standard library.
    """
    base = [
        'traitlets>=4.1',
        'six>=1.9.0',
        'pyyaml>=3.11',
    ]
    if (version_info.major, version_info.minor) < (3, 4):
        base.append('singledispatch>=3.4.0')
    return base
def extras_require():
    """Return the optional dependency groups (pip "extras")."""
    test_deps = [
        'tox',
        'pytest>=2.8.5',
        'pytest-cov>=1.8.1',
        'pytest-pep8>=1.0.6',
        'click>=6.0',
    ]
    return {'test': test_deps}
def main():
    """Configure and run the setuptools build for the straitlets package."""
    setup(
        name='straitlets',
        # remember to update straitlets/__init__.py!
        version='0.3.3',
        description="Serializable IPython Traitlets",
        author="Quantopian Team",
        author_email="opensource@quantopian.com",
        # BUGFIX: `include` must be a sequence of patterns; a bare string is
        # iterated character-by-character by find_packages.  Also list the
        # top-level package itself, which 'straitlets.*' alone does not match.
        packages=find_packages(include=['straitlets', 'straitlets.*']),
        include_package_data=True,
        zip_safe=True,
        url="https://github.com/quantopian/serializable-traitlets",
        classifiers=[
            'Development Status :: 3 - Alpha',
            'Framework :: IPython',
            'Natural Language :: English',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3.4',
            'Programming Language :: Python',
        ],
        install_requires=install_requires(),
        extras_require=extras_require(),
    )
# Script entry point: run the build only when executed directly.
if __name__ == '__main__':
    main()
|
The IRES - 633rd International Conference on Food and Agricultural Engineering (ICFAE) aims at presenting current research being carried out in that area and is scheduled to be held on 17th-18th June, 2019 in Orlando, USA. The idea of the conference is for the scientists, scholars, engineers and students from universities all around the world and the industry to present ongoing research activities, and hence to foster research relations between the universities and the industry. This conference provides opportunities for the delegates to exchange new ideas and application experiences face to face, to establish business or research relations and to find global partners for future collaboration.
|
import sys
import numpy as np
import time
import theano
import theano.tensor as T
def class_acc(hat_y, y_ref):
    '''
    Return (percent correct, total log-probability) for a classifier.

    hat_y : (n_classes, n_samples) array of class posteriors.
    y_ref : (n_samples,) array of reference class indices.
    '''
    n_samples = y_ref.shape[0]
    # Posterior probability assigned to the correct class of each sample.
    correct_post = hat_y[y_ref, np.arange(n_samples)]
    log_prob = np.sum(np.log(correct_post))
    # Fraction of samples whose arg-max prediction matches the reference.
    hits = (np.argmax(hat_y, 0) == y_ref).astype(int)
    accuracy = np.sum(hits) * 1.0 / n_samples
    return (accuracy, log_prob)
def sanity_checks(batch_up, n_batch, bsize, lrate, train_set):
    """Validate the mutually exclusive argument groups of SGD_train.

    With a compiled batch update (`batch_up`), `n_batch` is required and the
    manual-update arguments (bsize/lrate/train_set) must be absent; without
    one, all three manual-update arguments are required.

    Raises
    ------
    ValueError
        When the argument groups are mixed or incomplete.
    """
    # BUGFIX: Python 2 `raise Exc, value` statements converted to the
    # Python 3 call form (the old syntax is a SyntaxError under Python 3).
    # Message strings are preserved byte-for-byte.
    if batch_up:
        if not n_batch:
            raise ValueError("If you use compiled batch update you need to "
                             "specify n_batch")
        if bsize or lrate or train_set:
            raise ValueError("If you use compiled batch update you can not"
                             "specify bsize, lrate and train_set")
    else:
        if not bsize or not lrate or not train_set:
            raise ValueError("If compiled batch not used you need to specity"
                             "bsize, lrate and train_set")
def SGD_train(model, n_iter, bsize=None, lrate=None, train_set=None,
              batch_up=None, n_batch=None, devel_set=None, model_dbg=None):
    """Train *model* for n_iter epochs of stochastic gradient descent.

    Two modes, validated by sanity_checks():
    * compiled mode: batch_up(j) performs the update for batch j and
      returns the batch cost (negated below); n_batch must be given.
    * manual mode: mini-batches are sliced from train_set=(x, y) and each
      parameter is updated with the vanilla SGD rule using lrate/bsize.

    devel_set, when given, is an (x, y) pair used to report accuracy and
    log-probability after every epoch.
    NOTE(review): model_dbg is accepted but never used in this function.
    """
    # SANITY CHECKS:
    sanity_checks(batch_up, n_batch, bsize, lrate, train_set)
    if not batch_up:
        train_x, train_y = train_set
        # Number of mini batches
        # NOTE(review): relies on Python 2 integer division; under
        # Python 3 this yields a float -- confirm before porting.
        n_batch = train_x.shape[1]/bsize + 1
    # Check for Theano vars
    # Models exposing _forward keep parameters as Theano shared variables;
    # otherwise parameters are plain numpy arrays updated in place.
    if getattr(model, "_forward", None):
        shared_vars = True
    else:
        shared_vars = False
    # For each iteration run backpropagation in a batch of examples. For
    # each batch, sum up all gradients and update each weights with the
    # SGD rule.
    prev_p_devel = None
    prev_p_train = None
    for i in np.arange(n_iter):
        # This will hold the posterior of train data for each epoch
        p_train = 0
        # NOTE(review): time.clock() was removed in Python 3.8.
        init_time = time.clock()
        for j in np.arange(n_batch):
            if batch_up:
                # Compiled batch update
                p_train += -batch_up(j)
            else:
                # Manual batch update
                # Mini batch
                batch_x = train_x[:, j*bsize:(j+1)*bsize]
                batch_y = train_y[j*bsize:(j+1)*bsize]
                # Get gradients for each layer and this batch
                nabla_params = model.grads(batch_x, batch_y)
                # Update each parameter with SGD rule
                for m in np.arange(len(model.params)):
                    if shared_vars:
                        # Parameters as theano shared variables
                        model.params[m].set_value(model.params[m].get_value()
                                                  - lrate*np.array(nabla_params[m]))
                    else:
                        # Parameters as numpy array
                        model.params[m] -= lrate*nabla_params[m]
            # INFO
            sys.stdout.write("\rBatch %d/%d (%d%%) " %
                             (j+1, n_batch, (j+1)*100.0/n_batch))
            sys.stdout.flush()
        batch_time = time.clock() - init_time
        # Check probability of devel set
        if devel_set:
            corr, p_devel = class_acc(model.forward(devel_set[0]), devel_set[1])
            # Report the change since the previous epoch (0 on the first).
            if prev_p_devel:
                delta_p_devel = p_devel - prev_p_devel
            else:
                delta_p_devel = 0
            prev_p_devel = p_devel
        if prev_p_train:
            delta_p_train = p_train - prev_p_train
        else:
            delta_p_train = 0
        prev_p_train = p_train
        validation_time = time.clock() - init_time - batch_time
        sys.stdout.write(" Epoch %2d/%2d in %2.2f seg\n" % (i+1, n_iter, batch_time))
        if devel_set:
            sys.stdout.write("Logpos devel: %10.1f (delta: %10.2f) Corr devel %2.2f\n\n" % (p_devel, delta_p_devel, corr))
    print ""
|
Apparently it has worked rather well for him over the years.
Every day I have choice to make. Happy. Not happy.
The happiness balance is tedious, constant work. Sometimes I do it well; sometimes I do it appallingly.
Today was one of those “not-do-it-so-well” days.
My Monday got turned around like the weather and I found myself in the linger of thoughts of all the things I haven’t done with my life, should already have done with my life, and all the in-between mud-and-sling scenarios.
Clearly the long tooth of winter needs to go the dentist and have a root canal.
I will gladly pay the bill.
I hate to admit it but I’ve imagined slipping the dentist an extra $50 bucks to forgo Novocaine when removing said long tooth—just to emphasize how much of a pain in the posterior Old Man Winter has been this year.
A few weeks ago, the crusty old curmudgeon spawned a rebel in me that fought the hard fight against any further snow blowing of the driveway or shovelling of the back step, no matter how much snowfall arrived.
I’ve been a fan of TED for years. Some of the greatest lessons I’ve learned have been from TED, a global platform of speakers who share their ideas—be they funny, courageous, ingenious, inspiring, or informative—in talks of 18 minutes or less.
Obviously he never spent a long, cold winter cooped up in this part of the country awaiting signs of spring.
I don’t know about you but my patience is pooped out and my disposition has run amuck. I’m sick of the cold and tired of defrosting the ends of my fingers each morning.
My heart leapt. I tried to contain my inner child-like glee because I’d been hoping he’d say that for weeks now.
The late great Duke Ellington had the right idea.
Sadly, I can pout with the best of them, but I cannot read, write, nor play a lick of music.
When I pout, all my energy goes into finding a piece of chocolate I stashed in the cupboard, which then leads to the blues because I always eat more of it than I should.
Hmmm, not exactly the kind of productive energy transference Mr. Ellington was talking about.
“I’d stop eating chocolate but I’m no quitter.” Now that’s more like me.
Where have I been all my life that I didn’t try ice fishing until now? I live in Northwestern Ontario for crying out loud!
And not only did I just have my inaugural experience with the sport, but it also was only the second time I’d ever been in a vehicle on a frozen lake ice road—and the first time I’d gotten out and walked on water I might sail on one day or drive a motor boat across.
I turned my head in his direction with a curious stare of pause.
“An anniversary? No. No anniversary,” I replied, returning to look upon the road to home.
My intuitive friend is a gem—and he knows what is coming. Alas, so do I. Yet I was trying to convince myself that two years post would allow the day to pass without feeling it so much.
What if I had turned left in the hallway at the college I was attending in the fall of 1980?
What if I had turned left and found a lounge chair in a window vestibule, and plopped myself there during a cancelled class instead of going to the cafeteria for a big cornmeal muffin and a coffee.
If I had turned left on that November morning some 33 years ago, how would my life be different today?
|
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2015 Björn Larsson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class BiCircular(object):
    """Bidirectional circular cursor over a fixed sequence.

    next()/previous() step through *enumerator* one element at a time and
    wrap around at either end, so stepping never raises IndexError.
    """

    def __init__(self, enumerator):
        # Current position; next()/previous() move it before reading.
        self._index = 0
        self._enum = enumerator

    def next(self):
        """Advance one step (wrapping at the end) and return the element."""
        self._nextIndex(True)
        return self._enum[self._index]

    def previous(self):
        """Step back one step (wrapping at the start) and return the element."""
        self._nextIndex(False)
        return self._enum[self._index]

    def _nextIndex(self, forward):
        # Fix: parameter renamed from 'next', which shadowed the builtin.
        # Modular arithmetic handles both wrap-arounds in one expression.
        step = 1 if forward else -1
        self._index = (self._index + step) % len(self._enum)
|
The European Union is appealing to Greece to meet several outstanding conditions by next week so that more funds can be released to help bolster the debt-strapped country's economy.
Top EU economy official Pierre Moscovici told European lawmakers Tuesday that "it's of capital importance to be able to finish this by Monday."
Successfully meeting the milestones would pave the way for the disbursement to Greece of 2.8 billion euros ($3.1 billion).
|
# Copyright 2014 OpenStack Foundation
# Copyright 2014 Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations. This test case reads the configuration
file test_migrations.conf for database connection settings
to use in the tests. For each connection found in the config file,
the test case runs a series of test cases to ensure that migrations work
properly.
There are also "opportunistic" tests for both mysql and postgresql in here,
which allows testing against mysql and pg in a properly configured unit
test environment.
For the opportunistic testing you need to set up a db named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost.
The test will then use that db and u/p combo to run the tests.
For postgres on Ubuntu this can be done with the following commands:
sudo -u postgres psql
postgres=# create user openstack_citest with createdb login password
'openstack_citest';
postgres=# create database openstack_citest with owner openstack_citest;
"""
from oslo_config import cfg
from oslo_db.sqlalchemy import utils as db_utils
from storyboard.tests.db.migration import test_migrations_base as base
# Shared oslo.config handle used by the migration test cases below.
CONF = cfg.CONF
class TestMigrations(base.BaseWalkMigrationTestCase, base.CommonTestsMixIn):
    """Test sqlalchemy-migrate migrations."""

    # Credentials for the opportunistic 'openstack_citest' database.
    USER = "openstack_citest"
    PASSWD = "openstack_citest"
    DATABASE = "openstack_citest"

    def __init__(self, *args, **kwargs):
        super(TestMigrations, self).__init__(*args, **kwargs)

    def setUp(self):
        super(TestMigrations, self).setUp()

    def assertColumnExists(self, engine, table, column):
        """Assert *table* has a column named *column*."""
        reflected = db_utils.get_table(engine, table)
        self.assertIn(column, reflected.c)

    def assertColumnNotExists(self, engine, table, column):
        """Assert *table* has no column named *column*."""
        reflected = db_utils.get_table(engine, table)
        self.assertNotIn(column, reflected.c)

    def assertIndexExists(self, engine, table, index):
        """Assert *table* carries an index named *index*."""
        reflected = db_utils.get_table(engine, table)
        self.assertIn(index, [idx.name for idx in reflected.indexes])

    def assertIndexMembers(self, engine, table, index, members):
        """Assert *index* exists on *table* and covers exactly *members*."""
        self.assertIndexExists(engine, table, index)
        reflected = db_utils.get_table(engine, table)
        matching = [idx.columns.keys() for idx in reflected.indexes
                    if idx.name == index]
        index_columns = matching[0] if matching else None
        self.assertEqual(sorted(members), sorted(index_columns))

    def _pre_upgrade_001(self, engine):
        # Anything returned from this method will be
        # passed to corresponding _check_xxx method as 'data'.
        pass

    def _check_001(self, engine, data):
        for table, column in (('users', 'created_at'),
                              ('users', 'last_login'),
                              ('teams', 'updated_at'),
                              ('teams', 'name')):
            self.assertColumnExists(engine, table, column)

    def _check_002(self, engine, data):
        self.assertColumnExists(engine, 'users', 'openid')
        self.assertColumnNotExists(engine, 'users', 'password')

    def _check_003(self, engine, data):
        for table in ('projects', 'stories', 'tasks'):
            self.assertColumnExists(engine, table, 'is_active')

    def _check_004(self, engine, data):
        self.assertColumnExists(engine, 'projects', 'description')

    def _check_005(self, engine, data):
        for table in ('projects', 'stories', 'tasks'):
            self.assertColumnExists(engine, table, 'is_active')

    def _check_006(self, engine, data):
        self.assertColumnNotExists(engine, 'users', 'first_name')
        self.assertColumnNotExists(engine, 'users', 'last_name')
        self.assertColumnExists(engine, 'users', 'full_name')

    def _pre_upgrade_007(self, engine):
        self.assertColumnNotExists(engine, 'comments', 'is_active')

    def _check_007(self, engine, data):
        self.assertColumnExists(engine, 'comments', 'is_active')
|
Inspirational Models Of Antique Brass Flush Mount Ceiling Light – From the thousands of pictures online about antique brass flush mount ceiling light, we select the top collections with the best resolution for our readers, and this photo is one of the image collections in our good-looking pictures gallery regarding Inspirational Models Of Antique Brass Flush Mount Ceiling Light. I hope you will like it.
submitted simply by Kenneth Morrison at 2018-03-25 00:30:18. To view all pictures with Inspirational Models Of Antique Brass Flush Mount Ceiling Light images gallery don’t forget to adhere to this particular our web URL.
|
# -*- encoding: utf-8 -*-
from django.shortcuts import render
from django.contrib.auth import login, authenticate, logout
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.views.decorators.csrf import csrf_exempt
import json
from forms import *
from models import *
from allauth.socialaccount.models import SocialToken
def user_logout(request):
    """End the current session and send the user back to the landing page."""
    logout(request)
    landing_url = reverse('landing')
    return HttpResponseRedirect(landing_url)
def landing(request):
    """Render the public landing page."""
    context = locals()
    return render(request, 'app/landing.html', context)
def login(request, empresa, token):
if not request.user.is_anonymous():
return HttpResponseRedirect(reverse('index'))
form = LoginForm()
if request.method == "POST":
form = LoginForm(request.POST)
if form.is_valid():
user = form.cleaned_data["user"]
password = form.cleaned_data["password"]
company = form.cleaned_data["company"]
username = company + '_' + user
if not company:
username = 'ZaresApp_Castellanos'
access = authenticate(username=username, password=password)
if access is not None:
if access.is_active:
login(request, access)
return HttpResponseRedirect(reverse('index'))
else:
mensaje="Usuario esta desactivado"
else:
mensaje="Usuario o contraseña incorrecto"
else:
print form.errors
mensaje="Usuario o contraseña incorrecto"
return render(request, 'app/login.html',locals())
def index(request):
    """Render the main point-of-sale screen."""
    context = locals()
    return render(request, 'app/sell_point.html', context)
@csrf_exempt
def menus(request):
    """List and create menu entries for the logged-in user's company.

    Handles two POST flavours:
    * 'jstree'   -- AJAX reparenting of an existing menu node.
    * 'add_menu' -- regular form submission creating a new menu.
    """
    form = MenusForm()
    if request.method == "POST":
        if 'jstree' in request.POST:
            # Drag-and-drop update from the jsTree widget: reattach the
            # node to its new parent, or make it a root when none is given.
            menu = Menu.objects.filter(id = request.POST.get('menu_id'))[0]
            #menu.nivel = int(request.POST.get('level')) + 1
            if request.POST.get('depend_id'):
                menu.parent = Menu.objects.filter(id = request.POST.get('depend_id'))[0]
            else:
                menu.parent = None
            menu.save()
            return HttpResponse(json.dumps({"data": "true"}),content_type="application/json")
        if 'add_menu' in request.POST:
            form = MenusForm(request.POST)
            if form.is_valid():
                # Pin the new menu to the poster's company before saving.
                obj = form.save(commit = False)
                obj.company = request.user.company
                obj.save()
                messages.success(request, 'Guardado con éxito!')
                form = MenusForm()
            else:
                messages.error(request, 'Algunos datos en tu formulario estan incorrectos')
    # The company's menus reach the template via locals().
    menus = Menu.objects.filter(company = request.user.company)
    return render(request, 'app/menus.html',locals())
def menus_edit(request, slug, ide):
    """Edit an existing menu identified by (slug, ide) within the company."""
    menu = Menu.objects.filter(slug=slug, ide=ide, company = request.user.company)[0]
    form = MenusForm(instance = menu)
    if request.POST:
        form = MenusForm(request.POST, instance=menu)
        if form.is_valid():
            # Keep ownership pinned to the editor's company.
            obj = form.save(commit = False)
            obj.company = request.user.company
            obj.save()
            messages.success(request, 'Modificado con éxito!')
            return HttpResponseRedirect( reverse('mis_menus' ) )
        else:
            messages.error(request, 'Algunos datos en tu formulario estan incorrectos')
            # Reload the unmodified instance for redisplay.
            menu = Menu.objects.filter(slug=slug, ide=ide, company = request.user.company)[0]
    return render(request, 'app/menus_edit.html',locals())
def menus_delete(request, slug, ide):
    """Delete a company menu and record the deletion in the registry."""
    target = Menu.objects.filter(slug=slug, ide=ide, company = request.user.company)[0]
    target.delete()
    messages.warning(request, 'El menú se eliminó con exito')
    # Audit-trail entry for the deletion.
    entry = Registry()
    entry.name = "Se eliminó menú"
    entry.code = "delete_menu"
    entry.user = request.user.user
    entry.company = request.user.company
    entry.save()
    return HttpResponseRedirect(reverse('menus'))
def sellpoints(request):
    """List the company's points of sale and create new ones (up to quota)."""
    form = Sell_pointForm()
    if request.method == "POST":
        if 'add_sell_point' in request.POST:
            form = Sell_pointForm(request.POST, request.FILES)
            if form.is_valid():
                # Enforce the per-company quota before creating another one.
                points = len(Sell_point.objects.filter(company=request.user.company))
                if points >= request.user.company.sell_point_limits:
                    messages.error(request, 'Llegaste al limite de tus puntos de venta')
                else:
                    obj = form.save(commit=False)
                    obj.company = request.user.company
                    obj.create_by = request.user
                    obj.save()
                    messages.success(request, 'Nuevo punto de venta dado de alta con éxito')
            else:
                messages.error(request, 'Algunos datos en tu formulario estan incorrectos')
    # The company's points of sale reach the template via locals().
    pvs = Sell_point.objects.filter(company = request.user.company)
    return render(request, 'app/sellpoints.html',locals())
def sellpoints_edit(request, ide):
    """Edit a point of sale selected by its ide field.

    NOTE(review): the lookup is not filtered by company -- any user could
    edit any point of sale by guessing ide; confirm whether intended.
    """
    sell_point = Sell_point.objects.filter(ide=ide)[0]
    form = Sell_pointForm(instance = sell_point)
    if request.POST:
        form = Sell_pointForm(request.POST, request.FILES, instance=sell_point)
        if form.is_valid():
            form.save()
            messages.success(request, 'Punto de venta modificado con éxito')
            return HttpResponseRedirect( reverse('mis_puntos_de_venta' ) )
        else:
            messages.error(request, 'Algunos datos en tu formulario estan incorrectos')
    return render(request, 'app/mis_puntos_de_venta_edit.html',locals())
def products(request):
    """Render the product catalogue page."""
    context = locals()
    return render(request, 'app/products.html', context)
def products_add(request):
    """Render the 'add product' form page.

    The save/quota logic existed only as a large commented-out draft in
    the original; that dead code has been dropped.  Only the empty form
    is rendered for now.
    """
    form = ProductForm()
    return render(request, 'app/products_add.html', locals())
|
Minnesota sits in ninth place in the Western Conference, with a record of 5-9-3. They are currently on 18 points from 17 matches, four points below the line and only two from the bottom. Their -14 goal differential (24 for, 38 against) is the second-worst in MLS, besting only Real Salt Lake’s -20. Minnesota does lead the league in one category – Goals Against (38).
The early part of the season was not kind to the MLS newcomers. Outscored 11-2 in their first two matches, coach Adrian Heath found his club behind the 8-ball early, with very clear deficiencies in the defending half and not a lot of time (or personnel) to make changes. A 2-2 draw against Colorado in their third match would see Minnesota nab their first points of the year, only to be beaten down once again the following week by a brutal 5-2 score line at New England.
That’s been the way of things for the expansion side when playing away from home. The Loons are being outscored by a ridiculous 23-9, and are winless on the road. Minnesota’s back line has of course struggled in their inaugural MLS season, but the strength of this team has been its attacking quartet of Miguel Ibarra, Johan Venegas, and Kevin Molino topped off with Christian Ramirez at the top. While Molino and his seven goals are familiar to the Boys in Blue, Ramirez will be a new challenge for the City defense. Leading the team in goals with nine, the man they call Batman has found the back of the net three times in his last six matches.
Tactically, the Loons line up in a 4-2-3-1 and try to utilize the wide play of Ibarra and Molino to feed Ramirez. It will be interesting to watch how they adapt to the narrow pitch at Yankee Stadium.
NYCFC has taken points in four of their last five and extended their win streak to a season-best three matches last weekend against Red Bulls. They currently sit third in the East with 30 points from 17 matches with a record of 9-5-3. City is tied for second in the league with 31 goals (Atlanta leads with 33) and are third in MLS with a +11 differential.
New York City will once again find themselves without central midfielder Maxi Moralez, who has returned to training in a limited capacity but is not yet ready for game action. Tommy McNamara has been getting the starting minutes in his place, playing centrally in a 4-2-3-1. The rest of the XI should remain mostly the same as it’s been the last two matches; the notable exceptions being Ronald Matarrita’s return to left back, replacing Derby hero Ben Sweat, and Eirik Johansen‘s return, replacing Gold Cup-bound Sean Johnson.
“If we don’t have the same concentration and focus that we had against Red Bulls, we will not have any chance of winning against Minnesota.
This match has all the makings of a “trap” game – midweek, after an inspiring win and against a weak opponent. The last time NYCFC found themselves going into a trap game was in week 11 against Real Salt Lake – a match they lost 2-1. The starting eleven in that match featured heavy rotation, something we shouldn’t see this time around.
It will be imperative for Alex Ring and Yangel Herrera to deny Ramirez service from the Minnesota midfield – a task they have been up to against tougher opponents. Limiting bad giveaways in the midfield will also be of top importance, as Molino, Ibarra and Venegas have speed to burn and can punish those mistakes effectively. Minnesota has a lot of experience playing from behind, so early goals should not be sat on. The Loon attackers may also try and put pressure on the inexperienced Johansen when trying to distribute, but Maxime Chanot and Alexander Callens should be up to the task of asserting their presence.
|
# -*- coding: utf-8 -*-
#
# QAPI code generation
#
# Copyright (c) 2018-2019 Red Hat Inc.
#
# Authors:
# Markus Armbruster <armbru@redhat.com>
# Marc-André Lureau <marcandre.lureau@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
import errno
import os
import re
from contextlib import contextmanager
from qapi.common import *
from qapi.schema import QAPISchemaVisitor
class QAPIGen:
    """Accumulates generated output and writes it to a file.

    The output is a preamble followed by a body, framed by
    subclass-provided _top()/_bottom() text.
    """

    def __init__(self, fname):
        self.fname = fname
        self._preamble = ''
        self._body = ''

    def preamble_add(self, text):
        """Append *text* to the preamble section."""
        self._preamble = self._preamble + text

    def add(self, text):
        """Append *text* to the body section."""
        self._body = self._body + text

    def get_content(self):
        """Return the complete output: top, preamble, body, bottom."""
        parts = (self._top(), self._preamble, self._body, self._bottom())
        return ''.join(parts)

    def _top(self):
        return ''

    def _bottom(self):
        return ''

    def write(self, output_dir):
        """Write the content to output_dir/fname, but only when it changed.

        Include paths starting with ../ are used to reuse modules of the
        main schema in specialised schemas; those files belong to the main
        schema's build and are never overwritten here.
        """
        if self.fname.startswith('../'):
            return
        pathname = os.path.join(output_dir, self.fname)
        directory = os.path.dirname(pathname)
        if directory:
            try:
                os.makedirs(directory)
            except os.error as e:
                # A pre-existing directory is fine; anything else is not.
                if e.errno != errno.EEXIST:
                    raise
        fd = os.open(pathname, os.O_RDWR | os.O_CREAT, 0o666)
        f = open(fd, 'r+', encoding='utf-8')
        text = self.get_content()
        # Read one byte past the new length so an old file that merely
        # starts with the new text does not compare equal.
        oldtext = f.read(len(text) + 1)
        if text != oldtext:
            f.seek(0)
            f.truncate(0)
            f.write(text)
        f.close()
def _wrap_ifcond(ifcond, before, after):
if before == after:
return after # suppress empty #if ... #endif
assert after.startswith(before)
out = before
added = after[len(before):]
if added[0] == '\n':
out += '\n'
added = added[1:]
out += gen_if(ifcond)
out += added
out += gen_endif(ifcond)
return out
class QAPIGenCCode(QAPIGen):
    """QAPIGen that can wrap added regions in C preprocessor conditionals."""

    def __init__(self, fname):
        super().__init__(fname)
        # (ifcond, body snapshot, preamble snapshot) while a region is open.
        self._start_if = None

    def start_if(self, ifcond):
        """Open a conditional region, snapshotting the current output."""
        assert self._start_if is None
        self._start_if = (ifcond, self._body, self._preamble)

    def end_if(self):
        """Close the region, wrapping what was added in #if/#endif."""
        assert self._start_if
        self._wrap_ifcond()
        self._start_if = None

    def _wrap_ifcond(self):
        ifcond, body_before, preamble_before = self._start_if
        self._body = _wrap_ifcond(ifcond, body_before, self._body)
        self._preamble = _wrap_ifcond(ifcond, preamble_before, self._preamble)

    def get_content(self):
        # A region left open is a programming error in the generator.
        assert self._start_if is None
        return super().get_content()
class QAPIGenC(QAPIGenCCode):
    """Generator for C source files: adds the standard header and footer."""

    def __init__(self, fname, blurb, pydoc):
        super().__init__(fname)
        self._blurb = blurb
        # Collect every "Copyright ..." line from the generating script's
        # docstring and join them for the file header comment.
        self._copyright = '\n * '.join(re.findall(r'^Copyright .*', pydoc,
                                                  re.MULTILINE))

    def _top(self):
        # Standard header: blurb, copyright lines, LGPL license note.
        return mcgen('''
/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
%(blurb)s
 *
 * %(copyright)s
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
''',
                     blurb=self._blurb, copyright=self._copyright)

    def _bottom(self):
        # Guarantee the object file is non-empty even when no real code
        # was generated for this module.
        return mcgen('''
/* Dummy declaration to prevent empty .o file */
char qapi_dummy_%(name)s;
''',
                     name=c_fname(self.fname))
class QAPIGenH(QAPIGenC):
    """Generator for C headers: wraps the content in include guards."""

    def _top(self):
        header = super()._top()
        return header + guardstart(self.fname)

    def _bottom(self):
        return guardend(self.fname)
@contextmanager
def ifcontext(ifcond, *args):
    """A 'with' statement context manager bracketing start_if()/end_if().

    *args: any number of QAPIGenCCode

    Example::

        with ifcontext(ifcond, self._genh, self._genc):
            modify self._genh and self._genc ...

    Is equivalent to calling::

        self._genh.start_if(ifcond)
        self._genc.start_if(ifcond)
        modify self._genh and self._genc ...
        self._genh.end_if()
        self._genc.end_if()
    """
    for gen in args:
        gen.start_if(ifcond)
    yield
    for gen in args:
        gen.end_if()
class QAPIGenDoc(QAPIGen):
    """Generator for documentation output with a do-not-modify banner."""

    def _top(self):
        banner = '@c AUTOMATICALLY GENERATED, DO NOT MODIFY\n\n'
        return super()._top() + banner
class QAPISchemaMonolithicCVisitor(QAPISchemaVisitor):
    """Visitor emitting one .c/.h pair for the whole schema."""

    def __init__(self, prefix, what, blurb, pydoc):
        self._prefix = prefix
        self._what = what
        stem = self._prefix + self._what
        self._genc = QAPIGenC(stem + '.c', blurb, pydoc)
        self._genh = QAPIGenH(stem + '.h', blurb, pydoc)

    def write(self, output_dir):
        """Write both generated files into *output_dir*."""
        self._genc.write(output_dir)
        self._genh.write(output_dir)
class QAPISchemaModularCVisitor(QAPISchemaVisitor):
    """Visitor emitting one .c/.h pair per schema module.

    Modules are tracked by name: user modules keep their schema file
    name, system modules get a './' prefix, and the built-in module is
    represented by a false-y name (None).
    """

    def __init__(self, prefix, what, user_blurb, builtin_blurb, pydoc):
        self._prefix = prefix
        self._what = what
        self._user_blurb = user_blurb
        self._builtin_blurb = builtin_blurb
        self._pydoc = pydoc
        self._genc = None          # generators of the module being visited
        self._genh = None
        self._module = {}          # module name -> (genc, genh)
        self._main_module = None   # first user module seen

    @staticmethod
    def _is_user_module(name):
        # User modules have real names not starting with './'.
        return name and not name.startswith('./')

    @staticmethod
    def _is_builtin_module(name):
        # The built-in module is represented by a false-y name.
        return not name

    def _module_dirname(self, what, name):
        if self._is_user_module(name):
            return os.path.dirname(name)
        return ''

    def _module_basename(self, what, name):
        """Return the output file stem for module *name*."""
        ret = '' if self._is_builtin_module(name) else self._prefix
        if self._is_user_module(name):
            basename = os.path.basename(name)
            ret += what
            # Only non-main user modules carry a '-<module>' suffix.
            if name != self._main_module:
                ret += '-' + os.path.splitext(basename)[0]
        else:
            # System/builtin module: splice the name in at each '-' of
            # *what* (e.g. 'qapi-types' -> 'qapi-builtin-types').
            name = name[2:] if name else 'builtin'
            ret += re.sub(r'-', '-' + name + '-', what)
        return ret

    def _module_filename(self, what, name):
        return os.path.join(self._module_dirname(what, name),
                            self._module_basename(what, name))

    def _add_module(self, name, blurb):
        """Create the generator pair for *name* and make it current."""
        basename = self._module_filename(self._what, name)
        genc = QAPIGenC(basename + '.c', blurb, self._pydoc)
        genh = QAPIGenH(basename + '.h', blurb, self._pydoc)
        self._module[name] = (genc, genh)
        self._genc, self._genh = self._module[name]

    def _add_user_module(self, name, blurb):
        assert self._is_user_module(name)
        # The first user module added becomes the main module.
        if self._main_module is None:
            self._main_module = name
        self._add_module(name, blurb)

    def _add_system_module(self, name, blurb):
        # './' marks a system module; None stays None for the built-in one.
        self._add_module(name and './' + name, blurb)

    def write(self, output_dir, opt_builtins=False):
        """Write all modules; the built-in one only when opt_builtins."""
        for name in self._module:
            if self._is_builtin_module(name) and not opt_builtins:
                continue
            (genc, genh) = self._module[name]
            genc.write(output_dir)
            genh.write(output_dir)

    def _begin_system_module(self, name):
        # Hook for subclasses; default does nothing.
        pass

    def _begin_user_module(self, name):
        # Hook for subclasses; default does nothing.
        pass

    def visit_module(self, name):
        if name is None:
            if self._builtin_blurb:
                self._add_system_module(None, self._builtin_blurb)
                self._begin_system_module(name)
            else:
                # The built-in module has not been created.  No code may
                # be generated.
                self._genc = None
                self._genh = None
        else:
            self._add_user_module(name, self._user_blurb)
            self._begin_user_module(name)

    def visit_include(self, name, info):
        # Mirror a QAPI 'include' directive as a C #include of the
        # included module's generated header, relative to this header.
        relname = os.path.relpath(self._module_filename(self._what, name),
                                  os.path.dirname(self._genh.fname))
        self._genh.preamble_add(mcgen('''
#include "%(relname)s.h"
''',
                                      relname=relname))
|
easyJet, Europe’s leading airline, is today showcasing the Airbus A320neo at Newcastle International Airport. The neo brings significant environmental and operational benefits – up to 15 percent savings in fuel burn and CO2 emissions, and a reduced noise footprint of 50 percent on take-off and landing phase. Crucially, this means the quieter aircraft minimises its noise impact on local communities.
easyJet operates to more destinations than any other airline at Newcastle, serving 15 destinations from the airport. The airline is excited to be introducing the neo to some of these routes this year, as our fleet of A320neos increases.
“We are delighted to be showcasing our new Airbus A320neo in Newcastle today. As Newcastle Airport’s biggest airline, we are committed to sustainable growth. We believe that within the current cap, there is still room to increase efficiency – for example by incentivising higher load factors as well as the use of modern and efficient aircraft like the Airbus A320neo.
“We’re thrilled to be part of easyJet’s neo story and are delighted that the airline chose to showcase the aircraft here today. At Newcastle Airport, we’re committed to ensuring we’re using the latest in technology right across the airport, and the aircraft that operate here are no different.
“I am delighted that easyJet has chosen Newcastle as one of the first bases to showcase the new A320NEO aircraft. Newcastle International Airport is critical to the North East region as a generator of jobs and millions of pounds for the local economy.
|
"""Object-Relational Mapping classes, based on SQLAlchemy, for representing the
dataset, partitions, configuration, tables and columns.
Copyright (c) 2015 Civic Knowledge. This file is licensed under the terms of the
Revised BSD License, included in this distribution as LICENSE.txt
"""
__docformat__ = 'restructuredtext en'
from six import iteritems
from sqlalchemy import event
from sqlalchemy import Column as SAColumn, Integer
from sqlalchemy import Text, String, ForeignKey
from ambry.identity import ObjectNumber
from . import Base, MutationDict, JSONEncodedObj
class Code(Base):
    """Code entries for variables.

    Maps a key appearing in a dataset column to a value, with optional
    description/source metadata and a free-form JSON data blob.
    """

    __tablename__ = 'codes'

    # Composite primary key: owning column, owning dataset, and the key.
    c_vid = SAColumn('cd_c_vid', String(20), ForeignKey('columns.c_vid'), primary_key=True,
                     index=True, nullable=False)
    d_vid = SAColumn('cd_d_vid', String(20), ForeignKey('datasets.d_vid'), primary_key=True,
                     nullable=False, index=True)
    key = SAColumn('cd_skey', String(20), primary_key=True, nullable=False, index=True)  # String version of the key, the value in the dataset
    ikey = SAColumn('cd_ikey', Integer, index=True)  # Set only if the key is actually an integer
    value = SAColumn('cd_value', Text, nullable=False)  # The value the key maps to
    description = SAColumn('cd_description', Text)
    source = SAColumn('cd_source', Text)
    data = SAColumn('cd_data', MutationDict.as_mutable(JSONEncodedObj))

    def __init__(self, **kwargs):
        """Accept mapped-attribute kwargs; leftovers land in self.data."""
        for p in self.__mapper__.attrs:
            if p.key in kwargs:
                setattr(self, p.key, kwargs[p.key])
                del kwargs[p.key]
        if self.data:
            self.data.update(kwargs)

    def __repr__(self):
        return "<code: {}->{} >".format(self.key, self.value)

    def update(self, f):
        """Copy another Code's mapped properties into this one."""
        for p in self.__mapper__.attrs:
            if p.key == 'oid':
                continue
            try:
                setattr(self, p.key, getattr(f, p.key))
            except AttributeError:
                # The dict() method copies data property values into the main dict,
                # and these don't have associated class properties.
                continue

    @property
    def insertable_dict(self):
        """Dict of db-column-name -> value suitable for a raw INSERT."""
        d = {('cd_' + k).strip('_'): v for k, v in iteritems(self.dict)}
        # the `key` property is not named after its db column
        d['cd_skey'] = d['cd_key']
        del d['cd_key']
        return d

    @staticmethod
    def before_insert(mapper, conn, target):
        # Derive the dataset id from the column id so callers need not
        # set d_vid themselves.
        target.d_vid = str(ObjectNumber.parse(target.c_vid).as_dataset)
event.listen(Code, 'before_insert', Code.before_insert)
|
"Citizens in Louisiana deserve resources to help heal and resolve the ills they face. Future generations are depending on us to make the world a better place, and I possess the insight, values and passion to represent everyone. I PROMISE TO ALWAYS BE TRUTHFUL, TRANSPARENT, AND PUT PEOPLE FIRST!"
I am Sandra “Candy” Shoemaker-Christophe born to high school sweethearts and raised with my sister in the small town of Clinton, Louisiana in a loving home in a blue-collar neighborhood. My community was poor, and this united it more than racially segregating laws could divide it. Neighbors were caring and protective of family. My instincts to lead were nurtured early as I immersed myself in school extracurricular activities; however, my career course to counsel and support victims of broken families quickened when my parents were divorced during my sophomore year in high school. In May 1986, I graduated with 79 of my peers from Clinton High School who had elected me as President of my senior class.
I attended a small private Baptist college on an academic scholarship. Louisiana College is where I studied Psychology and Sociology. There I received my bachelor's degrees as a double major. I then chose to acquire a master’s degree from Grambling State University in Social Work with a minor concentration in Business Administration. I continued to become a Licensed Clinical Social Worker and a Licensed Addiction Counselor. While attending Grambling State University, I enjoyed the cultural environment unique to an HBCU (historically black college and university).
Following academic and certification training, my employment experiences include counseling and administering programs at Rapides Parish School Board, La. Dept of Health & Hospitals, La. Dept. of Corrections, V.A. Medical Center, Christophe Counseling & Associates. My experiences have permitted great intimate insight into the multiple and complex needs of all people, regardless of social economic status, color or cultural background. I have been able to sit on both sides of the desk as a consumer and provider of professional services.
I am the founder of Re-Entry Solutions, a nonprofit organization. With the support of my husband, Andre, I served as an unpaid Executive Director and Fundraiser for eight years. Re-Entry Solutions is dedicated to assisting returning citizens to achieve success as they return to their communities. Empowerment services include employment assistance, housing, and local resource connecting. I have been recognized for excellence in providing supportive services: Hometown Hero Award, Central Louisiana Human Relations Award, Delta Economic Development Award, Louisiana Department of Corrections Secretary Seal Award, featured by Louisiana Public Broadcasting and a 2012 Louisiana Justice Hall of Fame inductee. I know that our communities are hurting. I believe that citizens of our state should not have to live life in a state of crisis. Systematic oppression in our government is wrong and better use of our tax dollars will result in meaningful resources to help heal the ills that reduce the empowerment of others.
Future generations are depending on us to make this world a better place for those who will follow! I look for unifying elements in people and circumstances; I believe this allows us to recognize the value of all contributors to society and realize the sacrifices of our forefathers. I am a woman with conviction and follow strong spiritual guidance. As a member of The Pentecostals of Alexandria for the past twenty years, I believe that serving others is an honor and a privilege. I am Candy Christophe, a candidate offering to serve my state in the 5th Congressional District of Louisiana. I believe that I possess the insight, values, skill sets, and passion to represent all people. I am not asking, “What can my country do for me?" Instead, I am asking, "What can I do for my country?” I ask for your prayers, support, and vote as we unite to put, "People First!"
Paid for by the Campaign Committee to elect Sandra "Candy" Christophe.
|
# -*- coding: utf-8 -*-
# WebPyMail - IMAP python/django web mail client
# Copyright (C) 2008 Helder Guerreiro
## This file is part of WebPyMail.
##
## WebPyMail is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## WebPyMail is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with WebPyMail. If not, see <http://www.gnu.org/licenses/>.
#
# Helder Guerreiro <helder@paxjulia.com>
#
# $Id$
#
from django import template
from django.template import resolve_variable
from django.utils.translation import gettext_lazy as _
register = template.Library()
# Tag to retrieve a message part from the server:
@register.tag(name="spaces")
def do_spaces(parser, token):
try:
# split_contents() knows not to split quoted strings.
tag_name, num_spaces = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError, \
"%r tag requires one arg: num_spaces" \
% token.contents.split()[0]
return PartTextNode(num_spaces)
class PartTextNode(template.Node):
    """Render node for the ``spaces`` tag: emits ``num_spaces`` spaces."""

    def __init__(self, num_spaces):
        self.num_spaces = num_spaces

    def render(self, context):
        num_spaces = resolve_variable(self.num_spaces, context)
        try:
            num_spaces = int(num_spaces)
        except ValueError:
            # BUG FIX: the original referenced the undefined name
            # ``tag_name`` here, raising NameError instead of the intended
            # template error.  Also use the Python 2/3 raise form.
            raise template.TemplateSyntaxError(
                "spaces tag's num_spaces argument must be an int")
        return ' ' * num_spaces
|
If you are looking for a HVAC Company, Air Conditioning Repair Service or Heating Repair Service in Lazy Lake, you are in the right place. The FindLocal-HVAC Directory displays Lazy Lake, FL HVAC, Air Conditioning & Heating Repair Services. We also display HVAC, Air Conditioning Repair Services & Heating Repair Services throughout the United States.
Our goal is to provide a great experience for consumers looking for HVAC, Air Conditioning Repair Services or Heating Repair Services in Lazy Lake, FL. We keep our directory free of clutter and unnecessary links and ads. Consumers can quickly locate Lazy Lake HVAC, Air Conditioning Repair Services or Heating Repair Services using the FindLocal-HVAC Directory.
Please use our zip code search function at the top of the page to search for HVAC Companies, Air Conditioning Repair Services or Heating Repair Services in your area if Lazy Lake is not the city where you are trying to find an Air Conditioning or Heating Repair Company.
|
from ANPyNetCPU import *
# Color vectors used as training input for the self-organizing map.
black = vectorf([0, 0, 0])
white = vectorf([1, 1, 1])
red = vectorf([1, 0, 0])
green = vectorf([0, 1, 0])
blue = vectorf([0, 0, 1])

trainSet = TrainingSet()
for color in (black, white, red, green, blue):
    trainSet.AddInput(color)

# Map dimensions (4x1 grid) and input dimensions (3x1 = RGB triple).
widthMap = 4
heightMap = 1
inpWidth = 3
inpHeight = 1

SOM = SOMNet(inpWidth, inpHeight, widthMap, heightMap)
SOM.SetTrainingSet(trainSet)
SOM.SetLearningRate(0.3)
SOM.Training(1000)

# Per input vector: its centroid, euclidean distance and the BMU's ID.
inputv = SOM.GetCentrOInpList()
# Ordered list of distinct centroids, each with its BMU's ID.
centroids = SOM.GetCentroidList()

# Dump both result lists for inspection.
for centroid in centroids:
    print(centroid)
for entry in inputv:
    print(entry)

# Collect the BMU ID of every input vector.
IDList = [entry.m_iBMUID for entry in inputv]
print(IDList)

# For each BMU ID, look up and print the matching centroid vector.
for bmu_id in IDList:
    for centroid in centroids:
        if bmu_id == centroid.m_iBMUID:
            print(centroid.m_vCentroid)
|
High quality digital and screen print.
Kuhnel are large format POS digital printing specialists that have more than 45 years experience in delivering high quality digital and screen printing solutions at competitive prices. Our digital and screen printing service at Kuhnel Graphics can help to transform the perception of your brand, by producing high quality and effective POS designed to persuade, inform and inspire.
We offer a broad range of products including design, large format digital, screen printing, plus a complete collation, finishing, and distribution service. As a trusted and reliable digital printing company, we pride ourselves in producing graphics that are specifically tailored to your needs. We offer a service that is straightforward, powerful and competitive with satisfaction guaranteed.
What really sets us apart from our competitors however is our commitment to providing exceptional customer service. This means that you will get a consistent, high quality service that delivers your prints on time and commits to our promises.
Kuhnel has more than two decades of experience in producing large format graphics and creative POS displays in a variety of styles, sizes and materials.
Kuhnel is a market leader in producing promotional 2D signage and 3D display stands designed to increase sales and boost brand awareness.
Kuhnel provides state-of-the-art, custom designs that includes a wide variety of banners, graphics, covers and signs designed to capture your target market in the most effective way.
Kuhnel produces an extensive variety of original, bespoke and innovative displays that can be tailored to the specific needs of your business.
Our primary focus is print quality and customer service, achieving tight deadlines and improving cost efficiency. 45 years of retail experience have taught us that timing is crucial – if it’s physically possible, we will deliver on time and on budget!
Copyright © 2017. Kühnel Graphics.
|
# Copyright 2014 Uri Laserson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
def qPCR2quantitation(inputfile, output_formats):
    """Plot quantitation (amplification) curves from a qPCR CSV export.

    Reads the curve labels and the quantitation step id from ``inputfile``,
    then saves one figure per entry of ``output_formats`` (e.g. 'pdf',
    'png') named after the input file's basename.

    Raises:
        ValueError: if the header line or the quantitation step line
            cannot be found, or the header fields are unexpected.
    """
    outputbasename = os.path.splitext(os.path.basename(inputfile))[0]

    # Learn some things about the data.  ``with`` guarantees the handle is
    # closed even on error (the original leaked it on the raise path).
    curve_labels = None
    line_id = None
    with open(inputfile, 'r') as ip:
        # How many curves are there?
        for line in ip:
            if line.startswith('Step'):
                # Verify the fields in the line:
                fields = line.split(',')
                if fields[0] != 'Step' or fields[1] != 'Cycle' or fields[2] != 'Dye' or fields[3] != 'Temp.':
                    # Parenthesized raise is Python 2/3 compatible.
                    raise ValueError('Expected line like: "Step,Cycle,Dye,Temp.,..."')
                curve_labels = fields[4:-1]  # (skip the above four fields and last extra comma)
                break

        # What step is the quantitation at?
        for line in ip:  # advance to data set characterization
            if line.strip() == 'Analysis Options':
                break
        for line in ip:
            if line.startswith("Step") and "Quantitation" in line:
                line_id = line.split()[1].strip(':')
                break

    # BUG FIX: the original left these unbound when the markers were
    # missing, failing later with NameError instead of a clear error.
    if curve_labels is None:
        raise ValueError('No "Step,Cycle,Dye,Temp." header found in %s' % inputfile)
    if line_id is None:
        raise ValueError('No quantitation step found in %s' % inputfile)

    # Create data structures
    cycles = []
    curves = [[] for curve in curve_labels]

    # Load the data (the original never closed this second handle).
    with open(inputfile, 'r') as ip:
        for line in ip:  # advance to data
            if line.startswith('Step'):
                break
        for line in ip:
            if line.strip() == '':
                break
            # Split once per line instead of three times.
            fields = line.split(',')
            if fields[0] == line_id:
                cycles.append(int(fields[1]))
                # list() materializes the values under Python 3 as well.
                data = list(map(float, fields[4:-1]))
                for (i, value) in enumerate(data):
                    curves[i].append(value)

    # Make the plots
    fig = plt.figure()
    ax = fig.add_subplot(111)
    for (label, curve) in zip(curve_labels, curves):
        ax.plot(cycles, curve, label=label)
    ax.legend(loc=2)
    ax.set_xlabel('Cycles')
    ax.set_ylabel('Fluorescence (a.u.)')
    for format in output_formats:
        fig.savefig(outputbasename + '.quantitation.' + format)
if __name__ == '__main__':
    import sys
    import optparse

    output_formats = set()

    # Callback shared by the --png/--pdf/--eps flags: records the
    # requested format (the flag name without its dashes).
    def append_format(option, opt_str, value, parser):
        output_formats.add(opt_str.strip('-'))

    option_parser = optparse.OptionParser()
    option_parser.add_option('--png', action='callback', callback=append_format)
    option_parser.add_option('--pdf', action='callback', callback=append_format)
    option_parser.add_option('--eps', action='callback', callback=append_format)
    (options, args) = option_parser.parse_args()

    if len(args) != 1:
        # Parenthesized raise is Python 2/3 compatible (the original used
        # the Python-2-only ``raise Exc, msg`` form).
        raise ValueError("Must give a single file as input.")

    # Default to both pdf and png when no format flag was given.
    output_formats = list(output_formats)
    if output_formats == []:
        output_formats.append('pdf')
        output_formats.append('png')

    inputfile = args[0]
    qPCR2quantitation(inputfile, output_formats)
|
Sometimes, I’m just not satisfied with what my phone comes with by default, such as the messaging app or the file browser. When I first heard about Tomi File Manager, I saw the pictures and was impressed with an app that looks to take file browsing to the next level. Not only was it actually appealing to look at (unlike most file managers), the navigation was easy and everything is categorized very nicely.
The main UI is unique and even provides a quick glimpse at how much space is being used on your phone. I love the Apps tab because you can simply scroll through and check mark multiple apps and uninstall them rather than doing it the longer way in your apps drawer. Managing all other files is nice and easy too. Once in the Music tab, all files are sorted and can be viewed by song title, artist, album or even year created.
The directory is nice and supports rooted phone file managing which can be switched on in the settings page as shown below, which is always a plus considering some file managers charge extra for that feature. Oh, and did I mention this whole app is free and I’ve yet to see a single pesky ad on it? Whether that’s the case in the future, I do not know. But I like where the app is right now.
You can check out this unique and elegant file manager in the Google Play Link below!
|
#coding:utf-8
#author:Yu Junliang
import random
import numpy as np
from attack import Attack
class BandWagonAttack(Attack):
    """Bandwagon shilling attack.

    Each spam profile rates a few very popular ("hot") items with the
    target score (so it correlates with many genuine users), adds random
    filler ratings, and pushes the target item(s).
    """

    def __init__(self,conf):
        super(BandWagonAttack, self).__init__(conf)
        # Items sorted by popularity (number of ratings, descending),
        # truncated to the top selectedSize fraction of the catalogue.
        # NOTE(review): assumes self.itemProfile maps item id -> iterable
        # of ratings, supplied by the Attack base class -- confirm there.
        self.hotItems = sorted(self.itemProfile.iteritems(), key=lambda d: len(d[1]), reverse=True)[
                        :int(self.selectedSize * len(self.itemProfile))]

    def insertSpam(self,startID=0):
        """Generate the spam user profiles.

        When startID is 0 the spam user ids continue after the genuine
        users; otherwise they start at startID.
        """
        print 'Modeling bandwagon attack...'
        itemList = self.itemProfile.keys()
        if startID == 0:
            self.startUserID = len(self.userProfile)
        else:
            self.startUserID = startID

        # One spam profile per iteration; profile count is the attackSize
        # fraction of the genuine user population.
        for i in range(int(len(self.userProfile)*self.attackSize)):
            # filler items: random items with uniformly random scores
            fillerItems = self.getFillerItems()
            for item in fillerItems:
                self.spamProfile[str(self.startUserID)][str(itemList[item])] = random.randint(self.minScore,self.maxScore)
            # selected items: popular items rated with the target score
            selectedItems = self.getSelectedItems()
            for item in selectedItems:
                self.spamProfile[str(self.startUserID)][item] = self.targetScore
            # target items: the promoted item(s), also rated targetScore
            for j in range(self.targetCount):
                target = np.random.randint(len(self.targetItems))
                self.spamProfile[str(self.startUserID)][self.targetItems[target]] = self.targetScore
                self.spamItem[str(self.startUserID)].append(self.targetItems[target])
            self.startUserID += 1

    def getFillerItems(self):
        """Return random item indexes; the count is drawn from a Gaussian
        around fillerSize * |items| (clamped at zero)."""
        mu = int(self.fillerSize*len(self.itemProfile))
        sigma = int(0.1*mu)
        markedItemsCount = int(round(random.gauss(mu, sigma)))
        if markedItemsCount < 0:
            markedItemsCount = 0
        markedItems = np.random.randint(len(self.itemProfile), size=markedItemsCount)
        return markedItems

    def getSelectedItems(self):
        """Return hot-item ids sampled with replacement; the count is
        Gaussian around selectedSize * |items| (absolute value)."""
        mu = int(self.selectedSize * len(self.itemProfile))
        sigma = int(0.1 * mu)
        markedItemsCount = abs(int(round(random.gauss(mu, sigma))))
        markedIndexes = np.random.randint(len(self.hotItems), size=markedItemsCount)
        markedItems = [self.hotItems[index][0] for index in markedIndexes]
        return markedItems
|
HAVANA – Private restaurants in Havana are exploding in number and soaring in quality, providing a treat for visitors and a surprising bright spot in a nation better known for monotonous food and spotty service.
Havana now boasts nearly 2,000 private restaurants offering a range of cuisine from traditional Cuban to Russian, Spanish, Vietnamese and other ethnicities. From caviar to lobster bisque and on to pizza, everything seems to be available.
Usually set in private homes, some of the restaurants offer Old World charm with starched white tablecloths and real silverware. Heirlooms fill shelves. Other restaurants hunker in basements or peer from walk-up seafront buildings, sometimes with funky or retro décor.
“Gastronomy is on the rise in our country,” said Jorge Luis Trejo, son of the proprietors of La Moraleja, a restaurant in Havana’s Vedado district with wild rabbit flambé and chicken confit on the menu.
His family’s restaurant opened in January 2012. Donning the chef’s apron is a cook who once worked in France, the Netherlands, Greece and England, Trejo said.
“We try to make traditionally Cuban dishes with fusion sauces to entertain our clients,” he said.
At the end of each meal, waitresses carry a humidor to diners and offer them a choice of complimentary hand-rolled cigars.
Private restaurants first arose in Cuba in 1993 amid the collapse of the Soviet Union, Cuba’s longtime patron, only to be reined in as authorities worried that small eateries were relying on pilfered supplies and surpassing the legal limit of 12 chairs, essentially three tables.
The restaurants were known as paladares, a Spanish and Portuguese word that means palates, a moniker taken from the establishment of a food vendor in a popular Brazilian soap opera.
For periods in the 1990s, small restaurants could offer neither seafood nor beef, which were needed for the official tourist industry. Owners were ordered to buy at retail prices in official stores. Most employees had to be family members.
Those rules drove most restaurants out of business, choking them with a web of taxes and arbitrary enforcement that underscored how wary Cuba’s communist officials were of private enterprise.
By 2010, state media reported that as few as 74 private restaurants were operating in Havana.
Then things began to change. Fidel Castro’s brother, Raúl, who’d taken control of the government, ordered more flexible rules for restaurants at the end of 2011, raising the limit on chairs to 50 and issuing new licenses. There are still rules to be skirted, and supplies can be hard to come by, but a rebirth is taking place.
“There’s undeniably a boom, a significant increase in both the numbers of people who have licenses in the food service area and the emergence of a haute cuisine, or as they say in Cuba, cocina de autor,” or creative nouvelle cuisine, said Ted Henken, a Cuba expert at Baruch College in New York who’s written about the phenomenon.
Today, Havana is dotted with private restaurants with elaborate menus, identifiable only by single small signs on the outsides of buildings.
In Cuba’s moribund economy, bad service is the norm in most offices, hotels and state-run businesses, but not in the private restaurants, which often have the cozy feeling of private dining since they occupy what once were people’s homes.
“You feel like, ‘Oh, I’m in someone’s old living room, and sipping a mojito,’” Henken said.
It’s a feeling that more Americans may experience. On Dec. 17, President Barack Obama and Raúl Castro announced the re-establishment of diplomatic relations, broken in 1961. Obama also said he’d further relax restrictions on U.S. citizens’ travel to Cuba without lifting the long-standing trade embargo, which only Congress can do.
The easing of U.S. rules will include permitting U.S. banks to accept credit card transactions conducted in Cuba. Many Cuban restaurateurs await a growing flow of American visitors.
At Paladar Los Mercaderes, which sits on a bustling pedestrian street in renovated Old Havana, handsome waiters in crisp black uniforms buttoned to the neck take orders in a multitude of languages. Modern Cuban art adorns the walls. Musicians croon Cuban ballads as breezes waft through the high-ceilinged rooms.
Among the entrees, one could pick from smoked pork loin in plum sauce ($15.75), filet mignon in mushroom sauce ($18), shrimp risotto ($17) or a grilled seafood platter with lobster tail (variable price), among other dishes.
“We’ve got boats fishing for us, so we always have fresh fish. We’ve got a contract with a farm for fresh produce,” said Alvarez, an engineer who was once a guide at a cigar factory.
While Alvarez aims for a bit of glam, or what he labels a “unique experience,” other restaurants shoot for different diners, mostly foreign but also some Cubans with access to hard currency.
El Litoral, a trendy spot on the seaside boulevard in Vedado, is filled nightly with diplomats, artists, well-heeled tourists and a smattering of Cubans.
Opened a year ago, the restaurant offers a high-end menu that includes a soupçon of molecular cooking (foams), puff pastry entrees, a roasted seafood platter, and a kebab of shrimp and bacon in the fresh split-pea soup, among other offerings.
A different clientele comes to Nazdarovie, mainly those with connections to the former Soviet bloc but also those drawn by Soviet kitsch. The name is a toast to one’s health.
“This restaurant is inspired by the memories and nostalgia felt by the thousands of Cubans who spent many years of their youth studying in the USSR,” the menu notes.
A bust of Lenin peers out from the bar. Copies of Sputnik, a magazine, and matryoshka dolls fill shelves. In a decidedly modern touch, big red art deco lamps shine above deep black tables. A terrace looks out on the sea.
The food, far from bland, includes borscht, stroganoffs, chicken tabaca and the shashlik kebabs popular in Eastern Europe.
“The chef is Cuban but he studied at the Cordon Bleu school in Miami,” said Yansel Sergienko, a 22-year-old bartender sporting a visorless Soviet naval cap.
There still is a Wild West feel to Havana’s private dining scene.
Many restaurateurs must skirt the rules to keep their larders filled, employing “mules” who travel to Mexico, Spain and Florida to bring back supplies and more exotic ingredients. Until the Castro government gets out of the way of the growth and clarifies regulations, the Havana restaurant scene won’t truly take off, experts say.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-26 21:26
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: adds an owner ForeignKey to TodoList and
    # refreshes the creation_date defaults.  NOTE: the datetime defaults
    # below were baked in at generation time by makemigrations; do not
    # edit them -- applied migration files must stay stable.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('todo', '0008_auto_20160126_0004'),
    ]

    operations = [
        migrations.AddField(
            model_name='todolist',
            name='list_user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='item',
            name='creation_date',
            field=models.DateTimeField(default=datetime.datetime(2016, 1, 26, 14, 26, 31, 705576), verbose_name='date created'),
        ),
        migrations.AlterField(
            model_name='todolist',
            name='creation_date',
            field=models.DateTimeField(default=datetime.datetime(2016, 1, 26, 14, 26, 31, 704075), verbose_name='date created'),
        ),
    ]
|
FBC Manby Bowdler LLP was a founder member of LawNet in 1989 and the 29-partner firm employs over 160 staff at five offices across the West Midlands and Shropshire.
Alongside its professional rankings in the Legal 500 and Chambers, it was named firm of the year in 2014 by its regional Law Society and has regularly appeared in the Sunday Times Best Companies to Work For listings under Kim’s stewardship.
Welcoming Kim to the role, LawNet chief executive Chris Marston said: “It’s great news for the network that Kim has agreed to take on this key role.
“We’ve seen our network grow over the past year, and our proposition remains rooted in supporting firms to deliver great customer service, providing high quality learning and keeping the lid on PII premiums through a strong risk management culture.
The LawNet network was established in 1989 to enable a collaborative, mutually-owned national network where independent law firms can access big firm resources and benefit from collective purchasing, shared knowledge, best practice and expertise.
As well as quality and risk management accreditation, the network provides training in legal practice and management skills, marketing support and access to a range of office support services. It also provides a group professional indemnity insurance scheme for members.
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a utility class ``CodeRunner`` for use by handlers that execute
Python source code.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import os
import sys
import traceback
from types import ModuleType
# External imports
# Bokeh imports
from ...util.serialization import make_id
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'CodeRunner',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class CodeRunner(object):
    ''' Compile and run Python source code.

    '''

    def __init__(self, source, path, argv):
        '''
        Args:
            source (str) : python source code

            path (str) : a filename to use in any debugging or error output

            argv (list[str]) : a list of string arguments to make available
                as ``sys.argv`` when the code executes

        '''
        self._permanent_error = None
        self._permanent_error_detail = None
        self.reset_run_errors()

        # Local import keeps module import cheap; ast is only needed here.
        import ast

        self._code = None

        try:
            nodes = ast.parse(source, path)
            self._code = compile(nodes, filename=path, mode='exec', dont_inherit=True)
        except SyntaxError as e:
            # ``traceback`` is imported at module level; the original
            # redundantly re-imported it here.
            self._code = None
            self._permanent_error = ("Invalid syntax in \"%s\" on line %d:\n%s" % (os.path.basename(e.filename), e.lineno, e.text))
            self._permanent_error_detail = traceback.format_exc()

        self._path = path
        self._source = source
        self._argv = argv
        self.ran = False

    # Properties --------------------------------------------------------------

    @property
    def error(self):
        ''' If code execution fails, may contain a related error message.

        Permanent (compile-time) errors take precedence over per-run ones.
        '''
        return self._error if self._permanent_error is None else self._permanent_error

    @property
    def error_detail(self):
        ''' If code execution fails, may contain a traceback or other details.

        '''
        return self._error_detail if self._permanent_error_detail is None else self._permanent_error_detail

    @property
    def failed(self):
        ''' ``True`` if code execution failed

        '''
        return self._failed or self._code is None

    @property
    def path(self):
        ''' The path that new modules will be configured with.

        '''
        return self._path

    @property
    def source(self):
        ''' The configured source code that will be executed when ``run`` is
        called.

        '''
        return self._source

    # Public methods ----------------------------------------------------------

    def new_module(self):
        ''' Make a fresh module to run in.

        Returns:
            Module, or None if the source failed to compile

        '''
        self.reset_run_errors()

        if self._code is None:
            return None

        module_name = 'bk_script_' + make_id().replace('-', '')
        module = ModuleType(str(module_name)) # str needed for py2.7
        module.__dict__['__file__'] = os.path.abspath(self._path)

        return module

    def reset_run_errors(self):
        ''' Clears any transient error conditions from a previous run.

        Returns
            None

        '''
        self._failed = False
        self._error = None
        self._error_detail = None

    def run(self, module, post_check):
        ''' Execute the configured source code in a module and run any post
        checks.

        Args:
            module (Module) : a module to execute the configured code in.

            post_check(callable) : a function that can raise an exception
                if expected post-conditions are not met after code execution.

        '''
        # BUG FIX: capture the state to restore *before* entering the try
        # block -- the original assigned these inside ``try``, so an early
        # failure could hit a NameError in ``finally``.
        _cwd = os.getcwd()
        _sys_path = list(sys.path)
        _sys_argv = list(sys.argv)
        try:
            # Simulate the sys.path behaviour described here:
            #
            # https://docs.python.org/2/library/sys.html#sys.path
            sys.path.insert(0, os.path.dirname(self._path))
            sys.argv = [os.path.basename(self._path)] + self._argv

            exec(self._code, module.__dict__)
            post_check()
        except Exception as e:
            self._failed = True
            self._error_detail = traceback.format_exc()

            _exc_type, _exc_value, exc_traceback = sys.exc_info()
            filename, line_number, func, txt = traceback.extract_tb(exc_traceback)[-1]

            self._error = "%s\nFile \"%s\", line %d, in %s:\n%s" % (str(e), os.path.basename(filename), line_number, func, txt)
        finally:
            # undo sys.path, CWD fixups
            os.chdir(_cwd)
            sys.path = _sys_path
            sys.argv = _sys_argv

        self.ran = True
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
Okay, so the name might sound a bit dry, but regulatory affairs officers have pretty important jobs. These guys work for research organisations, medical, pharmaceutical, chemicals and other alternative medicines manufacturers to ensure that what their company produces complies with registration.
In a nutshell, regulatory affairs officers ensure manufactured products are appropriately licensed, produced and marketed.
Without this lot, the shelves of Boots, pharmacists and… um… fertiliser stores would be empty. Why? Because they are part of the process that ensures the chemical and medical products we buy and use are safe and work.
Without their expertise, products wouldn’t pass regulations set by regulatory bodies, such as the Medicines and Healthcare products Regulatory Agency (MHRA), and we wouldn’t be able to buy them.
Regulatory affairs officers are there at nearly every stage in a product’s life span: from conducting and devising product trials, to getting licenses for the product and ensuring it meets regulations, to actually writing the product labels and accompanying information leaflets.
In a sense, they are the go-between, negotiating between the company they work for and the appropriate regulatory body to ensure that their product meets all the necessary regulations.
It’s a job that requires a unique blend of scientific, legal and business knowledge, as regulatory affairs officers spend much of their time at desks scrutinising scientific and legal documents.
Those starting in assistant and junior roles might command salaries between £18,000 and £24,000, whilst regulatory affairs officers with more experience are looking at yearly earnings between £28,000 and £50,000.
After ten years, regulatory affairs officers in the most senior roles might earn a salary of anything between £40,000 and £100,000 a year.
Working hours are pretty regular, but officers might work overtime to meet product and license deadlines.
This is a graduate-level job. Most employers will be looking for candidates with degrees in the sciences, such as in pharmacy, biochemistry, chemical and physical sciences, medicinal chemistry, biotechnology and biomedical science. Some might prefer candidates who are educated to a PhD level.
Most candidates will have previous experience under their belts, such as working as a regulatory affairs assistant or in other areas, such as research or quality assurance.
Do you have the makings of a regulatory affairs officer? You’ll need a solid scientific and legal understanding, have top analytical and problem solving skills and winning written and oral communication skills. You’ll need to be super organised and happy to work to strict deadlines. People skills and the ability to network and negotiate are a must. Fluency in another language will be useful.
Although there is no formal training, with many learning ‘on-the-job’ or through in-house training, gaining a diploma and MSc in Regulatory Affairs from The Organisation for Professionals in Regulatory Affairs (TOPRA) is increasingly seen as important for career progression, but is by no means mandatory.
Training never really stops for a regulatory affairs officer, as they have to continuously keep abreast of changes in regulatory matters and new research and scientific developments.
In terms of career progression, regulatory affairs officers might look to progress to more senior managerial roles (although there might not be much scope for promotion in smaller companies with only one or two regulatory specialists) or to specialise in certain areas.
Some regulatory affairs officers go on to set up their own consultancies or work as freelancers.
|
#!/usr/bin/env python3
# --------------------- #
# -- SEVERAL IMPORTS -- #
# --------------------- #
from pathlib import Path
from pytest import fixture, raises
from orpyste.data import ReadBlock as READ
# ------------------- #
# -- MODULE TESTED -- #
# ------------------- #
from mistool import string_use
from mistool.config.pattern import PATTERNS_WORDS
# ----------------------- #
# -- GENERAL CONSTANTS -- #
# ----------------------- #
# Directory of this test file; used to locate the companion data file.
THIS_DIR = Path(__file__).parent
# Shorthand for the class under test.
CLASS_MULTI_SPLIT = string_use.MultiSplit
# ----------------------- #
# -- DATAS FOR TESTING -- #
# ----------------------- #
# Orpyste reader for 'multisplit_iterator.txt': each block of the peuf file
# holds a "text" to split, the "seps" to use and the expected "listiter".
THE_DATAS_FOR_TESTING = READ(
    content = THIS_DIR / 'multisplit_iterator.txt',
    mode = {
        'container' : ":default:",
        'verbatim' : ["text", "seps", "listiter"]
    }
)
@fixture(scope="module")
def or_datas(request):
    """Build the orpyste test datas once per module and register a
    finalizer that removes the extra infos after the last test ran."""
    THE_DATAS_FOR_TESTING.build()
    request.addfinalizer(THE_DATAS_FOR_TESTING.remove_extras)
# --------------- #
# -- REPLACING -- #
# --------------- #
def test_string_use_multisplit_iterator(or_datas):
    """For every case in the data file, split the text with MultiSplit
    and compare the (type, val) pairs yielded by ``iter()`` with the
    expected ones."""
    cases = THE_DATAS_FOR_TESTING.mydict("tree std nosep nonb")

    for casename, infos in cases.items():
        raw_text = infos['text'][0].strip()
        seps = eval(infos['seps'][0])  # the peuf file stores a Python literal

        expected = [eval("({0})".format(line)) for line in infos['listiter']]

        splitter = CLASS_MULTI_SPLIT(seps=seps, strip=True)
        splitter(raw_text)  # builds the internal list view

        found = [(piece.type, piece.val) for piece in splitter.iter()]
        assert expected == found
|
onto the page my friends flow.
The word ‘universe’ literally means ‘one song’.
offered with a grain of salt, words old and tainted.
there is truth there, impressionistic, by life it is painted.
|
#!/usr/bin/python
import os
from sys import argv
from PIL import Image
script, rootdir = argv
COLOR_WHITE = "\033[1;37m{0}\033[00m"
COLOR_BLUE = "\033[1;36m{0}\033[00m"
folder_size = 0
for (path, dirs, files) in os.walk(rootdir):
for file in files:
filename = os.path.join(path, file)
folder_size += os.path.getsize(filename)
pre_size = "%0.1f MB" % (folder_size/(1024*1024.0))
i = 0
for subdir, dirs, files in os.walk(rootdir):
for file in files:
image = os.path.join(subdir, file)
if image.endswith('.jpg'):
i = i + 1
print COLOR_WHITE.format('Compressing: \t %s' % image)
im = Image.open(image)
im.save(image, quality=30)
print '\n'
print COLOR_BLUE.format('Compresson completed.')
print COLOR_BLUE.format('Compressed %s images' % i)
folder_size = 0
for (path, dirs, files) in os.walk(rootdir):
for file in files:
filename = os.path.join(path, file)
folder_size += os.path.getsize(filename)
after_size = "%0.1f MB" % (folder_size/(1024*1024.0))
print COLOR_BLUE.format('Size of folder went from %s to %s' % (pre_size, after_size))
|
Tully De'Ath are working with Willmott Dixon and Hunter and Partners Architects on four schools based on the Sunesis 'Dewey' model. The Sunesis Dewey Model has been developed to provide an efficient footprint ideally suited to restricted sites.
The footprint is built from a number of standard components that can be put together in a variety of configurations to fit almost any site. Each component has been robustly designed to ensure comfortable internal environments are delivered no matter the existing restrictions.
As well as working on the foundations and oversite for the schools Tully De'Ath are also involved in the development of the prototype 'Dewey' school design.
Three of the four schools are located in Southampton; Bannister Infant and Nursery School, Wordsworth Infant School and Moorlands Primary School. The fourth school is The Bay Church of England Primary School in Sandown on the Isle of Wight. Tully De'Ath provided Flood Risk Assessments (FRAs) for each of these sites.
Two further projects are for London Borough of Croydon.
Lower costs with typical schools costing 10% less than more traditional procurement.
Greater cost certainty due to tried and tested design and construction.
Time savings through the advance of OJEU procurement and a far reduced design phase.
Improved quality results from tried and tested construction details and techniques.
|
# disjoint sets
# https://github.com/mission-peace/interview/blob/master/src/com/interview/graph/DisjointSet.java
class Node(object):
    """A single element of a disjoint-set forest.

    Each node carries its payload, a pointer to its parent (a set's
    representative is its own parent) and a rank used by the
    union-by-rank heuristic.
    """

    def __init__(self, data, parent=None, rank=0):
        self.data = data      # payload carried by this node
        self.parent = parent  # parent node in the forest
        self.rank = rank      # upper bound on the subtree height

    def __str__(self):
        return str(self.data)

    def __repr__(self):
        return str(self.data)
class DisjointSet(object):
    """Disjoint-set (union-find) forest with union by rank and path
    compression.

    Elements are registered with ``make_set`` and looked up by value,
    so each value must be hashable and should be added only once.
    """

    def __init__(self):
        # Maps an element's value to its Node in the forest.
        self.map = {}

    def make_set(self, data):
        """Create a new singleton set containing *data*."""
        node = Node(data)
        node.parent = node  # a root is its own parent
        self.map[data] = node

    def union(self, data1, data2):
        """Merge the sets containing *data1* and *data2*.

        Union by rank: the shallower tree is attached under the deeper
        one so tree heights stay logarithmic.
        """
        parent1 = self.find_set_util(self.map[data1])
        parent2 = self.find_set_util(self.map[data2])
        # Compare roots by identity instead of by .data equality: this is
        # cheaper and stays correct even for distinct-but-equal payloads.
        if parent1 is parent2:
            return
        if parent1.rank >= parent2.rank:
            if parent1.rank == parent2.rank:
                # Equal ranks: the merged tree grows one level deeper.
                parent1.rank = parent1.rank + 1
            parent2.parent = parent1
        else:
            parent1.parent = parent2

    def find_set(self, data):
        """Return the representative node of the set containing *data*.

        Raises ``KeyError`` if *data* was never added via ``make_set``.
        """
        return self.find_set_util(self.map[data])

    def find_set_util(self, node):
        """Find *node*'s root, compressing the path along the way."""
        parent = node.parent
        if parent is node:
            return parent
        # Path compression: point every visited node directly at the root.
        node.parent = self.find_set_util(node.parent)
        return node.parent
if __name__ == '__main__':
    # Small demo: build {1,2,3} and {4,5,6,7}, then merge them so every
    # element ends up with the same representative.
    ds = DisjointSet()
    for value in range(1, 8):
        ds.make_set(value)
    for a, b in ((1, 2), (2, 3), (4, 5), (6, 7), (5, 6), (3, 7)):
        ds.union(a, b)
    for i in range(1, 8):
        print(ds.find_set(i))
|
Windows 8.1 – Due to the abundance of changes coming with Windows 8.1, new training materials will become available soon. It’s unclear at this point when that will be, but expect to see something tangible from Microsoft once 8.1 hits GA.
Windows Server 2012 R2 – More than likely 2012 R2 will affect most IT Pros out there, so it’s important to know how the new release will affect certifications. If you’re in the middle of your progress towards MCSA 2012 (like me), then you’ll be happy to know that you’re not wasting your time. The majority of questions found in the current exams should largely remain the same. The recommendation is to keep working towards your exam, as you won’t be penalized for not taking the updated exams for 2012 R2. In the meantime you can check out the What’s New in Windows Server 2012 R2 Jump Start on July 10th and 11th 2013. Microsoft also posted updates to their FAQ about this change.
MCSM Program – Starting this month, Microsoft is dropping the training requirement that was needed to gain MCSM certifications. And by the looks of it you can take all required labs and exams remotely. The previous required training was an intense 3 week program that set you back just under $20K! The current MCSM requirements are now much cheaper and friendlier to people with tight schedules (which is just about everyone).
I do not have it yet. However, I noticed that Mark Minasi has a book out on Windows Server 2012 R2. His bio states that he has been educating on Microsoft since 1984, well, that would have been about when I began using him as a favorite author! My first cert exam was Windows 3.1 and no doubt Mark was there to help out. I WILL be getting this resource, you should look closely at it too, IMHO.
Oops, make that 1993/4, not 1984! LOL, I am losing track of decades. In 1984, it would have been MS Dos 2.0 and MS Basic (which I was doing too). I remember we played with Windows 2.0, but, the hardware wasn’t there yet. Microsoft wasn’t in Enterprise Computing then. Heck, PC barely was!
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-03-28 17:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``Lote`` (production lot) model.

    Auto-generated by Django 1.11; the model is backed by the custom
    table ``fo2_cd_lote``.
    """
    dependencies = [
        ('lotes', '0009_remove_modelotermica_receita'),
    ]
    operations = [
        migrations.CreateModel(
            name='Lote',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('lote', models.CharField(max_length=20, verbose_name='lote')),
                ('op', models.IntegerField(blank=True, null=True, verbose_name='OP')),
                ('referencia', models.CharField(max_length=5, verbose_name='Referência')),
                ('tamanho', models.CharField(max_length=3, verbose_name='Tamanho')),
                ('cor', models.CharField(max_length=6, verbose_name='Cor')),
                ('qtd_produzir', models.IntegerField(verbose_name='quantidade')),
                ('create_at', models.DateTimeField(blank=True, null=True, verbose_name='criado em')),
                ('update_at', models.DateTimeField(blank=True, null=True, verbose_name='alterado em')),
            ],
            options={
                'db_table': 'fo2_cd_lote',
                'verbose_name': 'lote',
            },
        ),
    ]
|
We provide a full selection of genuine Ford Five Hundred Air Suspensions, tested and validated by Ford for fit, form and function. Please filter the Air Suspension results by choosing a vehicle.
Your vehicle consists of all various important automotive parts. The Ford Five Hundred Air Suspension is essential to keeping your auto in prime form. When you buy a new Air Suspension from FordPartsPrime.com, you spend less and save more on auto care.
|
from openerp import models, fields, api, _
from openerp import netsvc
from openerp import tools
from itertools import groupby
class confirm_quotation_franco(models.TransientModel):
    """Wizard that confirms the selected draft sale orders (quotations).

    Orders are grouped by partner.  Orders flagged ``franco_check`` are
    only confirmed when their combined untaxed amount reaches the
    partner's ``franco_amount`` threshold (an unset or non-positive
    threshold lets them through); all other draft orders are confirmed
    unconditionally.

    NOTE(review): relies on Python 2 semantics -- ``filter``/``map``
    returning lists (``orders_to_be_confirmed += orders_franco_check``
    would fail on Python 3 iterators) -- and on the legacy ``netsvc``
    workflow engine.  Confirm before porting.
    """
    _name = 'confirm.sale.order.franco'
    _description = 'Confirm Sale Order'
    @api.multi
    def confirm_sale_order_franco(self):
        # Legacy workflow service used to fire the 'order_confirm' signal.
        wf_service = netsvc.LocalService('workflow')
        # Orders selected in the UI (active_ids of the calling action).
        sale_orders = self.env['sale.order'].browse(self._context.get('active_ids', []))
        filtered_sale_orders = filter(lambda order: order.state =='draft', sale_orders) # only consider quotations
        sorted_sale_orders = sorted(filtered_sale_orders, key=lambda order: order.partner_id) # necessary for group_by
        for partner, ordrs in groupby(sorted_sale_orders, lambda order: order.partner_id):
            orders = [order for order in ordrs] # iterator only allows one iteration
            orders_franco_check = filter(lambda order: order.franco_check, orders)
            orders_to_be_confirmed = filter(lambda order: not order.franco_check, orders)
            if not partner.franco_amount or partner.franco_amount <= 0.0:
                # No (positive) threshold configured: treat it as always met.
                amount_total = float("inf")
            else:
                amount_total = sum(map(lambda order: order.amount_untaxed, orders_franco_check))
            if amount_total >= partner.franco_amount:
                # Threshold reached: confirm the franco-checked orders too.
                orders_to_be_confirmed += orders_franco_check
            for order in orders_to_be_confirmed:
                wf_service.trg_validate(self.env.uid, 'sale.order', order.id, 'order_confirm', self.env.cr)
        return {'type': 'ir.actions.act_window_close'}
|
William S. Farish’s homebred Code of Honor earned 50 points on the Road to the Kentucky Derby following his game tally in the Fountain of Youth Stakes (G2) at Gulfstream Park on Saturday. The sophomore colt now sits in second with 54 total points following his triumph at 9-1 odds.
Racing near midpack early beneath John Velazquez, the Shug McGaughey trainee moved into third at the top of the lane and was in second nearing midstretch with momentum. The son of Noble Mission powered to the lead inside the final furlong and held a hard-charging Bourbon War at bay late to earn his initial graded stakes win, clocking 1 1/16 miles in 1:43.85 in the process.
Code of Honor debuted at Saratoga in the summer and led at every call en route to a 1 1/2-length tally going six furlongs. Returning six weeks later at Belmont Park, the chestnut colt overcame a bad stumble at the break to rally for a fine second-place finish in the Champagne Stakes (G1).
The Kentucky-bred kicked off his much-anticipated three-year-old campaign in the Mucho Macho Man Stakes at Gulfstream, but he never fired in running fourth as the 4-5 choice. He showed marked improvement on Saturday when rallying off a swift pace to earn graded glory.
Code of Honor is the first stakes winner out of the Grade-3 winning Dixie Union mare Reunited. She herself is a half-sister to a pair of stakes winners; the Summer Squall mare Wind Tunnel and Dixieland Band colt Deal Breaker.
The winner is bred to handle a route of ground and is likely to progress on the Road to the Kentucky Derby as a serious Triple Crown contender. He has earned respectable BRIS Speed figures of 93-95-91-95 to date with room for improvement as he continues to develop.
Code of Honor has shown versatility, with gate speed as well as a stout turn of foot. The colt is in the hands of a patient conditioner who knows how to point a horse towards a big race. Code of Honor appears to have all the tools to be an impactful three-year-old throughout the spring.
|
# Copyright (c) 2015-2020 Matthias Geier
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Play and Record Sound with Python.
API overview:
* Convenience functions to play and record NumPy arrays:
`play()`, `rec()`, `playrec()` and the related functions
`wait()`, `stop()`, `get_status()`, `get_stream()`
* Functions to get information about the available hardware:
`query_devices()`, `query_hostapis()`,
`check_input_settings()`, `check_output_settings()`
* Module-wide default settings: `default`
* Platform-specific settings:
`AsioSettings`, `CoreAudioSettings`, `WasapiSettings`
* PortAudio streams, using NumPy arrays:
`Stream`, `InputStream`, `OutputStream`
* PortAudio streams, using Python buffer objects (NumPy not needed):
`RawStream`, `RawInputStream`, `RawOutputStream`
* Miscellaneous functions and classes:
`sleep()`, `get_portaudio_version()`, `CallbackFlags`,
`CallbackStop`, `CallbackAbort`
Online documentation:
https://python-sounddevice.readthedocs.io/
"""
# Single-source package version string.
__version__ = '0.4.1'
import atexit as _atexit
import os as _os
import platform as _platform
import sys as _sys
from ctypes.util import find_library as _find_library
from _sounddevice import ffi as _ffi
# Load the PortAudio shared library.  First try a few well-known names via
# ctypes.util.find_library(); if none is found, fall back to the binaries
# shipped in the optional _sounddevice_data package (macOS/Windows only).
try:
    for _libname in (
            'portaudio', # Default name on POSIX systems
            'bin\\libportaudio-2.dll', # DLL from conda-forge
            'lib/libportaudio.dylib', # dylib from anaconda
    ):
        _libname = _find_library(_libname)
        if _libname is not None:
            break
    else:
        # for/else: no candidate was resolved by find_library().
        raise OSError('PortAudio library not found')
    _lib = _ffi.dlopen(_libname)
except OSError:
    if _platform.system() == 'Darwin':
        _libname = 'libportaudio.dylib'
    elif _platform.system() == 'Windows':
        _libname = 'libportaudio' + _platform.architecture()[0] + '.dll'
    else:
        # No bundled binaries for this platform; re-raise the original error.
        raise
    import _sounddevice_data
    _libname = _os.path.join(
        next(iter(_sounddevice_data.__path__)), 'portaudio-binaries', _libname)
    _lib = _ffi.dlopen(_libname)
# Mapping from NumPy-style dtype names to PortAudio sample format flags.
_sampleformats = {
    'float32': _lib.paFloat32,
    'int32': _lib.paInt32,
    'int24': _lib.paInt24,
    'int16': _lib.paInt16,
    'int8': _lib.paInt8,
    'uint8': _lib.paUInt8,
}
# Module-wide state:
# _initialized: presumably a reference count for PortAudio initialization,
#     managed by code outside this chunk -- TODO confirm.
# _last_callback: context of the most recent play()/rec()/playrec() call
#     (used by wait()/stop()/get_status()/get_stream()); None initially.
_initialized = 0
_last_callback = None
def play(data, samplerate=None, mapping=None, blocking=False, loop=False,
         **kwargs):
    """Play back a NumPy array containing audio data.

    Convenience function for interactive use and small scripts; it
    cannot be used for multiple overlapping playbacks.  Internally it
    stops any running `play()`/`rec()`/`playrec()` invocation, creates
    an `OutputStream` with a callback taking care of the actual
    playback, starts the stream and -- if *blocking* is true -- waits
    until playback is done.

    If you need more control (e.g. block-wise gapless playback,
    multiple overlapping playbacks, ...), explicitly create an
    `OutputStream` yourself.  If NumPy is not available, use a
    `RawOutputStream`.

    Parameters
    ----------
    data : array_like
        Audio data to be played back.  The columns of a
        two-dimensional array are interpreted as channels,
        one-dimensional arrays are treated as mono data.
        The data types *float64*, *float32*, *int32*, *int16*, *int8*
        and *uint8* can be used; *float64* data is simply converted to
        *float32* before passing it to PortAudio, because it's not
        supported natively.
    mapping : array_like, optional
        List of channel numbers (starting with 1) where the columns of
        *data* shall be played back on.  Must have the same length as
        the number of channels in *data* (except if *data* is mono, in
        which case the signal is played back on all given output
        channels).  Each channel number may only appear once.
    blocking : bool, optional
        If ``True``, wait until playback is finished; if ``False``
        (the default), return immediately while playback continues in
        the background.  A non-blocking invocation can be stopped with
        `stop()` or turned into a blocking one with `wait()`.
    loop : bool, optional
        Play *data* in a loop.

    Other Parameters
    ----------------
    samplerate, **kwargs
        All parameters of `OutputStream` -- except *channels*, *dtype*,
        *callback* and *finished_callback* -- can be used.

    Notes
    -----
    If you don't specify the correct sampling rate (either with the
    *samplerate* argument or by assigning a value to
    `default.samplerate`), the audio data will be played back, but it
    might be too slow or too fast!

    See Also
    --------
    rec, playrec

    """
    context = _CallbackContext(loop=loop)
    context.frames = context.check_data(data, mapping, kwargs.get('device'))

    def _play_callback(outdata, frames, time, status):
        assert len(outdata) == frames
        context.callback_enter(status, outdata)
        context.write_outdata(outdata)
        context.callback_exit()

    context.start_stream(OutputStream, samplerate, context.output_channels,
                         context.output_dtype, _play_callback, blocking,
                         prime_output_buffers_using_stream_callback=False,
                         **kwargs)
def rec(frames=None, samplerate=None, channels=None, dtype=None,
        out=None, mapping=None, blocking=False, **kwargs):
    """Record audio data into a NumPy array.

    Convenience function for interactive use and small scripts.
    Internally it stops any running `play()`/`rec()`/`playrec()`
    invocation, creates an `InputStream` with a callback taking care of
    the actual recording, starts the stream and -- if *blocking* is
    true -- waits until recording is done.

    If you need more control (e.g. block-wise gapless recording,
    overlapping recordings, ...), explicitly create an `InputStream`
    yourself.  If NumPy is not available, use a `RawInputStream`.

    Parameters
    ----------
    frames : int, sometimes optional
        Number of frames to record.  Not needed if *out* is given.
    channels : int, optional
        Number of channels to record.  Not needed if *mapping* or
        *out* is given.  The default value can be changed with
        `default.channels`.
    dtype : str or numpy.dtype, optional
        Data type of the recording.  Not needed if *out* is given.
        The data types *float64*, *float32*, *int32*, *int16*, *int8*
        and *uint8* can be used.  For ``dtype='float64'``, audio data
        is recorded in *float32* format and converted afterwards,
        because it's not natively supported by PortAudio.  The default
        value can be changed with `default.dtype`.
    mapping : array_like, optional
        List of channel numbers (starting with 1) to record.
        If *mapping* is given, *channels* is silently ignored.
    blocking : bool, optional
        If ``True``, wait until recording is finished; if ``False``
        (the default), return immediately while recording continues in
        the background.  A non-blocking invocation can be stopped with
        `stop()` or turned into a blocking one with `wait()`.

    Returns
    -------
    numpy.ndarray or type(out)
        The recorded data.

        .. note:: By default (``blocking=False``), an array of data is
           returned which is still being written to while recording!
           The returned data is only valid once recording has stopped.
           Use `wait()` to make sure the recording is finished.

    Other Parameters
    ----------------
    out : numpy.ndarray or subclass, optional
        If *out* is specified, the recorded data is written into the
        given array instead of creating a new array.  In this case,
        the arguments *frames*, *channels* and *dtype* are silently
        ignored!  If *mapping* is given, its length must match the
        number of channels in *out*.
    samplerate, **kwargs
        All parameters of `InputStream` -- except *callback* and
        *finished_callback* -- can be used.

    Notes
    -----
    If you don't specify a sampling rate (either with the *samplerate*
    argument or by assigning a value to `default.samplerate`), the
    default sampling rate of the sound device will be used (see
    `query_devices()`).

    See Also
    --------
    play, playrec

    """
    context = _CallbackContext()
    out, context.frames = context.check_out(out, frames, channels, dtype,
                                            mapping)

    def _rec_callback(indata, frames, time, status):
        assert len(indata) == frames
        context.callback_enter(status, indata)
        context.read_indata(indata)
        context.callback_exit()

    context.start_stream(InputStream, samplerate, context.input_channels,
                         context.input_dtype, _rec_callback, blocking,
                         **kwargs)
    return out
def playrec(data, samplerate=None, channels=None, dtype=None,
            out=None, input_mapping=None, output_mapping=None, blocking=False,
            **kwargs):
    """Simultaneous playback and recording of NumPy arrays.

    Internally this stops any running `play()`/`rec()`/`playrec()`
    invocation, creates a `Stream` with a callback taking care of both
    directions, starts the stream and -- if *blocking* is true --
    waits until playback/recording is done.

    If you need more control (e.g. block-wise gapless playback and
    recording, realtime processing, ...), explicitly create a `Stream`
    yourself.  If NumPy is not available, use a `RawStream`.

    Parameters
    ----------
    data : array_like
        Audio data to be played back.  See `play()`.
    channels : int, sometimes optional
        Number of input channels, see `rec()`.  The number of output
        channels is obtained from *data.shape*.
    dtype : str or numpy.dtype, optional
        Input data type, see `rec()`.  If *dtype* is not specified, it
        is taken from *data.dtype* (i.e. `default.dtype` is ignored).
        The output data type is obtained from *data.dtype* anyway.
    input_mapping, output_mapping : array_like, optional
        See the parameter *mapping* of `rec()` and `play()`,
        respectively.
    blocking : bool, optional
        If ``True``, wait until playback/recording is finished; if
        ``False`` (the default), return immediately while
        playback/recording continues in the background.  A
        non-blocking invocation can be stopped with `stop()` or turned
        into a blocking one with `wait()`.

    Returns
    -------
    numpy.ndarray or type(out)
        The recorded data.  See `rec()`.

    Other Parameters
    ----------------
    out : numpy.ndarray or subclass, optional
        See `rec()`.
    samplerate, **kwargs
        All parameters of `Stream` -- except *channels*, *dtype*,
        *callback* and *finished_callback* -- can be used.

    Notes
    -----
    If you don't specify the correct sampling rate (either with the
    *samplerate* argument or by assigning a value to
    `default.samplerate`), the audio data will be played back, but it
    might be too slow or too fast!

    See Also
    --------
    play, rec

    """
    context = _CallbackContext()
    output_frames = context.check_data(data, output_mapping,
                                       kwargs.get('device'))
    if dtype is None:
        dtype = context.data.dtype  # ignore module defaults
    out, input_frames = context.check_out(out, output_frames, channels,
                                          dtype, input_mapping)
    if input_frames != output_frames:
        raise ValueError('len(data) != len(out)')
    context.frames = input_frames

    def _playrec_callback(indata, outdata, frames, time, status):
        assert len(indata) == len(outdata) == frames
        context.callback_enter(status, indata)
        context.read_indata(indata)
        context.write_outdata(outdata)
        context.callback_exit()

    context.start_stream(Stream, samplerate,
                         (context.input_channels, context.output_channels),
                         (context.input_dtype, context.output_dtype),
                         _playrec_callback, blocking,
                         prime_output_buffers_using_stream_callback=False,
                         **kwargs)
    return out
def wait(ignore_errors=True):
    """Wait for `play()`/`rec()`/`playrec()` to be finished.

    Playback/recording can be stopped with a `KeyboardInterrupt`.

    Returns
    -------
    CallbackFlags or None
        A `CallbackFlags` object if at least one buffer over-/underrun
        happened during the last playback/recording, else ``None``.

    See Also
    --------
    get_status

    """
    if not _last_callback:
        return None
    return _last_callback.wait(ignore_errors)
def stop(ignore_errors=True):
    """Stop playback/recording started by `play()`, `rec()` or `playrec()`.

    Streams created directly with `Stream`, `InputStream`,
    `OutputStream`, `RawStream`, `RawInputStream` or `RawOutputStream`
    are not affected.

    """
    if not _last_callback:
        return
    # Calling stop() before close() is necessary for older PortAudio
    # versions, see issue #87:
    _last_callback.stream.stop(ignore_errors)
    _last_callback.stream.close(ignore_errors)
def get_status():
    """Get info about over-/underflows in `play()`/`rec()`/`playrec()`.

    Returns
    -------
    CallbackFlags
        A `CallbackFlags` object holding information about the last
        invocation of `play()`, `rec()` or `playrec()`.

    Raises
    ------
    RuntimeError
        If none of those functions has been called yet.

    See Also
    --------
    wait

    """
    if not _last_callback:
        raise RuntimeError('play()/rec()/playrec() was not called yet')
    return _last_callback.status
def get_stream():
    """Get a reference to the current stream.

    This applies only to streams created by calls to `play()`, `rec()`
    or `playrec()`.

    Returns
    -------
    Stream
        An `OutputStream`, `InputStream` or `Stream` associated with
        the last invocation of `play()`, `rec()` or `playrec()`,
        respectively.

    Raises
    ------
    RuntimeError
        If none of those functions has been called yet.

    """
    if not _last_callback:
        raise RuntimeError('play()/rec()/playrec() was not called yet')
    return _last_callback.stream
def query_devices(device=None, kind=None):
    """Return information about available devices.

    Information and capabilities of PortAudio devices.  Devices may
    support input, output or both input and output.  To find the
    default input/output device(s), use `default.device`.

    Parameters
    ----------
    device : int or str, optional
        Numeric device ID or device name substring(s).  If specified,
        information about only the given *device* is returned in a
        single dictionary.
    kind : {'input', 'output'}, optional
        If *device* is not specified and *kind* is ``'input'`` or
        ``'output'``, a single dictionary is returned with information
        about the default input or output device, respectively.

    Returns
    -------
    dict or DeviceList
        A dictionary with information about the given *device* or --
        if no arguments were specified -- a `DeviceList` containing
        one dictionary for each available device.  The dictionaries
        have the following keys:

        ``'name'``
            The name of the device.
        ``'hostapi'``
            The ID of the corresponding host API.  Use
            `query_hostapis()` to get information about a host API.
        ``'max_input_channels'``, ``'max_output_channels'``
            The maximum number of input/output channels supported by
            the device.  See `default.channels`.
        ``'default_low_input_latency'``, ``'default_low_output_latency'``
            Default latency values for interactive performance.  This
            is used if `default.latency` (or the *latency* argument of
            `playrec()`, `Stream` etc.) is set to ``'low'``.
        ``'default_high_input_latency'``, ``'default_high_output_latency'``
            Default latency values for robust non-interactive
            applications (e.g. playing sound files).  This is used if
            `default.latency` (or the *latency* argument of
            `playrec()`, `Stream` etc.) is set to ``'high'``.
        ``'default_samplerate'``
            The default sampling frequency of the device.  This is
            used if `default.samplerate` is not set.

    Notes
    -----
    The list of devices can also be displayed in a terminal:

    .. code-block:: sh

        python3 -m sounddevice

    Examples
    --------
    The returned `DeviceList` can be indexed and iterated over like
    any sequence type (yielding the abovementioned dictionaries), but
    it also has a special string representation which is shown when
    used in an interactive Python session.

    Each available device is listed on one line together with the
    corresponding device ID, which can be assigned to `default.device`
    or used as *device* argument in `play()`, `Stream` etc.

    The first character of a line is ``>`` for the default input
    device, ``<`` for the default output device and ``*`` for the
    default input/output device.  After the device ID and the device
    name, the corresponding host API name is displayed.  In the end of
    each line, the maximum number of input and output channels is
    shown.

    On a GNU/Linux computer it might look somewhat like this:

    >>> import sounddevice as sd
    >>> sd.query_devices()
       0 HDA Intel: ALC662 rev1 Analog (hw:0,0), ALSA (2 in, 2 out)
       1 HDA Intel: ALC662 rev1 Digital (hw:0,1), ALSA (0 in, 2 out)
       2 HDA Intel: HDMI 0 (hw:0,3), ALSA (0 in, 8 out)
       3 sysdefault, ALSA (128 in, 128 out)
       4 front, ALSA (0 in, 2 out)
       5 surround40, ALSA (0 in, 2 out)
       6 surround51, ALSA (0 in, 2 out)
       7 surround71, ALSA (0 in, 2 out)
       8 iec958, ALSA (0 in, 2 out)
       9 spdif, ALSA (0 in, 2 out)
      10 hdmi, ALSA (0 in, 8 out)
    * 11 default, ALSA (128 in, 128 out)
      12 dmix, ALSA (0 in, 2 out)
      13 /dev/dsp, OSS (16 in, 16 out)

    Note that ALSA provides access to some "real" and some "virtual"
    devices.  The latter sometimes have a ridiculously high number of
    (virtual) inputs and outputs.

    On macOS, you might get something similar to this:

    >>> sd.query_devices()
      0 Built-in Line Input, Core Audio (2 in, 0 out)
    > 1 Built-in Digital Input, Core Audio (2 in, 0 out)
    < 2 Built-in Output, Core Audio (0 in, 2 out)
      3 Built-in Line Output, Core Audio (0 in, 2 out)
      4 Built-in Digital Output, Core Audio (0 in, 2 out)

    """
    if kind not in ('input', 'output', None):
        raise ValueError('Invalid kind: {!r}'.format(kind))
    if device is None and kind is None:
        # No filtering at all: one dictionary per device.
        device_count = _check(_lib.Pa_GetDeviceCount())
        return DeviceList(query_devices(i) for i in range(device_count))

    # Resolve a name substring or default lookup to a numeric device ID.
    device = _get_device_id(device, kind, raise_on_error=True)
    info = _lib.Pa_GetDeviceInfo(device)
    if not info:
        raise PortAudioError('Error querying device {}'.format(device))
    assert info.structVersion == 2

    name_bytes = _ffi.string(info.name)
    try:
        # We don't know beforehand if DirectSound and MME device names use
        # 'utf-8' or 'mbcs' encoding.  Try 'utf-8' first, because it more
        # likely raises an exception on 'mbcs' data than vice versa, see
        # https://github.com/spatialaudio/python-sounddevice/issues/72.
        # All other host APIs use 'utf-8' anyway.
        name = name_bytes.decode('utf-8')
    except UnicodeDecodeError:
        if info.hostApi in (
                _lib.Pa_HostApiTypeIdToHostApiIndex(_lib.paDirectSound),
                _lib.Pa_HostApiTypeIdToHostApiIndex(_lib.paMME)):
            name = name_bytes.decode('mbcs')
        else:
            raise

    device_dict = {
        'name': name,
        'hostapi': info.hostApi,
        'max_input_channels': info.maxInputChannels,
        'max_output_channels': info.maxOutputChannels,
        'default_low_input_latency': info.defaultLowInputLatency,
        'default_low_output_latency': info.defaultLowOutputLatency,
        'default_high_input_latency': info.defaultHighInputLatency,
        'default_high_output_latency': info.defaultHighOutputLatency,
        'default_samplerate': info.defaultSampleRate,
    }
    if kind and device_dict['max_' + kind + '_channels'] < 1:
        # A device of the requested kind must offer at least one channel.
        raise ValueError(
            'Not an {} device: {!r}'.format(kind, device_dict['name']))
    return device_dict
def query_hostapis(index=None):
    """Return information about available host APIs.

    Parameters
    ----------
    index : int, optional
        If specified, information about only the given host API *index*
        is returned in a single dictionary.

    Returns
    -------
    dict or tuple of dict
        A dictionary with information about the given host API *index*
        or -- if no *index* was specified -- a tuple containing one
        dictionary for each available host API.
        The dictionaries have the following keys:

        ``'name'``
            The name of the host API.
        ``'devices'``
            A list of device IDs belonging to the host API.
            Use `query_devices()` to get information about a device.
        ``'default_input_device'``, ``'default_output_device'``
            The device ID of the default input/output device of the host
            API. If no default input/output device exists for the given
            host API, this is -1.

        .. note:: The overall default device(s) -- which can be
           overwritten by assigning to `default.device` -- take(s)
           precedence over `default.hostapi` and the information in
           the abovementioned dictionaries.

    See Also
    --------
    query_devices
    """
    if index is None:
        # No index given: collect information about every host API.
        api_count = _check(_lib.Pa_GetHostApiCount())
        return tuple(query_hostapis(i) for i in range(api_count))
    info = _lib.Pa_GetHostApiInfo(index)
    if not info:
        raise PortAudioError('Error querying host API {}'.format(index))
    assert info.structVersion == 1
    # Map the host-API-local device indices to global device IDs.
    device_ids = [_lib.Pa_HostApiDeviceIndexToDeviceIndex(index, i)
                  for i in range(info.deviceCount)]
    return {
        'name': _ffi.string(info.name).decode(),
        'devices': device_ids,
        'default_input_device': info.defaultInputDevice,
        'default_output_device': info.defaultOutputDevice,
    }
def check_input_settings(device=None, channels=None, dtype=None,
                         extra_settings=None, samplerate=None):
    """Check if given input device settings are supported.

    All parameters are optional, `default` settings are used for any
    unspecified parameters.  If the settings are supported, the function
    does nothing; if not, an exception is raised.

    Parameters
    ----------
    device : int or str, optional
        Device ID or device name substring(s), see `default.device`.
    channels : int, optional
        Number of input channels, see `default.channels`.
    dtype : str or numpy.dtype, optional
        Data type for input samples, see `default.dtype`.
    extra_settings : settings object, optional
        This can be used for host-API-specific input settings.
        See `default.extra_settings`.
    samplerate : float, optional
        Sampling frequency, see `default.samplerate`.
    """
    # Only the PaStreamParameters struct and the resolved samplerate are
    # needed here; dtype and samplesize are ignored.
    parameters, _, _, samplerate = _get_stream_parameters(
        'input', device=device, channels=channels, dtype=dtype,
        latency=None, extra_settings=extra_settings,
        samplerate=samplerate)
    _check(_lib.Pa_IsFormatSupported(parameters, _ffi.NULL, samplerate))
def check_output_settings(device=None, channels=None, dtype=None,
                          extra_settings=None, samplerate=None):
    """Check if given output device settings are supported.

    Same as `check_input_settings()`, just for output device
    settings.
    """
    # Only the PaStreamParameters struct and the resolved samplerate are
    # needed here; dtype and samplesize are ignored.
    parameters, _, _, samplerate = _get_stream_parameters(
        'output', device=device, channels=channels, dtype=dtype,
        latency=None, extra_settings=extra_settings,
        samplerate=samplerate)
    _check(_lib.Pa_IsFormatSupported(_ffi.NULL, parameters, samplerate))
def sleep(msec):
    """Put the caller to sleep for at least *msec* milliseconds.

    The function may sleep longer than requested so don't rely on this
    for accurate musical timing.

    Parameters
    ----------
    msec : int
        Number of milliseconds to pause the calling thread.
    """
    # Delegates directly to PortAudio's Pa_Sleep().
    _lib.Pa_Sleep(msec)
def get_portaudio_version():
    """Get version information for the PortAudio library.

    Returns the release number and a textual description of the current
    PortAudio build, e.g. ::

        (1899, 'PortAudio V19-devel (built Feb 15 2014 23:28:00)')

    """
    version_number = _lib.Pa_GetVersion()
    version_text = _ffi.string(_lib.Pa_GetVersionText()).decode()
    return version_number, version_text
class _StreamBase(object):
    """Direct or indirect base class for all stream classes."""
    def __init__(self, kind, samplerate=None, blocksize=None, device=None,
                 channels=None, dtype=None, latency=None, extra_settings=None,
                 callback=None, finished_callback=None, clip_off=None,
                 dither_off=None, never_drop_input=None,
                 prime_output_buffers_using_stream_callback=None,
                 userdata=None, wrap_callback=None):
        """Base class for PortAudio streams.
        This class should only be used by library authors who want to
        create their own custom stream classes.
        Most users should use the derived classes
        `Stream`, `InputStream`, `OutputStream`,
        `RawStream`, `RawInputStream` and `RawOutputStream` instead.
        This class has the same properties and methods as `Stream`,
        except for `read_available`/:meth:`~Stream.read` and
        `write_available`/:meth:`~Stream.write`.
        It can be created with the same parameters as `Stream`,
        except that there are three additional parameters
        and the *callback* parameter also accepts a C function pointer.
        Parameters
        ----------
        kind : {'input', 'output', 'duplex'}
            The desired type of stream: for recording, playback or both.
        callback : Python callable or CData function pointer, optional
            If *wrap_callback* is ``None`` this can be a function pointer
            provided by CFFI.
            Otherwise, it has to be a Python callable.
        wrap_callback : {'array', 'buffer'}, optional
            If *callback* is a Python callable, this selects whether
            the audio data is provided as NumPy array (like in `Stream`)
            or as Python buffer object (like in `RawStream`).
        userdata : CData void pointer
            This is passed to the underlying C callback function
            on each call and can only be accessed from a *callback*
            provided as ``CData`` function pointer.
        Examples
        --------
        A usage example of this class can be seen at
        https://github.com/spatialaudio/python-rtmixer.
        """
        assert kind in ('input', 'output', 'duplex')
        assert wrap_callback in ('array', 'buffer', None)
        # Fall back to the module-wide defaults for unspecified options.
        if blocksize is None:
            blocksize = default.blocksize
        if clip_off is None:
            clip_off = default.clip_off
        if dither_off is None:
            dither_off = default.dither_off
        if never_drop_input is None:
            never_drop_input = default.never_drop_input
        if prime_output_buffers_using_stream_callback is None:
            prime_output_buffers_using_stream_callback = \
                default.prime_output_buffers_using_stream_callback
        # Translate the Boolean options into PortAudio stream flags.
        stream_flags = _lib.paNoFlag
        if clip_off:
            stream_flags |= _lib.paClipOff
        if dither_off:
            stream_flags |= _lib.paDitherOff
        if never_drop_input:
            stream_flags |= _lib.paNeverDropInput
        if prime_output_buffers_using_stream_callback:
            stream_flags |= _lib.paPrimeOutputBuffersUsingStreamCallback
        if kind == 'duplex':
            # Parameters given as single values apply to both directions;
            # pairs are split into their input and output halves.
            idevice, odevice = _split(device)
            ichannels, ochannels = _split(channels)
            idtype, odtype = _split(dtype)
            ilatency, olatency = _split(latency)
            iextra, oextra = _split(extra_settings)
            iparameters, idtype, isize, isamplerate = _get_stream_parameters(
                'input', idevice, ichannels, idtype, ilatency, iextra,
                samplerate)
            oparameters, odtype, osize, osamplerate = _get_stream_parameters(
                'output', odevice, ochannels, odtype, olatency, oextra,
                samplerate)
            self._dtype = idtype, odtype
            self._device = iparameters.device, oparameters.device
            self._channels = iparameters.channelCount, oparameters.channelCount
            self._samplesize = isize, osize
            if isamplerate != osamplerate:
                raise ValueError(
                    'Input and output device must have the same samplerate')
            else:
                samplerate = isamplerate
        else:
            parameters, self._dtype, self._samplesize, samplerate = \
                _get_stream_parameters(kind, device, channels, dtype, latency,
                                       extra_settings, samplerate)
            self._device = parameters.device
            self._channels = parameters.channelCount
            if kind == 'input':
                iparameters = parameters
                oparameters = _ffi.NULL
            elif kind == 'output':
                iparameters = _ffi.NULL
                oparameters = parameters
        # If the Python callback raises an exception, the generated C
        # callback returns paAbort (see CFFI's error= argument), which
        # makes PortAudio abort the stream.
        ffi_callback = _ffi.callback('PaStreamCallback', error=_lib.paAbort)
        if callback is None:
            callback_ptr = _ffi.NULL
        elif kind == 'input' and wrap_callback == 'buffer':
            @ffi_callback
            def callback_ptr(iptr, optr, frames, time, status, _):
                data = _buffer(iptr, frames, self._channels, self._samplesize)
                return _wrap_callback(callback, data, frames, time, status)
        elif kind == 'input' and wrap_callback == 'array':
            @ffi_callback
            def callback_ptr(iptr, optr, frames, time, status, _):
                data = _array(
                    _buffer(iptr, frames, self._channels, self._samplesize),
                    self._channels, self._dtype)
                return _wrap_callback(callback, data, frames, time, status)
        elif kind == 'output' and wrap_callback == 'buffer':
            @ffi_callback
            def callback_ptr(iptr, optr, frames, time, status, _):
                data = _buffer(optr, frames, self._channels, self._samplesize)
                return _wrap_callback(callback, data, frames, time, status)
        elif kind == 'output' and wrap_callback == 'array':
            @ffi_callback
            def callback_ptr(iptr, optr, frames, time, status, _):
                data = _array(
                    _buffer(optr, frames, self._channels, self._samplesize),
                    self._channels, self._dtype)
                return _wrap_callback(callback, data, frames, time, status)
        elif kind == 'duplex' and wrap_callback == 'buffer':
            @ffi_callback
            def callback_ptr(iptr, optr, frames, time, status, _):
                ichannels, ochannels = self._channels
                isize, osize = self._samplesize
                idata = _buffer(iptr, frames, ichannels, isize)
                odata = _buffer(optr, frames, ochannels, osize)
                return _wrap_callback(
                    callback, idata, odata, frames, time, status)
        elif kind == 'duplex' and wrap_callback == 'array':
            @ffi_callback
            def callback_ptr(iptr, optr, frames, time, status, _):
                ichannels, ochannels = self._channels
                idtype, odtype = self._dtype
                isize, osize = self._samplesize
                idata = _array(_buffer(iptr, frames, ichannels, isize),
                               ichannels, idtype)
                odata = _array(_buffer(optr, frames, ochannels, osize),
                               ochannels, odtype)
                return _wrap_callback(
                    callback, idata, odata, frames, time, status)
        else:
            # Use cast() to allow CData from different FFI instance:
            callback_ptr = _ffi.cast('PaStreamCallback*', callback)
        # CFFI callback object must be kept alive during stream lifetime:
        self._callback = callback_ptr
        if userdata is None:
            userdata = _ffi.NULL
        self._ptr = _ffi.new('PaStream**')
        _check(_lib.Pa_OpenStream(self._ptr, iparameters, oparameters,
                                  samplerate, blocksize, stream_flags,
                                  callback_ptr, userdata),
               'Error opening {}'.format(self.__class__.__name__))
        # dereference PaStream** --> PaStream*
        self._ptr = self._ptr[0]
        self._blocksize = blocksize
        info = _lib.Pa_GetStreamInfo(self._ptr)
        if not info:
            raise PortAudioError('Could not obtain stream info')
        # TODO: assert info.structVersion == 1
        self._samplerate = info.sampleRate
        # For one-directional streams only the relevant latency is
        # stored; duplex streams store an (input, output) pair.
        if not oparameters:
            self._latency = info.inputLatency
        elif not iparameters:
            self._latency = info.outputLatency
        else:
            self._latency = info.inputLatency, info.outputLatency
        if finished_callback:
            if isinstance(finished_callback, _ffi.CData):
                self._finished_callback = finished_callback
            else:
                # Wrap the Python callable; the C-level userdata argument
                # is ignored.
                def finished_callback_wrapper(_):
                    return finished_callback()
                # CFFI callback object is kept alive during stream lifetime:
                self._finished_callback = _ffi.callback(
                    'PaStreamFinishedCallback', finished_callback_wrapper)
            _check(_lib.Pa_SetStreamFinishedCallback(self._ptr,
                                                     self._finished_callback))
    # Avoid confusion if something goes wrong before assigning self._ptr:
    _ptr = _ffi.NULL
    @property
    def samplerate(self):
        """The sampling frequency in Hertz (= frames per second).
        In cases where the hardware sampling frequency is inaccurate and
        PortAudio is aware of it, the value of this field may be
        different from the *samplerate* parameter passed to `Stream()`.
        If information about the actual hardware sampling frequency is
        not available, this field will have the same value as the
        *samplerate* parameter passed to `Stream()`.
        """
        return self._samplerate
    @property
    def blocksize(self):
        """Number of frames per block.
        The special value 0 means that the blocksize can change between
        blocks. See the *blocksize* argument of `Stream`.
        """
        return self._blocksize
    @property
    def device(self):
        """IDs of the input/output device."""
        return self._device
    @property
    def channels(self):
        """The number of input/output channels."""
        return self._channels
    @property
    def dtype(self):
        """Data type of the audio samples.
        See Also
        --------
        default.dtype, samplesize
        """
        return self._dtype
    @property
    def samplesize(self):
        """The size in bytes of a single sample.
        See Also
        --------
        dtype
        """
        return self._samplesize
    @property
    def latency(self):
        """The input/output latency of the stream in seconds.
        This value provides the most accurate estimate of input/output
        latency available to the implementation.
        It may differ significantly from the *latency* value(s) passed
        to `Stream()`.
        """
        return self._latency
    @property
    def active(self):
        """``True`` when the stream is active, ``False`` otherwise.
        A stream is active after a successful call to `start()`, until
        it becomes inactive either as a result of a call to `stop()` or
        `abort()`, or as a result of an exception raised in the stream
        callback. In the latter case, the stream is considered inactive
        after the last buffer has finished playing.
        See Also
        --------
        stopped
        """
        # A closed stream is never active; don't query PortAudio then.
        if self.closed:
            return False
        return _check(_lib.Pa_IsStreamActive(self._ptr)) == 1
    @property
    def stopped(self):
        """``True`` when the stream is stopped, ``False`` otherwise.
        A stream is considered to be stopped prior to a successful call
        to `start()` and after a successful call to `stop()` or
        `abort()`. If a stream callback is cancelled (by raising an
        exception) the stream is *not* considered to be stopped.
        See Also
        --------
        active
        """
        # A closed stream is reported as stopped without querying PortAudio.
        if self.closed:
            return True
        return _check(_lib.Pa_IsStreamStopped(self._ptr)) == 1
    @property
    def closed(self):
        """``True`` after a call to `close()`, ``False`` otherwise."""
        return self._ptr == _ffi.NULL
    @property
    def time(self):
        """The current stream time in seconds.
        This is according to the same clock used to generate the
        timestamps passed with the *time* argument to the stream
        callback (see the *callback* argument of `Stream`).
        The time values are monotonically increasing and have
        unspecified origin.
        This provides valid time values for the entire life of the
        stream, from when the stream is opened until it is closed.
        Starting and stopping the stream does not affect the passage of
        time as provided here.
        This time may be used for synchronizing other events to the
        audio stream, for example synchronizing audio to MIDI.
        """
        time = _lib.Pa_GetStreamTime(self._ptr)
        if not time:
            raise PortAudioError('Error getting stream time')
        return time
    @property
    def cpu_load(self):
        """CPU usage information for the stream.
        The "CPU Load" is a fraction of total CPU time consumed by a
        callback stream's audio processing routines including, but not
        limited to the client supplied stream callback. This function
        does not work with blocking read/write streams.
        This may be used in the stream callback function or in the
        application.
        It provides a floating point value, typically between 0.0 and
        1.0, where 1.0 indicates that the stream callback is consuming
        the maximum number of CPU cycles possible to maintain real-time
        operation. A value of 0.5 would imply that PortAudio and the
        stream callback was consuming roughly 50% of the available CPU
        time. The value may exceed 1.0. A value of 0.0 will always be
        returned for a blocking read/write stream, or if an error
        occurs.
        """
        return _lib.Pa_GetStreamCpuLoad(self._ptr)
    def __enter__(self):
        """Start the stream in the beginning of a "with" statement."""
        self.start()
        return self
    def __exit__(self, *args):
        """Stop and close the stream when exiting a "with" statement."""
        self.stop()
        self.close()
    def start(self):
        """Commence audio processing.
        See Also
        --------
        stop, abort
        """
        err = _lib.Pa_StartStream(self._ptr)
        # Starting an already running stream is not treated as an error:
        if err != _lib.paStreamIsNotStopped:
            _check(err, 'Error starting stream')
    def stop(self, ignore_errors=True):
        """Terminate audio processing.
        This waits until all pending audio buffers have been played
        before it returns.
        Parameters
        ----------
        ignore_errors : bool, optional
            If ``False``, an error while stopping raises an exception.
        See Also
        --------
        start, abort
        """
        err = _lib.Pa_StopStream(self._ptr)
        if not ignore_errors:
            _check(err, 'Error stopping stream')
    def abort(self, ignore_errors=True):
        """Terminate audio processing immediately.
        This does not wait for pending buffers to complete.
        Parameters
        ----------
        ignore_errors : bool, optional
            If ``False``, an error while aborting raises an exception.
        See Also
        --------
        start, stop
        """
        err = _lib.Pa_AbortStream(self._ptr)
        if not ignore_errors:
            _check(err, 'Error aborting stream')
    def close(self, ignore_errors=True):
        """Close the stream.
        If the audio stream is active any pending buffers are discarded
        as if `abort()` had been called.
        Parameters
        ----------
        ignore_errors : bool, optional
            If ``False``, an error while closing raises an exception.
        """
        err = _lib.Pa_CloseStream(self._ptr)
        # Mark the stream as closed (see the `closed` property) before
        # possibly raising.
        self._ptr = _ffi.NULL
        if not ignore_errors:
            _check(err, 'Error closing stream')
class RawInputStream(_StreamBase):
    """Raw stream for recording only. See __init__() and RawStream."""
    def __init__(self, samplerate=None, blocksize=None,
                 device=None, channels=None, dtype=None, latency=None,
                 extra_settings=None, callback=None, finished_callback=None,
                 clip_off=None, dither_off=None, never_drop_input=None,
                 prime_output_buffers_using_stream_callback=None):
        """PortAudio input stream (using buffer objects).
        This is the same as `InputStream`, except that the *callback*
        function and :meth:`~RawStream.read` work on plain Python buffer
        objects instead of on NumPy arrays.
        NumPy is not necessary for using this.
        Parameters
        ----------
        dtype : str
            See `RawStream`.
        callback : callable
            User-supplied function to consume audio data in response to
            requests from an active stream.
            The callback must have this signature::
                callback(indata: buffer, frames: int,
                         time: CData, status: CallbackFlags) -> None
            The arguments are the same as in the *callback* parameter of
            `RawStream`, except that *outdata* is missing.
        See Also
        --------
        RawStream, Stream
        """
        # _remove_self() presumably strips 'self' from locals() so that
        # all constructor arguments are forwarded as keyword arguments;
        # no local variable may be created before this call, or it would
        # be forwarded too.
        _StreamBase.__init__(self, kind='input', wrap_callback='buffer',
                             **_remove_self(locals()))
    @property
    def read_available(self):
        """The number of frames that can be read without waiting.
        Returns a value representing the maximum number of frames that
        can be read from the stream without blocking or busy waiting.
        """
        return _check(_lib.Pa_GetStreamReadAvailable(self._ptr))
    def read(self, frames):
        """Read samples from the stream into a buffer.
        This is the same as `Stream.read()`, except that it returns
        a plain Python buffer object instead of a NumPy array.
        NumPy is not necessary for using this.
        Parameters
        ----------
        frames : int
            The number of frames to be read. See `Stream.read()`.
        Returns
        -------
        data : buffer
            A buffer of interleaved samples. The buffer contains
            samples in the format specified by the *dtype* parameter
            used to open the stream, and the number of channels
            specified by *channels*.
            See also `samplesize`.
        overflowed : bool
            See `Stream.read()`.
        """
        # Only the input halves of the (per-direction) stream settings
        # are relevant here.
        channels, _ = _split(self._channels)
        samplesize, _ = _split(self._samplesize)
        # Allocate a byte buffer for *frames* frames of interleaved samples.
        data = _ffi.new('signed char[]', channels * samplesize * frames)
        err = _lib.Pa_ReadStream(self._ptr, data, frames)
        if err == _lib.paInputOverflowed:
            # Overflow is reported via the return value, not an exception.
            overflowed = True
        else:
            _check(err)
            overflowed = False
        return _ffi.buffer(data), overflowed
class RawOutputStream(_StreamBase):
    """Raw stream for playback only. See __init__() and RawStream."""
    def __init__(self, samplerate=None, blocksize=None,
                 device=None, channels=None, dtype=None, latency=None,
                 extra_settings=None, callback=None, finished_callback=None,
                 clip_off=None, dither_off=None, never_drop_input=None,
                 prime_output_buffers_using_stream_callback=None):
        """PortAudio output stream (using buffer objects).
        This is the same as `OutputStream`, except that the *callback*
        function and :meth:`~RawStream.write` work on plain Python
        buffer objects instead of on NumPy arrays.
        NumPy is not necessary for using this.
        Parameters
        ----------
        dtype : str
            See `RawStream`.
        callback : callable
            User-supplied function to generate audio data in response to
            requests from an active stream.
            The callback must have this signature::
                callback(outdata: buffer, frames: int,
                         time: CData, status: CallbackFlags) -> None
            The arguments are the same as in the *callback* parameter of
            `RawStream`, except that *indata* is missing.
        See Also
        --------
        RawStream, Stream
        """
        # _remove_self() presumably strips 'self' from locals() so that
        # all constructor arguments are forwarded as keyword arguments;
        # no local variable may be created before this call, or it would
        # be forwarded too.
        _StreamBase.__init__(self, kind='output', wrap_callback='buffer',
                             **_remove_self(locals()))
    @property
    def write_available(self):
        """The number of frames that can be written without waiting.
        Returns a value representing the maximum number of frames that
        can be written to the stream without blocking or busy waiting.
        """
        return _check(_lib.Pa_GetStreamWriteAvailable(self._ptr))
    def write(self, data):
        """Write samples to the stream.
        This is the same as `Stream.write()`, except that it expects
        a plain Python buffer object instead of a NumPy array.
        NumPy is not necessary for using this.
        Parameters
        ----------
        data : buffer or bytes or iterable of int
            A buffer of interleaved samples. The buffer contains
            samples in the format specified by the *dtype* argument used
            to open the stream, and the number of channels specified by
            *channels*. The length of the buffer is not constrained to
            a specific range, however high performance applications will
            want to match this parameter to the *blocksize* parameter
            used when opening the stream. See also `samplesize`.
        Returns
        -------
        underflowed : bool
            See `Stream.write()`.
        """
        # Best effort: obtain a C-level view of the data; fall back to
        # passing the object through unchanged.
        try:
            data = _ffi.from_buffer(data)
        except AttributeError:
            pass  # from_buffer() not supported
        except TypeError:
            pass  # input is not a buffer
        # Only the output halves of the (per-direction) settings matter.
        _, samplesize = _split(self._samplesize)
        _, channels = _split(self._channels)
        # Validate that the data holds a whole number of frames.
        samples, remainder = divmod(len(data), samplesize)
        if remainder:
            raise ValueError('len(data) not divisible by samplesize')
        frames, remainder = divmod(samples, channels)
        if remainder:
            raise ValueError('Number of samples not divisible by channels')
        err = _lib.Pa_WriteStream(self._ptr, data, frames)
        if err == _lib.paOutputUnderflowed:
            # Underflow is reported via the return value, not an exception.
            underflowed = True
        else:
            _check(err)
            underflowed = False
        return underflowed
class RawStream(RawInputStream, RawOutputStream):
    """Raw stream for playback and recording. See __init__()."""
    def __init__(self, samplerate=None, blocksize=None,
                 device=None, channels=None, dtype=None, latency=None,
                 extra_settings=None, callback=None, finished_callback=None,
                 clip_off=None, dither_off=None, never_drop_input=None,
                 prime_output_buffers_using_stream_callback=None):
        """PortAudio input/output stream (using buffer objects).
        This is the same as `Stream`, except that the *callback*
        function and `read()`/`write()` work on plain Python buffer
        objects instead of on NumPy arrays.
        NumPy is not necessary for using this.
        To open a "raw" input-only or output-only stream use
        `RawInputStream` or `RawOutputStream`, respectively.
        If you want to handle audio data as NumPy arrays instead of
        buffer objects, use `Stream`, `InputStream` or `OutputStream`.
        Parameters
        ----------
        dtype : str or pair of str
            The sample format of the buffers provided to the stream
            callback, `read()` or `write()`.
            In addition to the formats supported by `Stream`
            (``'float32'``, ``'int32'``, ``'int16'``, ``'int8'``,
            ``'uint8'``), this also supports ``'int24'``, i.e.
            packed 24 bit format.
            The default value can be changed with `default.dtype`.
            See also `samplesize`.
        callback : callable
            User-supplied function to consume, process or generate audio
            data in response to requests from an active stream.
            The callback must have this signature::
                callback(indata: buffer, outdata: buffer, frames: int,
                         time: CData, status: CallbackFlags) -> None
            The arguments are the same as in the *callback* parameter of
            `Stream`, except that *indata* and *outdata* are plain
            Python buffer objects instead of NumPy arrays.
        See Also
        --------
        RawInputStream, RawOutputStream, Stream
        """
        # _remove_self() presumably strips 'self' from locals() so that
        # all constructor arguments are forwarded as keyword arguments;
        # no local variable may be created before this call, or it would
        # be forwarded too.
        _StreamBase.__init__(self, kind='duplex', wrap_callback='buffer',
                             **_remove_self(locals()))
class InputStream(RawInputStream):
    """Stream for input only. See __init__() and Stream."""
    def __init__(self, samplerate=None, blocksize=None,
                 device=None, channels=None, dtype=None, latency=None,
                 extra_settings=None, callback=None, finished_callback=None,
                 clip_off=None, dither_off=None, never_drop_input=None,
                 prime_output_buffers_using_stream_callback=None):
        """PortAudio input stream (using NumPy).
        This has the same methods and attributes as `Stream`, except
        :meth:`~Stream.write` and `write_available`.
        Furthermore, the stream callback is expected to have a different
        signature (see below).
        Parameters
        ----------
        callback : callable
            User-supplied function to consume audio in response to
            requests from an active stream.
            The callback must have this signature::
                callback(indata: numpy.ndarray, frames: int,
                         time: CData, status: CallbackFlags) -> None
            The arguments are the same as in the *callback* parameter of
            `Stream`, except that *outdata* is missing.
        See Also
        --------
        Stream, RawInputStream
        """
        # _remove_self() presumably strips 'self' from locals() so that
        # all constructor arguments are forwarded as keyword arguments;
        # no local variable may be created before this call, or it would
        # be forwarded too.
        _StreamBase.__init__(self, kind='input', wrap_callback='array',
                             **_remove_self(locals()))
    def read(self, frames):
        """Read samples from the stream into a NumPy array.
        The function doesn't return until all requested *frames* have
        been read -- this may involve waiting for the operating system
        to supply the data (except if no more than `read_available`
        frames were requested).
        This is the same as `RawStream.read()`, except that it
        returns a NumPy array instead of a plain Python buffer object.
        Parameters
        ----------
        frames : int
            The number of frames to be read. This parameter is not
            constrained to a specific range, however high performance
            applications will want to match this parameter to the
            *blocksize* parameter used when opening the stream.
        Returns
        -------
        data : numpy.ndarray
            A two-dimensional `numpy.ndarray` with one column per
            channel (i.e. with a shape of ``(frames, channels)``) and
            with a data type specified by `dtype`.
        overflowed : bool
            ``True`` if input data was discarded by PortAudio after the
            previous call and before this call.
        """
        # Only the input halves of the (per-direction) settings matter.
        dtype, _ = _split(self._dtype)
        channels, _ = _split(self._channels)
        # Delegate the actual reading to the raw base class, then wrap
        # the returned buffer in a NumPy array.
        data, overflowed = RawInputStream.read(self, frames)
        data = _array(data, channels, dtype)
        return data, overflowed
class OutputStream(RawOutputStream):
    """Stream for output only. See __init__() and Stream."""
    def __init__(self, samplerate=None, blocksize=None,
                 device=None, channels=None, dtype=None, latency=None,
                 extra_settings=None, callback=None, finished_callback=None,
                 clip_off=None, dither_off=None, never_drop_input=None,
                 prime_output_buffers_using_stream_callback=None):
        """PortAudio output stream (using NumPy).
        This has the same methods and attributes as `Stream`, except
        :meth:`~Stream.read` and `read_available`.
        Furthermore, the stream callback is expected to have a different
        signature (see below).
        Parameters
        ----------
        callback : callable
            User-supplied function to generate audio data in response to
            requests from an active stream.
            The callback must have this signature::
                callback(outdata: numpy.ndarray, frames: int,
                         time: CData, status: CallbackFlags) -> None
            The arguments are the same as in the *callback* parameter of
            `Stream`, except that *indata* is missing.
        See Also
        --------
        Stream, RawOutputStream
        """
        # _remove_self() presumably strips 'self' from locals() so that
        # all constructor arguments are forwarded as keyword arguments;
        # no local variable may be created before this call, or it would
        # be forwarded too.
        _StreamBase.__init__(self, kind='output', wrap_callback='array',
                             **_remove_self(locals()))
    def write(self, data):
        """Write samples to the stream.
        This function doesn't return until the entire buffer has been
        consumed -- this may involve waiting for the operating system to
        consume the data (except if *data* contains no more than
        `write_available` frames).
        This is the same as `RawStream.write()`, except that it
        expects a NumPy array instead of a plain Python buffer object.
        Parameters
        ----------
        data : array_like
            A two-dimensional array-like object with one column per
            channel (i.e. with a shape of ``(frames, channels)``) and
            with a data type specified by `dtype`. A one-dimensional
            array can be used for mono data. The array layout must be
            C-contiguous (see :func:`numpy.ascontiguousarray`).
            The length of the buffer is not constrained to a specific
            range, however high performance applications will want to
            match this parameter to the *blocksize* parameter used when
            opening the stream.
        Returns
        -------
        underflowed : bool
            ``True`` if additional output data was inserted after the
            previous call and before this call.
        """
        # NumPy is only needed for this method, hence the local import
        # (keeps the rest of the module usable without NumPy).
        import numpy as np
        data = np.asarray(data)
        # Only the output halves of the (per-direction) settings matter.
        _, dtype = _split(self._dtype)
        _, channels = _split(self._channels)
        # One-dimensional arrays are accepted (mono data, see docstring);
        # otherwise the second axis must match the channel count.
        if data.ndim > 1 and data.shape[1] != channels:
            raise ValueError('Number of channels must match')
        if data.dtype != dtype:
            raise TypeError('dtype mismatch: {!r} vs {!r}'.format(
                data.dtype.name, dtype))
        if not data.flags.c_contiguous:
            raise TypeError('data must be C-contiguous')
        # The raw base class handles frame counting and the actual write.
        return RawOutputStream.write(self, data)
class Stream(InputStream, OutputStream):
    """Stream for input and output. See __init__()."""
    def __init__(self, samplerate=None, blocksize=None,
                 device=None, channels=None, dtype=None, latency=None,
                 extra_settings=None, callback=None, finished_callback=None,
                 clip_off=None, dither_off=None, never_drop_input=None,
                 prime_output_buffers_using_stream_callback=None):
        """PortAudio stream for simultaneous input and output (using NumPy).
        To open an input-only or output-only stream use `InputStream` or
        `OutputStream`, respectively.  If you want to handle audio data
        as plain buffer objects instead of NumPy arrays, use
        `RawStream`, `RawInputStream` or `RawOutputStream`.
        A single stream can provide multiple channels of real-time
        streaming audio input and output to a client application.  A
        stream provides access to audio hardware represented by one or
        more devices.  Depending on the underlying host API, it may be
        possible to open multiple streams using the same device, however
        this behavior is implementation defined.  Portable applications
        should assume that a device may be simultaneously used by at
        most one stream.
        The arguments *device*, *channels*, *dtype* and *latency* can be
        either single values (which will be used for both input and
        output parameters) or pairs of values (where the first one is
        the value for the input and the second one for the output).
        All arguments are optional, the values for unspecified
        parameters are taken from the `default` object.
        If one of the values of a parameter pair is ``None``, the
        corresponding value from `default` will be used instead.
        The created stream is inactive (see `active`, `stopped`).
        It can be started with `start()`.
        Every stream object is also a
        :ref:`context manager <python:context-managers>`, i.e. it can be
        used in a :ref:`with statement <python:with>` to automatically
        call `start()` in the beginning of the statement and `stop()`
        and `close()` on exit.
        Parameters
        ----------
        samplerate : float, optional
            The desired sampling frequency (for both input and output).
            The default value can be changed with `default.samplerate`.
        blocksize : int, optional
            The number of frames passed to the stream callback function,
            or the preferred block granularity for a blocking read/write
            stream.
            The special value ``blocksize=0`` (which is the default) may
            be used to request that the stream callback will receive an
            optimal (and possibly varying) number of frames based on
            host requirements and the requested latency settings.
            The default value can be changed with `default.blocksize`.
            .. note:: With some host APIs, the use of non-zero
               *blocksize* for a callback stream may introduce an
               additional layer of buffering which could introduce
               additional latency.  PortAudio guarantees that the
               additional latency will be kept to the theoretical
               minimum however, it is strongly recommended that a
               non-zero *blocksize* value only be used when your
               algorithm requires a fixed number of frames per stream
               callback.
        device : int or str or pair thereof, optional
            Device index(es) or query string(s) specifying the device(s)
            to be used.  The default value(s) can be changed with
            `default.device`.
        channels : int or pair of int, optional
            The number of channels of sound to be delivered to the
            stream callback or accessed by `read()` or `write()`.  It
            can range from 1 to the value of ``'max_input_channels'`` or
            ``'max_output_channels'`` in the dict returned by
            `query_devices()`.  By default, the maximum possible number
            of channels for the selected device is used (which may not
            be what you want; see `query_devices()`).  The default
            value(s) can be changed with `default.channels`.
        dtype : str or numpy.dtype or pair thereof, optional
            The sample format of the `numpy.ndarray` provided to the
            stream callback, `read()` or `write()`.
            It may be any of *float32*, *int32*, *int16*, *int8*,
            *uint8*.  See `numpy.dtype`.
            The *float64* data type is not supported, this is only
            supported for convenience in `play()`/`rec()`/`playrec()`.
            The packed 24 bit format ``'int24'`` is only supported in
            the "raw" stream classes, see `RawStream`.  The default
            value(s) can be changed with `default.dtype`.
        latency : float or {'low', 'high'} or pair thereof, optional
            The desired latency in seconds.  The special values
            ``'low'`` and ``'high'`` (latter being the default) select
            the default low and high latency, respectively (see
            `query_devices()`).  The default value(s) can be changed
            with `default.latency`.
            Where practical, implementations should configure their
            latency based on this parameter, otherwise they may choose
            the closest viable latency instead.  Unless the suggested
            latency is greater than the absolute upper limit for the
            device, implementations should round the *latency* up to the
            next practical value -- i.e. to provide an equal or higher
            latency wherever possible.  Actual latency values for an
            open stream may be retrieved using the `latency` attribute.
            .. note:: Specifying the desired latency as 'high' does
               not guarantee a stable audio stream.  For reference, by
               default Audacity specifies a desired latency of 100ms and
               achieves robust performance.
        extra_settings : settings object or pair thereof, optional
            This can be used for host-API-specific input/output
            settings.  See `default.extra_settings`.
        callback : callable, optional
            User-supplied function to consume, process or generate audio
            data in response to requests from an `active` stream.
            When a stream is running, PortAudio calls the stream
            callback periodically.  The callback function is responsible
            for processing and filling input and output buffers,
            respectively.
            If no *callback* is given, the stream will be opened in
            "blocking read/write" mode.  In blocking mode, the client
            can receive sample data using `read()` and write sample
            data using `write()`, the number of frames that may be
            read or written without blocking is returned by
            `read_available` and `write_available`, respectively.
            The callback must have this signature::
                callback(indata: ndarray, outdata: ndarray, frames: int,
                         time: CData, status: CallbackFlags) -> None
            The first and second argument are the input and output
            buffer, respectively, as two-dimensional `numpy.ndarray`
            with one column per channel (i.e. with a shape of
            ``(frames, channels)``) and with a data type specified by
            `dtype`.
            The output buffer contains uninitialized data and the
            *callback* is supposed to fill it with proper audio data.
            If no data is available, the buffer should be filled with
            zeros (e.g. by using ``outdata.fill(0)``).
            .. note:: In Python, assigning to an identifier merely
               re-binds the identifier to another object, so this *will
               not work* as expected::
                   outdata = my_data  # Don't do this!
               To actually assign data to the buffer itself, you can use
               indexing, e.g.::
                   outdata[:] = my_data
               ... which fills the whole buffer, or::
                   outdata[:, 1] = my_channel_data
               ... which only fills one channel.
            The third argument holds the number of frames to be
            processed by the stream callback.  This is the same as the
            length of the input and output buffers.
            The forth argument provides a CFFI structure with
            timestamps indicating the ADC capture time of the first
            sample in the input buffer (``time.inputBufferAdcTime``),
            the DAC output time of the first sample in the output buffer
            (``time.outputBufferDacTime``) and the time the callback was
            invoked (``time.currentTime``).
            These time values are expressed in seconds and are
            synchronised with the time base used by `time` for the
            associated stream.
            The fifth argument is a `CallbackFlags` instance indicating
            whether input and/or output buffers have been inserted or
            will be dropped to overcome underflow or overflow
            conditions.
            If an exception is raised in the *callback*, it will not be
            called again.  If `CallbackAbort` is raised, the stream will
            finish as soon as possible.  If `CallbackStop` is raised,
            the stream will continue until all buffers generated by the
            callback have been played.  This may be useful in
            applications such as soundfile players where a specific
            duration of output is required.  If another exception is
            raised, its traceback is printed to `sys.stderr`.
            Exceptions are *not* propagated to the main thread, i.e. the
            main Python program keeps running as if nothing had
            happened.
            .. note:: The *callback* must always fill the entire output
               buffer, no matter if or which exceptions are raised.
            If no exception is raised in the *callback*, it
            automatically continues to be called until `stop()`,
            `abort()` or `close()` are used to stop the stream.
            The PortAudio stream callback runs at very high or real-time
            priority.  It is required to consistently meet its time
            deadlines.  Do not allocate memory, access the file system,
            call library functions or call other functions from the
            stream callback that may block or take an unpredictable
            amount of time to complete.  With the exception of
            `cpu_load` it is not permissible to call PortAudio API
            functions from within the stream callback.
            In order for a stream to maintain glitch-free operation the
            callback must consume and return audio data faster than it
            is recorded and/or played.  PortAudio anticipates that each
            callback invocation may execute for a duration approaching
            the duration of *frames* audio frames at the stream's
            sampling frequency.  It is reasonable to expect to be able
            to utilise 70% or more of the available CPU time in the
            PortAudio callback.  However, due to buffer size adaption
            and other factors, not all host APIs are able to guarantee
            audio stability under heavy CPU load with arbitrary fixed
            callback buffer sizes.  When high callback CPU utilisation
            is required the most robust behavior can be achieved by
            using ``blocksize=0``.
        finished_callback : callable, optional
            User-supplied function which will be called when the stream
            becomes inactive (i.e. once a call to `stop()` will not
            block).
            A stream will become inactive after the stream callback
            raises an exception or when `stop()` or `abort()` is called.
            For a stream providing audio output, if the stream callback
            raises `CallbackStop`, or `stop()` is called, the stream
            finished callback will not be called until all generated
            sample data has been played.  The callback must have this
            signature::
                finished_callback() -> None
        clip_off : bool, optional
            See `default.clip_off`.
        dither_off : bool, optional
            See `default.dither_off`.
        never_drop_input : bool, optional
            See `default.never_drop_input`.
        prime_output_buffers_using_stream_callback : bool, optional
            See `default.prime_output_buffers_using_stream_callback`.
        """
        # `locals()` at this point contains exactly this method's
        # parameters (plus 'self', which _remove_self() strips), so they
        # can all be forwarded unchanged to the base class.  Do not
        # introduce local variables before this call -- they would end
        # up in `locals()` as well and break the forwarding.
        _StreamBase.__init__(self, kind='duplex', wrap_callback='array',
                             **_remove_self(locals()))
class DeviceList(tuple):
    """An immutable sequence holding one info dict per audio device.

    Users never create instances of this class themselves; it is the
    return type of `query_devices()`.  Each element is a dictionary
    with the keys described in `query_devices()`.

    Its string representation -- shown e.g. as the return value of
    `query_devices()` in an interactive Python session, via
    :func:`print`, :func:`repr` or :class:`str() <str>` -- is a
    human-readable table of all devices.

    """
    __slots__ = ()

    def __repr__(self):
        # Markers in the first column: '>' = default input device,
        # '<' = default output device, '*' = default for both.
        idev = _get_device_id(default.device['input'], 'input')
        odev = _get_device_id(default.device['output'], 'output')
        digits = len(str(_lib.Pa_GetDeviceCount() - 1))
        hostapi_names = [hostapi['name'] for hostapi in query_hostapis()]
        rows = []
        for idx, info in enumerate(self):
            marker = (' ', '>', '<', '*')[(idx == idev) + 2 * (idx == odev)]
            rows.append(
                u'{mark} {idx:{dig}} {name}, {ha} ({ins} in, {outs} out)'.format(
                    mark=marker,
                    idx=idx,
                    dig=digits,
                    name=info['name'],
                    ha=hostapi_names[info['hostapi']],
                    ins=info['max_input_channels'],
                    outs=info['max_output_channels']))
        return '\n'.join(rows)
class CallbackFlags(object):
    """Flag bits for the *status* argument to a stream *callback*.
    If you experience under-/overflows, you can try to increase the
    ``latency`` and/or ``blocksize`` settings.
    You should also avoid anything that could block the callback
    function for a long time, e.g. extensive computations, waiting for
    another thread, reading/writing files, network connections, etc.
    See Also
    --------
    Stream
    Examples
    --------
    This can be used to collect the errors of multiple *status* objects:
    >>> import sounddevice as sd
    >>> errors = sd.CallbackFlags()
    >>> errors |= status1
    >>> errors |= status2
    >>> errors |= status3
    >>> # and so on ...
    >>> errors.input_overflow
    True
    The values may also be set and cleared by the user:
    >>> import sounddevice as sd
    >>> cf = sd.CallbackFlags()
    >>> cf
    <sounddevice.CallbackFlags: no flags set>
    >>> cf.input_underflow = True
    >>> cf
    <sounddevice.CallbackFlags: input underflow>
    >>> cf.input_underflow = False
    >>> cf
    <sounddevice.CallbackFlags: no flags set>
    """
    # The only state is the raw bitmask of PortAudio flag constants.
    __slots__ = '_flags'
    def __init__(self, flags=0x0):
        self._flags = flags
    def __repr__(self):
        flags = str(self)
        if not flags:
            flags = 'no flags set'
        return '<sounddevice.CallbackFlags: {}>'.format(flags)
    def __str__(self):
        # Comma-separated, human-readable list of all currently set
        # flags, derived from the public property names defined below.
        return ', '.join(name.replace('_', ' ') for name in dir(self)
                         if not name.startswith('_') and getattr(self, name))
    def __bool__(self):
        return bool(self._flags)
    def __ior__(self, other):
        # Accumulate flags from another instance; enables the
        # ``errors |= status`` idiom shown in the class docstring.
        if not isinstance(other, CallbackFlags):
            return NotImplemented
        self._flags |= other._flags
        return self
    @property
    def input_underflow(self):
        """Input underflow.
        In a stream opened with ``blocksize=0``, indicates that input
        data is all silence (zeros) because no real data is available.
        In a stream opened with a non-zero *blocksize*, it indicates
        that one or more zero samples have been inserted into the input
        buffer to compensate for an input underflow.
        This can only happen in full-duplex streams (including
        `playrec()`).
        """
        return self._hasflag(_lib.paInputUnderflow)
    @input_underflow.setter
    def input_underflow(self, value):
        self._updateflag(_lib.paInputUnderflow, value)
    @property
    def input_overflow(self):
        """Input overflow.
        In a stream opened with ``blocksize=0``, indicates that data
        prior to the first sample of the input buffer was discarded due
        to an overflow, possibly because the stream callback is using
        too much CPU time.  In a stream opened with a non-zero
        *blocksize*, it indicates that data prior to one or more samples
        in the input buffer was discarded.
        This can happen in full-duplex and input-only streams (including
        `playrec()` and `rec()`).
        """
        return self._hasflag(_lib.paInputOverflow)
    @input_overflow.setter
    def input_overflow(self, value):
        self._updateflag(_lib.paInputOverflow, value)
    @property
    def output_underflow(self):
        """Output underflow.
        Indicates that output data (or a gap) was inserted, possibly
        because the stream callback is using too much CPU time.
        This can happen in full-duplex and output-only streams
        (including `playrec()` and `play()`).
        """
        return self._hasflag(_lib.paOutputUnderflow)
    @output_underflow.setter
    def output_underflow(self, value):
        self._updateflag(_lib.paOutputUnderflow, value)
    @property
    def output_overflow(self):
        """Output overflow.
        Indicates that output data will be discarded because no room is
        available.
        This can only happen in full-duplex streams (including
        `playrec()`), but only when ``never_drop_input=True`` was
        specified.  See `default.never_drop_input`.
        """
        return self._hasflag(_lib.paOutputOverflow)
    @output_overflow.setter
    def output_overflow(self, value):
        self._updateflag(_lib.paOutputOverflow, value)
    @property
    def priming_output(self):
        """Priming output.
        Some or all of the output data will be used to prime the stream,
        input data may be zero.
        This will only take place with some of the host APIs, and only
        if ``prime_output_buffers_using_stream_callback=True`` was
        specified.
        See `default.prime_output_buffers_using_stream_callback`.
        """
        return self._hasflag(_lib.paPrimingOutput)
    def _hasflag(self, flag):
        """Check a given flag."""
        return bool(self._flags & flag)
    def _updateflag(self, flag, value):
        """Set/clear a given flag."""
        if value:
            self._flags |= flag
        else:
            self._flags &= ~flag
class _InputOutputPair(object):
    """Two-element container for input/output parameter values.

    Elements can be addressed by the strings ``'input'``/``'output'``
    as well as by the indices 0/1.  Reading an element that was never
    set (or was set to ``None``) falls back to the corresponding entry
    of a named default attribute on the parent object.

    Used for the device, channels, dtype, latency and extra_settings
    pairs of the `default` object.

    """

    _indexmapping = {'input': 0, 'output': 1}

    def __init__(self, parent, default_attr):
        self._pair = [None, None]
        self._parent = parent
        self._default_attr = default_attr

    def __getitem__(self, index):
        idx = self._indexmapping.get(index, index)
        stored = self._pair[idx]
        if stored is not None:
            return stored
        # Nothing stored for this slot -> use the parent's default:
        return getattr(self._parent, self._default_attr)[idx]

    def __setitem__(self, index, value):
        idx = self._indexmapping.get(index, index)
        self._pair[idx] = value

    def __repr__(self):
        return '[{0[0]!r}, {0[1]!r}]'.format(self)
class default(object):
    """Get/set defaults for the *sounddevice* module.
    The attributes `device`, `channels`, `dtype`, `latency` and
    `extra_settings` accept single values which specify the given
    property for both input and output.  However, if the property
    differs between input and output, pairs of values can be used, where
    the first value specifies the input and the second value specifies
    the output.  All other attributes are always single values.
    Examples
    --------
    >>> import sounddevice as sd
    >>> sd.default.samplerate = 48000
    >>> sd.default.dtype
    ['float32', 'float32']
    Different values for input and output:
    >>> sd.default.channels = 1, 2
    A single value sets both input and output at the same time:
    >>> sd.default.device = 5
    >>> sd.default.device
    [5, 5]
    An attribute can be set to the "factory default" by assigning
    ``None``:
    >>> sd.default.samplerate = None
    >>> sd.default.device = None, 4
    Use `reset()` to reset all attributes:
    >>> sd.default.reset()
    """
    # Attributes that hold an (input, output) pair; each is backed by an
    # _InputOutputPair instance created in __init__() whose fallback is
    # the matching '_default_<name>' class attribute/property below.
    _pairs = 'device', 'channels', 'dtype', 'latency', 'extra_settings'
    # The class attributes listed in _pairs are only provided here for static
    # analysis tools and for the docs. They're overwritten in __init__().
    device = None, None
    """Index or query string of default input/output device.
    If not overwritten, this is queried from PortAudio.
    If a string is given, the device is selected which contains all
    space-separated parts in the right order.  Each device string
    contains the name of the corresponding host API in the end.
    The string comparison is case-insensitive.
    See Also
    --------
    :func:`query_devices`
    """
    channels = _default_channels = None, None
    """Number of input/output channels.
    The maximum number of channels for a given device can be found out
    with `query_devices()`.
    """
    dtype = _default_dtype = 'float32', 'float32'
    """Data type used for input/output samples.
    The types ``'float32'``, ``'int32'``, ``'int16'``, ``'int8'`` and
    ``'uint8'`` can be used for all streams and functions.
    Additionally, `play()`, `rec()` and `playrec()` support
    ``'float64'`` (for convenience, data is merely converted from/to
    ``'float32'``) and `RawInputStream`, `RawOutputStream` and
    `RawStream` support ``'int24'`` (packed 24 bit format, which is
    *not* supported in NumPy!).
    If NumPy is available, the corresponding `numpy.dtype` objects can
    be used as well.
    The floating point representations ``'float32'`` and ``'float64'``
    use +1.0 and -1.0 as the maximum and minimum values, respectively.
    ``'uint8'`` is an unsigned 8 bit format where 128 is considered
    "ground".
    """
    latency = _default_latency = 'high', 'high'
    """Suggested input/output latency in seconds.
    The special values ``'low'`` and ``'high'`` can be used to select
    the default low/high latency of the chosen device.
    ``'high'`` is typically more robust (i.e. buffer under-/overflows
    are less likely), but the latency may be too large for interactive
    applications.
    See Also
    --------
    :func:`query_devices`
    """
    extra_settings = _default_extra_settings = None, None
    """Host-API-specific input/output settings.
    See Also
    --------
    AsioSettings, CoreAudioSettings, WasapiSettings
    """
    samplerate = None
    """Sampling frequency in Hertz (= frames per second).
    See Also
    --------
    :func:`query_devices`
    """
    blocksize = _lib.paFramesPerBufferUnspecified
    """See the *blocksize* argument of `Stream`."""
    clip_off = False
    """Disable clipping.
    Set to ``True`` to disable default clipping of out of range samples.
    """
    dither_off = False
    """Disable dithering.
    Set to ``True`` to disable default dithering.
    """
    never_drop_input = False
    """Set behavior for input overflow of full-duplex streams.
    Set to ``True`` to request that where possible a full duplex stream
    will not discard overflowed input samples without calling the stream
    callback.  This flag is only valid for full-duplex callback streams
    (i.e. only `Stream` and `RawStream` and only if *callback* was
    specified; this includes `playrec()`) and only when used in
    combination with ``blocksize=0`` (the default).  Using this flag
    incorrectly results in an error being raised.  See also
    http://www.portaudio.com/docs/proposals/001-UnderflowOverflowHandling.html.
    """
    prime_output_buffers_using_stream_callback = False
    """How to fill initial output buffers.
    Set to ``True`` to call the stream callback to fill initial output
    buffers, rather than the default behavior of priming the buffers
    with zeros (silence).  This flag has no effect for input-only
    (`InputStream` and `RawInputStream`) and blocking read/write streams
    (i.e. if *callback* wasn't specified).  See also
    http://www.portaudio.com/docs/proposals/020-AllowCallbackToPrimeStream.html.
    """
    def __init__(self):
        for attr in self._pairs:
            # __setattr__() must be avoided here
            vars(self)[attr] = _InputOutputPair(self, '_default_' + attr)
    def __setattr__(self, name, value):
        """Only allow setting existing attributes."""
        if name in self._pairs:
            # Keep the _InputOutputPair object itself; only replace the
            # stored values (a single value sets both halves).
            getattr(self, name)._pair[:] = _split(value)
        elif name in dir(self) and name != 'reset':
            object.__setattr__(self, name, value)
        else:
            raise AttributeError(
                "'default' object has no attribute " + repr(name))
    @property
    def _default_device(self):
        # Fallback for the 'device' pair: ask PortAudio for its current
        # default devices on every access.
        return (_lib.Pa_GetDefaultInputDevice(),
                _lib.Pa_GetDefaultOutputDevice())
    @property
    def hostapi(self):
        """Index of the default host API (read-only)."""
        return _check(_lib.Pa_GetDefaultHostApi())
    def reset(self):
        """Reset all attributes to their "factory default"."""
        # Drop all instance attributes, then re-create the pair objects.
        vars(self).clear()
        self.__init__()
if not hasattr(_ffi, 'I_AM_FAKE'):
    # This object shadows the 'default' class, except when building the docs.
    # (The 'I_AM_FAKE' attribute is presumably set on the dummy _ffi object
    # used by the documentation build, which keeps the class visible for
    # autodoc -- TODO confirm against the docs build setup.)
    default = default()
class PortAudioError(Exception):
    """Exception raised when a PortAudio function reports an error.

    Attributes
    ----------
    args
        A variable length tuple containing the following elements when
        available:

        1) A string describing the error
        2) The PortAudio ``PaErrorCode`` value
        3) A 3-tuple containing the host API index, host error code, and
           the host error message (which may be an empty string)

    """
    def __str__(self):
        # Build the message incrementally from whichever args are present.
        info = self.args
        errormsg = info[0] if info else ''
        if len(info) > 1:
            errormsg = '{} [PaErrorCode {}]'.format(errormsg, info[1])
        if len(info) > 2:
            host_api, hosterror_code, hosterror_text = info[2]
            hostname = query_hostapis(host_api)['name']
            errormsg = "{}: '{}' [{} error {}]".format(
                errormsg, hosterror_text, hostname, hosterror_code)
        return errormsg
class CallbackStop(Exception):
    """Raise this in a stream callback to gracefully end processing.

    After this exception is raised in the stream callback, the callback
    is not invoked anymore, but all audio buffers that were already
    generated are still played out to the end.

    See Also
    --------
    CallbackAbort, :meth:`Stream.stop`, Stream

    """
class CallbackAbort(Exception):
    """Raise this in a stream callback to abort processing immediately.

    After this exception is raised in the stream callback, the callback
    is not invoked anymore and any pending audio buffers are discarded.

    See Also
    --------
    CallbackStop, :meth:`Stream.abort`, Stream

    """
class AsioSettings(object):
    def __init__(self, channel_selectors):
        """ASIO-specific input/output settings.
        Objects of this class can be used as *extra_settings* argument
        to `Stream()` (and variants) or as `default.extra_settings`.
        Parameters
        ----------
        channel_selectors : list of int
            Support for opening only specific channels of an ASIO
            device.  *channel_selectors* is a list of integers
            specifying the (zero-based) channel numbers to use.
            The length of *channel_selectors* must match the
            corresponding *channels* parameter of `Stream()` (or
            variants), otherwise a crash may result.
            The values in the *channel_selectors* array must specify
            channels within the range of supported channels.
        Examples
        --------
        Setting output channels when calling `play()`:
        >>> import sounddevice as sd
        >>> asio_out = sd.AsioSettings(channel_selectors=[12, 13])
        >>> sd.play(..., extra_settings=asio_out)
        Setting default output channels:
        >>> sd.default.extra_settings = asio_out
        >>> sd.play(...)
        Setting input channels as well:
        >>> asio_in = sd.AsioSettings(channel_selectors=[8])
        >>> sd.default.extra_settings = asio_in, asio_out
        >>> sd.playrec(..., channels=1, ...)
        """
        # An int would silently create a CFFI array of that many zeros,
        # so reject it explicitly:
        if isinstance(channel_selectors, int):
            raise TypeError('channel_selectors must be a list or tuple')
        # The CFFI int array must be kept alive as long as the stream
        # uses it (the struct below only stores a pointer to it), so it
        # is stored as an instance attribute.
        self._selectors = _ffi.new('int[]', channel_selectors)
        self._streaminfo = _ffi.new('PaAsioStreamInfo*', dict(
            size=_ffi.sizeof('PaAsioStreamInfo'),
            hostApiType=_lib.paASIO,
            version=1,
            flags=_lib.paAsioUseChannelSelectors,
            channelSelectors=self._selectors))
class CoreAudioSettings(object):
    def __init__(self, channel_map=None, change_device_parameters=False,
                 fail_if_conversion_required=False, conversion_quality='max'):
        """Mac Core Audio-specific input/output settings.
        Objects of this class can be used as *extra_settings* argument
        to `Stream()` (and variants) or as `default.extra_settings`.
        Parameters
        ----------
        channel_map : sequence of int, optional
            Support for opening only specific channels of a Core Audio
            device.  Note that *channel_map* is treated differently
            between input and output channels.
            For input devices, *channel_map* is a list of integers
            specifying the (zero-based) channel numbers to use.
            For output devices, *channel_map* must have the same length
            as the number of output channels of the device.  Specify
            unused channels with -1, and a 0-based index for any desired
            channels.
            See the example below.  For additional information, see the
            `PortAudio documentation`__.
            __ https://app.assembla.com/spaces/portaudio/git/source/
               master/src/hostapi/coreaudio/notes.txt
        change_device_parameters : bool, optional
            If ``True``, allows PortAudio to change things like the
            device's frame size, which allows for much lower latency,
            but might disrupt the device if other programs are using it,
            even when you are just querying the device.  ``False`` is
            the default.
        fail_if_conversion_required : bool, optional
            In combination with the above flag, ``True`` causes the
            stream opening to fail, unless the exact sample rates are
            supported by the device.
        conversion_quality : {'min', 'low', 'medium', 'high', 'max'}, optional
            This sets Core Audio's sample rate conversion quality.
            ``'max'`` is the default.
        Example
        -------
        This example assumes a device having 6 input and 6 output
        channels.  Input is from the second and fourth channels, and
        output is to the device's third and fifth channels:
        >>> import sounddevice as sd
        >>> ca_in = sd.CoreAudioSettings(channel_map=[1, 3])
        >>> ca_out = sd.CoreAudioSettings(channel_map=[-1, -1, 0, -1, 1, -1])
        >>> sd.playrec(..., channels=2, extra_settings=(ca_in, ca_out))
        """
        # Mapping from conversion_quality strings to PortAudio flag bits:
        conversion_dict = {
            'min': _lib.paMacCoreConversionQualityMin,
            'low': _lib.paMacCoreConversionQualityLow,
            'medium': _lib.paMacCoreConversionQualityMedium,
            'high': _lib.paMacCoreConversionQualityHigh,
            'max': _lib.paMacCoreConversionQualityMax,
        }
        # Minimal checking on channel_map to catch errors that might
        # otherwise go unnoticed:
        if isinstance(channel_map, int):
            raise TypeError('channel_map must be a list or tuple')
        try:
            # AttributeError is raised by .lower() if conversion_quality
            # is not a string; both cases get the same error message.
            self._flags = conversion_dict[conversion_quality.lower()]
        except (KeyError, AttributeError) as e:
            raise ValueError('conversion_quality must be one of ' +
                             repr(list(conversion_dict))) from e
        if change_device_parameters:
            self._flags |= _lib.paMacCoreChangeDeviceParameters
        if fail_if_conversion_required:
            self._flags |= _lib.paMacCoreFailIfConversionRequired
        # this struct must be kept alive (for the stream's lifetime)!
        self._streaminfo = _ffi.new('PaMacCoreStreamInfo*')
        _lib.PaMacCore_SetupStreamInfo(self._streaminfo, self._flags)
        if channel_map is not None:
            # this array must be kept alive (the struct only stores a
            # pointer to it)!
            self._channel_map = _ffi.new('SInt32[]', channel_map)
            if len(self._channel_map) == 0:
                raise TypeError('channel_map must not be empty')
            _lib.PaMacCore_SetupChannelMap(self._streaminfo,
                                           self._channel_map,
                                           len(self._channel_map))
class WasapiSettings(object):
    def __init__(self, exclusive=False):
        """WASAPI-specific input/output settings.

        Objects of this class can be used as *extra_settings* argument
        to `Stream()` (and variants) or as `default.extra_settings`.
        They can also be used in `check_input_settings()` and
        `check_output_settings()`.

        Parameters
        ----------
        exclusive : bool
            Exclusive mode allows to deliver audio data directly to
            hardware bypassing software mixing.

        Examples
        --------
        Setting exclusive mode when calling `play()`:

        >>> import sounddevice as sd
        >>> wasapi_exclusive = sd.WasapiSettings(exclusive=True)
        >>> sd.play(..., extra_settings=wasapi_exclusive)

        Setting exclusive mode as default:

        >>> sd.default.extra_settings = wasapi_exclusive
        >>> sd.play(...)

        """
        stream_flags = _lib.paWinWasapiExclusive if exclusive else 0x0
        # The CFFI struct must be kept alive while the stream uses it,
        # so it is stored as an instance attribute.
        self._streaminfo = _ffi.new('PaWasapiStreamInfo*', dict(
            size=_ffi.sizeof('PaWasapiStreamInfo'),
            hostApiType=_lib.paWASAPI,
            version=1,
            flags=stream_flags,
        ))
class _CallbackContext(object):
    """Helper class for re-use in play()/rec()/playrec() callbacks."""
    blocksize = None  # number of frames handled in the current callback
    data = None  # 2-D playback array, set by check_data()
    out = None  # 2-D recording buffer, set by check_out()
    frame = 0  # current position (in frames) within data/out
    input_channels = output_channels = None
    input_dtype = output_dtype = None
    input_mapping = output_mapping = None
    silent_channels = None  # output channels that are filled with zeros
    # NOTE(review): self.frames is assigned by the callers of
    # check_data()/check_out() (play()/rec()/playrec(), outside this
    # class) before the stream starts -- TODO confirm.
    def __init__(self, loop=False):
        import threading
        try:
            import numpy
            assert numpy  # avoid "imported but unused" message (W0611)
        except ImportError as e:
            raise ImportError(
                'NumPy must be installed for play()/rec()/playrec()') from e
        self.loop = loop
        # Set by finished_callback() to signal the end of the stream:
        self.event = threading.Event()
        # Accumulates the status flags of all callback invocations:
        self.status = CallbackFlags()
    def check_data(self, data, mapping, device):
        """Check data and output mapping; return the number of frames."""
        import numpy as np
        data = np.asarray(data)
        if data.ndim < 2:
            # Treat 1-D input as a single (mono) channel:
            data = data.reshape(-1, 1)
        frames, channels = data.shape
        dtype = _check_dtype(data.dtype)
        mapping_is_explicit = mapping is not None
        mapping, channels = _check_mapping(mapping, channels)
        if data.shape[1] == 1:
            pass  # No problem, mono data is duplicated into arbitrary channels
        elif data.shape[1] != len(mapping):
            raise ValueError(
                'number of output channels != size of output mapping')
        # Apparently, some PortAudio host APIs duplicate mono streams to the
        # first two channels, which is unexpected when specifying mapping=[1].
        # In this case, we play silence on the second channel, but only if the
        # device actually supports a second channel:
        if (mapping_is_explicit and np.array_equal(mapping, [0]) and
                query_devices(device, 'output')['max_output_channels'] >= 2):
            channels = 2
        silent_channels = np.setdiff1d(np.arange(channels), mapping)
        if len(mapping) + len(silent_channels) != channels:
            raise ValueError('each channel may only appear once in mapping')
        self.data = data
        self.output_channels = channels
        self.output_dtype = dtype
        self.output_mapping = mapping
        self.silent_channels = silent_channels
        return frames
    def check_out(self, out, frames, channels, dtype, mapping):
        """Check out, frames, channels, dtype and input mapping."""
        import numpy as np
        if out is None:
            # No user-supplied buffer: derive the shape and allocate one.
            if frames is None:
                raise TypeError('frames must be specified')
            if channels is None:
                channels = default.channels['input']
            if channels is None:
                if mapping is None:
                    raise TypeError(
                        'Unable to determine number of input channels')
                else:
                    channels = len(np.atleast_1d(mapping))
            if dtype is None:
                dtype = default.dtype['input']
            out = np.empty((frames, channels), dtype, order='C')
        else:
            frames, channels = out.shape
            dtype = out.dtype
        dtype = _check_dtype(dtype)
        mapping, channels = _check_mapping(mapping, channels)
        if out.shape[1] != len(mapping):
            raise ValueError(
                'number of input channels != size of input mapping')
        self.out = out
        self.input_channels = channels
        self.input_dtype = dtype
        self.input_mapping = mapping
        return out, frames
    def callback_enter(self, status, data):
        """Check status and blocksize."""
        self.status |= status
        # Process at most the remaining frames, limited by the current
        # buffer length:
        self.blocksize = min(self.frames - self.frame, len(data))
    def read_indata(self, indata):
        # We manually iterate over each channel in mapping because
        # numpy.take(..., out=...) has a bug:
        # https://github.com/numpy/numpy/pull/4246.
        # Note: using indata[:blocksize, mapping] (a.k.a. 'fancy' indexing)
        # would create unwanted copies (and probably memory allocations).
        for target, source in enumerate(self.input_mapping):
            # If out.dtype is 'float64', 'float32' data is "upgraded" here:
            self.out[self.frame:self.frame + self.blocksize, target] = \
                indata[:self.blocksize, source]
    def write_outdata(self, outdata):
        # 'float64' data is cast to 'float32' here:
        outdata[:self.blocksize, self.output_mapping] = \
            self.data[self.frame:self.frame + self.blocksize]
        outdata[:self.blocksize, self.silent_channels] = 0
        if self.loop and self.blocksize < len(outdata):
            # Looping and the remaining data is shorter than the output
            # buffer: restart from the beginning and recursively fill
            # the rest of the buffer.
            self.frame = 0
            outdata = outdata[self.blocksize:]
            self.blocksize = min(self.frames, len(outdata))
            self.write_outdata(outdata)
        else:
            # Zero-pad the unused tail of the output buffer:
            outdata[self.blocksize:] = 0
    def callback_exit(self):
        # A blocksize of 0 means all frames have been processed ->
        # abort the stream:
        if not self.blocksize:
            raise CallbackAbort
        self.frame += self.blocksize
    def finished_callback(self):
        self.event.set()
        # Drop temporary audio buffers to free memory
        self.data = None
        self.out = None
        # Drop CFFI objects to avoid reference cycles
        self.stream._callback = None
        self.stream._finished_callback = None
    def start_stream(self, StreamClass, samplerate, channels, dtype, callback,
                     blocking, **kwargs):
        stop()  # Stop previous playback/recording
        self.stream = StreamClass(samplerate=samplerate,
                                  channels=channels,
                                  dtype=dtype,
                                  callback=callback,
                                  finished_callback=self.finished_callback,
                                  **kwargs)
        self.stream.start()
        global _last_callback
        # Remember this context as the most recent one so that a later
        # stop() call can reach it (and so it isn't garbage-collected):
        _last_callback = self
        if blocking:
            self.wait()
    def wait(self, ignore_errors=True):
        """Wait for finished_callback.
        Can be interrupted with a KeyboardInterrupt.
        """
        try:
            self.event.wait()
        finally:
            self.stream.close(ignore_errors)
        # Return the accumulated callback flags if any were set, else None:
        return self.status if self.status else None
def _remove_self(d):
"""Return a copy of d without the 'self' entry."""
d = d.copy()
del d['self']
return d
def _check_mapping(mapping, channels):
"""Check mapping, obtain channels."""
import numpy as np
if mapping is None:
mapping = np.arange(channels)
else:
mapping = np.array(mapping, copy=True)
mapping = np.atleast_1d(mapping)
if mapping.min() < 1:
raise ValueError('channel numbers must not be < 1')
channels = mapping.max()
mapping -= 1 # channel numbers start with 1
return mapping, channels
def _check_dtype(dtype):
    """Return the canonical dtype name, mapping 'float64' to 'float32'."""
    import numpy as np
    name = np.dtype(dtype).name
    if name in _sampleformats:
        return name
    if name == 'float64':
        # PortAudio has no 64-bit float format; 'float32' is used instead.
        return 'float32'
    raise TypeError('Unsupported data type: ' + repr(name))
def _get_stream_parameters(kind, device, channels, dtype, latency,
                           extra_settings, samplerate):
    """Get parameters for one direction (input or output) of a stream.

    Unspecified values fall back to the module-wide ``default`` object
    first, then (for channels/latency/samplerate) to the values reported
    by the selected device.  Returns a tuple of
    (PaStreamParameters pointer, dtype name, sample size in bytes,
    samplerate).
    """
    assert kind in ('input', 'output')
    # First fallback level: the module-wide defaults.
    if device is None:
        device = default.device[kind]
    if channels is None:
        channels = default.channels[kind]
    if dtype is None:
        dtype = default.dtype[kind]
    if latency is None:
        latency = default.latency[kind]
    if extra_settings is None:
        extra_settings = default.extra_settings[kind]
    if samplerate is None:
        samplerate = default.samplerate
    device = _get_device_id(device, kind, raise_on_error=True)
    info = query_devices(device)
    # Second fallback level: values reported by the device itself.
    if channels is None:
        channels = info['max_' + kind + '_channels']
    try:
        # If NumPy is available, get canonical dtype name
        dtype = _sys.modules['numpy'].dtype(dtype).name
    except Exception:
        pass  # NumPy not available or invalid dtype (e.g. 'int24') or ...
    try:
        sampleformat = _sampleformats[dtype]
    except KeyError as e:
        raise ValueError('Invalid ' + kind + ' sample format') from e
    samplesize = _check(_lib.Pa_GetSampleSize(sampleformat))
    # 'low'/'high' are symbolic names for the device's suggested latencies:
    if latency in ('low', 'high'):
        latency = info['default_' + latency + '_' + kind + '_latency']
    if samplerate is None:
        samplerate = info['default_samplerate']
    parameters = _ffi.new('PaStreamParameters*', (
        device, channels, sampleformat, latency,
        extra_settings._streaminfo if extra_settings else _ffi.NULL))
    return parameters, dtype, samplesize, samplerate
def _wrap_callback(callback, *args):
    """Invoke callback function and check for custom exceptions."""
    *head, flags = args
    try:
        # The raw flags argument is wrapped in a CallbackFlags object:
        callback(*head, CallbackFlags(flags))
    except CallbackStop:
        return _lib.paComplete
    except CallbackAbort:
        return _lib.paAbort
    return _lib.paContinue
def _buffer(ptr, frames, channels, samplesize):
    """Create a buffer object from a pointer to some memory."""
    nbytes = frames * channels * samplesize
    return _ffi.buffer(ptr, nbytes)
def _array(buffer, channels, dtype):
"""Create NumPy array from a buffer object."""
import numpy as np
data = np.frombuffer(buffer, dtype=dtype)
data.shape = -1, channels
return data
def _split(value):
"""Split input/output value into two values.
This can be useful for generic code that allows using the same value
for input and output but also a pair of two separate values.
"""
if isinstance(value, (str, bytes)):
# iterable, but not meant for splitting
return value, value
try:
invalue, outvalue = value
except TypeError:
invalue = outvalue = value
except ValueError as e:
raise ValueError('Only single values and pairs are allowed') from e
return invalue, outvalue
def _check(err, msg=''):
    """Raise PortAudioError for below-zero error codes."""
    if err >= 0:
        return err
    errormsg = _ffi.string(_lib.Pa_GetErrorText(err)).decode()
    if msg:
        errormsg = '{}: {}'.format(msg, errormsg)
    if err != _lib.paUnanticipatedHostError:
        raise PortAudioError(errormsg, err)
    # (gh82) The host error info is collected right here rather than
    # inside PortAudioError, since _check should only ever be called
    # immediately after a failing API function call.  This avoids
    # potential issues when multiple host APIs are used simultaneously.
    info = _lib.Pa_GetLastHostErrorInfo()
    host_api = _lib.Pa_HostApiTypeIdToHostApiIndex(info.hostApiType)
    hosterror_text = _ffi.string(info.errorText).decode()
    raise PortAudioError(errormsg, err,
                         (host_api, info.errorCode, hosterror_text))
def _get_device_id(id_or_query_string, kind, raise_on_error=False):
    """Return device ID given space-separated substrings.

    *id_or_query_string* may be a numeric device ID (returned as is), a
    string of space-separated case-insensitive substrings that must all
    appear, in order, in "<device name>, <host API name>", or a pair of
    such values from which the *kind* ('input'/'output') side is taken.
    Returns -1 when no (or no unique) match is found, unless
    *raise_on_error* is set, in which case a ValueError is raised.
    """
    assert kind in ('input', 'output', None)
    if id_or_query_string is None:
        id_or_query_string = default.device
    idev, odev = _split(id_or_query_string)
    if kind == 'input':
        id_or_query_string = idev
    elif kind == 'output':
        id_or_query_string = odev
    else:
        if idev == odev:
            id_or_query_string = idev
        else:
            raise ValueError('Input and output device are different: {!r}'
                             .format(id_or_query_string))
    if isinstance(id_or_query_string, int):
        return id_or_query_string
    # Collect candidate devices, i.e. those with channels in this direction:
    device_list = []
    for id, info in enumerate(query_devices()):
        if not kind or info['max_' + kind + '_channels'] > 0:
            hostapi_info = query_hostapis(info['hostapi'])
            device_list.append((id, info['name'], hostapi_info['name']))
    query_string = id_or_query_string.lower()
    substrings = query_string.split()
    matches = []
    exact_device_matches = []
    for id, device_string, hostapi_string in device_list:
        full_string = device_string + ', ' + hostapi_string
        pos = 0
        # Each substring must be found after the end of the previous one:
        for substring in substrings:
            pos = full_string.lower().find(substring, pos)
            if pos < 0:
                break
            pos += len(substring)
        else:
            # for/else: only reached when all substrings matched.
            matches.append((id, full_string))
            if device_string.lower() == query_string:
                exact_device_matches.append(id)
    if kind is None:
        kind = 'input/output'  # Just used for error messages
    if not matches:
        if raise_on_error:
            raise ValueError(
                'No ' + kind + ' device matching ' + repr(id_or_query_string))
        else:
            return -1
    if len(matches) > 1:
        # A single exact (full) device-name match disambiguates:
        if len(exact_device_matches) == 1:
            return exact_device_matches[0]
        if raise_on_error:
            raise ValueError('Multiple ' + kind + ' devices found for ' +
                             repr(id_or_query_string) + ':\n' +
                             '\n'.join('[{}] {}'.format(id, name)
                                       for id, name in matches))
        else:
            return -1
    return matches[0][0]
def _initialize():
    """Initialize PortAudio.

    This temporarily forwards messages from stderr to ``/dev/null``
    (where supported).

    In most cases, this doesn't have to be called explicitly, because it
    is automatically called with the ``import sounddevice`` statement.
    """
    old_stderr = None
    try:
        stdio = _ffi.dlopen(None)
    except OSError:
        # dlopen(None) not supported; stderr cannot be silenced here.
        pass
    else:
        # The C symbol is called 'stderr' on most platforms;
        # '__stderrp' is tried as an alternative spelling.
        for stderr_name in 'stderr', '__stderrp':
            try:
                old_stderr = getattr(stdio, stderr_name)
            except _ffi.error:
                continue
            else:
                devnull = stdio.fopen(_os.devnull.encode(), b'w')
                setattr(stdio, stderr_name, devnull)
                break
    try:
        _check(_lib.Pa_Initialize(), 'Error initializing PortAudio')
        global _initialized
        _initialized += 1
    finally:
        # Restore the original stderr even if initialization failed:
        if old_stderr is not None:
            setattr(stdio, stderr_name, old_stderr)
            stdio.fclose(devnull)
def _terminate():
    """Terminate PortAudio.

    In most cases, this doesn't have to be called explicitly.
    """
    global _initialized
    _check(_lib.Pa_Terminate(), 'Error terminating PortAudio')
    # Only decremented on success; _check() raises on a negative error code.
    _initialized -= 1
def _exit_handler():
    """Close leftover streams and terminate PortAudio at interpreter exit."""
    assert _initialized >= 0
    # Older versions of portaudio don't clean up open streams by
    # themselves (see github issue #1), so it is done here.
    callback = _last_callback
    if callback:
        # NB: stop() must be called first; without it portaudio hangs
        # when calling close()
        callback.stream.stop()
        callback.stream.close()
    while _initialized:
        _terminate()
# Make sure PortAudio is terminated (and open streams are closed) on exit:
_atexit.register(_exit_handler)
_initialize()
if __name__ == '__main__':
    # When run as a script, print an overview of the available devices:
    print(query_devices())
|
This is not the first entry that I have done on Tamannaah.
I did one quite a while back, but back then she was known as Tamannaah Bhatia, or Tamanna Bhatia.
Tamannaah is an Indian actress who usually works in Telugu and Tamil films, though she has done a few Hindi films as well.
She made her acting debut in the 2005 Bollywood film, Chand Sa Roshan Chehra.
Since then, Tamannaah has gone on to appear in close to 40 films.
|
# Copyright (c) 2017 Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import client as http_client
from ironic_lib import metrics_utils
from oslo_utils import uuidutils
from pecan import rest
from ironic import api
from ironic.api.controllers import link
from ironic.api.controllers.v1 import collection
from ironic.api.controllers.v1 import notification_utils as notify
from ironic.api.controllers.v1 import utils as api_utils
from ironic.api import method
from ironic.common import args
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import policy
from ironic import objects
# Metrics logger used by the @METRICS.timer decorators on the controller.
METRICS = metrics_utils.get_metrics_logger(__name__)
# Fields returned when the client requests neither ?fields= nor ?detail=.
_DEFAULT_RETURN_FIELDS = ['uuid', 'node_uuid', 'volume_type',
                          'boot_index', 'volume_id']
# JSON schema used to validate volume target request bodies.
TARGET_SCHEMA = {
    'type': 'object',
    'properties': {
        'boot_index': {'type': 'integer'},
        'extra': {'type': ['object', 'null']},
        'node_uuid': {'type': 'string'},
        'properties': {'type': ['object', 'null']},
        'volume_id': {'type': 'string'},
        'volume_type': {'type': 'string'},
        'uuid': {'type': ['string', 'null']},
    },
    'required': ['boot_index', 'node_uuid', 'volume_id', 'volume_type'],
    'additionalProperties': False,
}
# Additional checks not expressible in the schema: UUID-formatted fields.
TARGET_VALIDATOR_EXTRA = args.dict_valid(
    node_uuid=args.uuid,
    uuid=args.uuid,
)
# Combined validator applied to POST bodies and to patched documents.
TARGET_VALIDATOR = args.and_valid(
    args.schema(TARGET_SCHEMA),
    TARGET_VALIDATOR_EXTRA
)
# Fields that a JSON PATCH request is allowed to modify.
PATCH_ALLOWED_FIELDS = [
    'boot_index',
    'extra',
    'node_uuid',
    'properties',
    'volume_id',
    'volume_type'
]
def convert_with_links(rpc_target, fields=None, sanitize=True):
    """Build an API-serializable dict (with links) from an RPC target."""
    target = api_utils.object_to_dict(
        rpc_target,
        link_resource='volume/targets',
        fields=('boot_index', 'extra', 'properties', 'volume_id',
                'volume_type'))
    api_utils.populate_node_uuid(rpc_target, target)
    if fields is not None:
        api_utils.check_for_invalid_fields(fields, target)
    if sanitize:
        api_utils.sanitize_dict(target, fields)
    return target
def list_convert_with_links(rpc_targets, limit, url=None, fields=None,
                            detail=None, **kwargs):
    """Build a paginated collection dict from a list of RPC targets."""
    if detail:
        kwargs['detail'] = detail
    # Sanitizing is deferred to the collection helper via sanitize_func:
    converted = [convert_with_links(target, fields=fields, sanitize=False)
                 for target in rpc_targets]
    return collection.list_convert_with_links(
        items=converted,
        item_name='targets',
        limit=limit,
        url=url,
        fields=fields,
        sanitize_func=api_utils.sanitize_dict,
        **kwargs)
class VolumeTargetsController(rest.RestController):
    """REST controller for VolumeTargets."""

    # 'extra' and 'properties' are stored as blobs and cannot be used
    # as database sort keys.
    invalid_sort_key_list = ['extra', 'properties']

    def __init__(self, node_ident=None):
        super(VolumeTargetsController, self).__init__()
        # Set when this controller is mounted below a node resource;
        # it scopes listing to that node and forbids per-target calls.
        self.parent_node_ident = node_ident

    def _redact_target_properties(self, target):
        # Filters what could contain sensitive information. For iSCSI
        # volumes this can include iscsi connection details which may
        # be sensitive.
        redacted = ('** Value redacted: Requires permission '
                    'baremetal:volume:view_target_properties '
                    'access. Permission denied. **')
        redacted_message = {
            'redacted_contents': redacted
        }
        target.properties = redacted_message

    def _get_volume_targets_collection(self, node_ident, marker, limit,
                                       sort_key, sort_dir, resource_url=None,
                                       fields=None, detail=None,
                                       project=None):
        """Return a paginated collection of volume targets.

        If this controller has a parent node (or *node_ident* is given),
        only that node's targets are listed; otherwise all targets are
        listed, optionally filtered by *project*.  Target properties are
        redacted unless the caller passes the
        baremetal:volume:view_target_properties policy check.
        """
        limit = api_utils.validate_limit(limit)
        sort_dir = api_utils.validate_sort_dir(sort_dir)
        marker_obj = None
        if marker:
            marker_obj = objects.VolumeTarget.get_by_uuid(
                api.request.context, marker)
        if sort_key in self.invalid_sort_key_list:
            raise exception.InvalidParameterValue(
                _("The sort_key value %(key)s is an invalid field for "
                  "sorting") % {'key': sort_key})
        node_ident = self.parent_node_ident or node_ident
        if node_ident:
            # FIXME(comstud): Since all we need is the node ID, we can
            #                 make this more efficient by only querying
            #                 for that column. This will get cleaned up
            #                 as we move to the object interface.
            node = api_utils.get_rpc_node(node_ident)
            targets = objects.VolumeTarget.list_by_node_id(
                api.request.context, node.id, limit, marker_obj,
                sort_key=sort_key, sort_dir=sort_dir, project=project)
        else:
            targets = objects.VolumeTarget.list(api.request.context,
                                                limit, marker_obj,
                                                sort_key=sort_key,
                                                sort_dir=sort_dir,
                                                project=project)
        cdict = api.request.context.to_policy_values()
        if not policy.check_policy('baremetal:volume:view_target_properties',
                                   cdict, cdict):
            for target in targets:
                self._redact_target_properties(target)
        return list_convert_with_links(targets, limit,
                                       url=resource_url,
                                       fields=fields,
                                       sort_key=sort_key,
                                       sort_dir=sort_dir,
                                       detail=detail)

    @METRICS.timer('VolumeTargetsController.get_all')
    @method.expose()
    @args.validate(node=args.uuid_or_name, marker=args.uuid,
                   limit=args.integer, sort_key=args.string,
                   sort_dir=args.string, fields=args.string_list,
                   detail=args.boolean)
    def get_all(self, node=None, marker=None, limit=None, sort_key='id',
                sort_dir='asc', fields=None, detail=None, project=None):
        """Retrieve a list of volume targets.

        :param node: UUID or name of a node, to get only volume targets
                     for that node.
        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
                      This value cannot be larger than the value of max_limit
                      in the [api] section of the ironic configuration, or only
                      max_limit resources will be returned.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: "asc".
        :param fields: Optional, a list with a specified set of fields
                       of the resource to be returned.
        :param detail: Optional, whether to retrieve with detail.
        :param project: Optional, an associated node project (owner,
                        or lessee) to filter the query upon.
        :returns: a list of volume targets, or an empty list if no volume
                  target is found.

        :raises: InvalidParameterValue if sort_key does not exist
        :raises: InvalidParameterValue if sort key is invalid for sorting.
        :raises: InvalidParameterValue if both fields and detail are specified.
        """
        # NOTE(review): the incoming 'project' argument is overwritten
        # here; the effective filter always comes from the policy check.
        project = api_utils.check_volume_list_policy(
            parent_node=self.parent_node_ident)
        if fields is None and not detail:
            fields = _DEFAULT_RETURN_FIELDS
        if fields and detail:
            raise exception.InvalidParameterValue(
                _("Can't fetch a subset of fields with 'detail' set"))
        resource_url = 'volume/targets'
        return self._get_volume_targets_collection(node, marker, limit,
                                                   sort_key, sort_dir,
                                                   resource_url=resource_url,
                                                   fields=fields,
                                                   detail=detail,
                                                   project=project)

    @METRICS.timer('VolumeTargetsController.get_one')
    @method.expose()
    @args.validate(target_uuid=args.uuid, fields=args.string_list)
    def get_one(self, target_uuid, fields=None):
        """Retrieve information about the given volume target.

        :param target_uuid: UUID of a volume target.
        :param fields: Optional, a list with a specified set of fields
                       of the resource to be returned.
        :returns: API-serializable volume target object.

        :raises: OperationNotPermitted if accessed with specifying a parent
                 node.
        :raises: VolumeTargetNotFound if no volume target with this UUID exists
        """
        rpc_target, _ = api_utils.check_volume_policy_and_retrieve(
            'baremetal:volume:get',
            target_uuid,
            target=True)
        # Individual targets are only reachable via the top-level URL:
        if self.parent_node_ident:
            raise exception.OperationNotPermitted()
        cdict = api.request.context.to_policy_values()
        if not policy.check_policy('baremetal:volume:view_target_properties',
                                   cdict, cdict):
            self._redact_target_properties(rpc_target)
        return convert_with_links(rpc_target, fields=fields)

    @METRICS.timer('VolumeTargetsController.post')
    @method.expose(status_code=http_client.CREATED)
    @method.body('target')
    @args.validate(target=TARGET_VALIDATOR)
    def post(self, target):
        """Create a new volume target.

        :param target: a volume target within the request body.
        :returns: API-serializable volume target object.

        :raises: OperationNotPermitted if accessed with specifying a parent
                 node.
        :raises: VolumeTargetBootIndexAlreadyExists if a volume target already
                 exists with the same node ID and boot index
        :raises: VolumeTargetAlreadyExists if a volume target with the same
                 UUID exists
        """
        context = api.request.context
        raise_node_not_found = False
        node = None
        owner = None
        lessee = None
        node_uuid = target.get('node_uuid')
        try:
            node = api_utils.replace_node_uuid_with_id(target)
            owner = node.owner
            lessee = node.lessee
        except exception.NotFound:
            # Deferred so the policy check below runs before a 4xx about
            # the node is revealed to the caller.
            raise_node_not_found = True
        api_utils.check_owner_policy('node', 'baremetal:volume:create',
                                     owner, lessee=lessee,
                                     conceal_node=False)
        if raise_node_not_found:
            raise exception.InvalidInput(fieldname='node_uuid',
                                         value=node_uuid)
        if self.parent_node_ident:
            raise exception.OperationNotPermitted()
        # NOTE(hshiina): UUID is mandatory for notification payload
        if not target.get('uuid'):
            target['uuid'] = uuidutils.generate_uuid()
        new_target = objects.VolumeTarget(context, **target)
        notify.emit_start_notification(context, new_target, 'create',
                                       node_uuid=node.uuid)
        with notify.handle_error_notification(context, new_target, 'create',
                                              node_uuid=node.uuid):
            new_target.create()
        notify.emit_end_notification(context, new_target, 'create',
                                     node_uuid=node.uuid)
        # Set the HTTP Location Header
        api.response.location = link.build_url('volume/targets',
                                               new_target.uuid)
        return convert_with_links(new_target)

    @METRICS.timer('VolumeTargetsController.patch')
    @method.expose()
    @method.body('patch')
    @args.validate(target_uuid=args.uuid, patch=args.patch)
    def patch(self, target_uuid, patch):
        """Update an existing volume target.

        :param target_uuid: UUID of a volume target.
        :param patch: a json PATCH document to apply to this volume target.
        :returns: API-serializable volume target object.

        :raises: OperationNotPermitted if accessed with specifying a
                 parent node.
        :raises: PatchError if a given patch can not be applied.
        :raises: InvalidParameterValue if the volume target's UUID is being
                 changed
        :raises: NodeLocked if the node is already locked
        :raises: NodeNotFound if the node associated with the volume target
                 does not exist
        :raises: VolumeTargetNotFound if the volume target cannot be found
        :raises: VolumeTargetBootIndexAlreadyExists if a volume target already
                 exists with the same node ID and boot index values
        :raises: InvalidUUID if invalid node UUID is passed in the patch.
        :raises: InvalidStateRequested If a node associated with the
                 volume target is not powered off.
        """
        context = api.request.context
        api_utils.check_volume_policy_and_retrieve('baremetal:volume:update',
                                                   target_uuid,
                                                   target=True)
        if self.parent_node_ident:
            raise exception.OperationNotPermitted()
        api_utils.patch_validate_allowed_fields(patch, PATCH_ALLOWED_FIELDS)
        values = api_utils.get_patch_values(patch, '/node_uuid')
        for value in values:
            if not uuidutils.is_uuid_like(value):
                message = _("Expected a UUID for node_uuid, but received "
                            "%(uuid)s.") % {'uuid': str(value)}
                raise exception.InvalidUUID(message=message)
        rpc_target = objects.VolumeTarget.get_by_uuid(context, target_uuid)
        target_dict = rpc_target.as_dict()
        # NOTE(smoriya):
        # 1) Remove node_id because it's an internal value and
        #    not present in the API object
        # 2) Add node_uuid
        rpc_node = api_utils.replace_node_id_with_uuid(target_dict)
        target_dict = api_utils.apply_jsonpatch(target_dict, patch)
        try:
            if target_dict['node_uuid'] != rpc_node.uuid:
                # TODO(TheJulia): I guess the intention is to
                # permit the mapping to be changed
                # should we even allow this at all?
                rpc_node = objects.Node.get(
                    api.request.context, target_dict['node_uuid'])
        except exception.NodeNotFound as e:
            # Change error code because 404 (NotFound) is inappropriate
            # response for a PATCH request to change a volume target
            e.code = http_client.BAD_REQUEST  # BadRequest
            raise
        api_utils.patched_validate_with_schema(
            target_dict, TARGET_SCHEMA, TARGET_VALIDATOR)
        api_utils.patch_update_changed_fields(
            target_dict, rpc_target, fields=objects.VolumeTarget.fields,
            schema=TARGET_SCHEMA, id_map={'node_id': rpc_node.id}
        )
        notify.emit_start_notification(context, rpc_target, 'update',
                                       node_uuid=rpc_node.uuid)
        with notify.handle_error_notification(context, rpc_target, 'update',
                                              node_uuid=rpc_node.uuid):
            topic = api.request.rpcapi.get_topic_for(rpc_node)
            new_target = api.request.rpcapi.update_volume_target(
                context, rpc_target, topic)
        api_target = convert_with_links(new_target)
        notify.emit_end_notification(context, new_target, 'update',
                                     node_uuid=rpc_node.uuid)
        return api_target

    @METRICS.timer('VolumeTargetsController.delete')
    @method.expose(status_code=http_client.NO_CONTENT)
    @args.validate(target_uuid=args.uuid)
    def delete(self, target_uuid):
        """Delete a volume target.

        :param target_uuid: UUID of a volume target.

        :raises: OperationNotPermitted if accessed with specifying a
                 parent node.
        :raises: NodeLocked if node is locked by another conductor
        :raises: NodeNotFound if the node associated with the target does
                 not exist
        :raises: VolumeTargetNotFound if the volume target cannot be found
        :raises: InvalidStateRequested If a node associated with the
                 volume target is not powered off.
        """
        context = api.request.context
        api_utils.check_volume_policy_and_retrieve('baremetal:volume:delete',
                                                   target_uuid,
                                                   target=True)
        if self.parent_node_ident:
            raise exception.OperationNotPermitted()
        rpc_target = objects.VolumeTarget.get_by_uuid(context, target_uuid)
        rpc_node = objects.Node.get_by_id(context, rpc_target.node_id)
        notify.emit_start_notification(context, rpc_target, 'delete',
                                       node_uuid=rpc_node.uuid)
        with notify.handle_error_notification(context, rpc_target, 'delete',
                                              node_uuid=rpc_node.uuid):
            topic = api.request.rpcapi.get_topic_for(rpc_node)
            api.request.rpcapi.destroy_volume_target(context,
                                                     rpc_target, topic)
        notify.emit_end_notification(context, rpc_target, 'delete',
                                     node_uuid=rpc_node.uuid)
|
(1381) In 5 Richard II is a law absolutely forbidding the sale of sweet wines at retail. This law, with the testimony of Shakespeare, goes to show that England liked their wines dry (sack), but the act is repealed the following year, providing only that sweet wines must be sold at the same price as the wines of the Rhine and Gascony; and in the same year, more intelligent than we, is a statute permitting merchants to ship goods in foreign ships when no English ships are to be had. In 1383, according to Spence, the barons protested that they would never suffer the kingdom to be governed by the Roman law, and the judges prohibited it from being any longer cited in the common-law tribunals. The rest of the statutes of Richard II are taken up with the important statutes concerning riots and forcible entries, and regulating labor, as set forth in the last chapter.
The troublesome reign of Richard II closes with an interesting attempt to make its legislation permanent, as has sometimes been attempted in our State constitutions. The last section of the last law of King Richard declares "That the King by the Assent of the said Lords and Knights [note it does not say by consent of the Commons], so assigned by the said Authority of Parliament, will and hath ordained that … to repeal or to attempt the repeal of any of the said Statutes is declared to be high treason," and the man so doing shall have execution as a traitor. Notwithstanding, in the following year the first act of Henry IV repeals the whole Parliament of the 21st of Richard II and all their statutes; that it be "wholly reversed, revoked, voided, undone, repealed, and adnulled for ever"—so we with the States in rebellion, and so Charles II with the acts of Cromwell.
(1400) Under Henry IV is the first secular law against heresy, making it a capital offence. Upon conviction by the ordinary the heretic is to be delivered to the secular arm, i.e., burnt. Note that the trial, however, still remains with the ordinary, i.e., the clerical court. Under Henry IV also we find a statute banishing all Welshmen and forbidding them to buy land or become freemen in England; and under Henry VI the same law is applied to Irishmen, and in the next reign to Scotchmen as well. The Irishmen complained of, however, were only those attending the University of Oxford. In 1402 we find Parliament asserting its right to ratify treaties and to be consulted on wars; matters not without interest to President Roosevelt's Congress, and in 1407 we find definite recognition of the principle that money bills must originate in the lower house.
For the purpose of his Chicago speech, it is a pity that Mr. Bryan's attention was never called to the Statute of the 8th of Henry VI, which forbids merchants from compelling payment in gold and from refusing silver, "which Gold they do carry out of the Realm into other strange Countries." An enlightened civic spirit is shown in the Statute of 1433, which prohibits any person dwelling at the Stews in Southwark from serving on juries in Surrey, whereby "many Murderers and notorious Thieves have been saved, great Murders and Robberies concealed and not punished." And the statute sweepingly declares everybody inhabiting that part of Southwark to be thieves, common women, and other misdoers. Fortunately, this was before the time that John Harvard took up his residence there.
In 1430 was the first statute imposing a property qualification upon voters.
In 1452 is a curious statute reciting that "Whereas in all Parts of this Realm divers People of great Power, moved with unsatiable Covetousness … have sought and found new Inventions, and them continually do execute, to the Danger, Trouble and great abusing of all Ladies, Gentlewomen, and having any Substance … perceiving their great Weakness and Simplicity, will take them by Force, or otherwise come to them seeming to be their great Friends … and so by great Dissimulation … get them into their Possession; also they will many Times compell them to be married by them, contrary to their own liking." A writ of chancery is given to persons so constrained of their liberty to summon the person complained of, and if he make default be outlawed—an early example of "government by injunction" applied to other than labor disputes! I know no example of an American statute to this effect; presumably our women are lacking in "weakness and simplicity."
In 1463 is another curious sumptuary law prescribing with great care the apparel of knights, bachelors, gentlemen and their wives, making it criminal for tailors to make cloths not according to this fashion, and for shoemakers to make boots or shoes having pikes more than two inches long. No draper shall sell or women wear hose to the value of more than fourteen pence, nor kerchiefs worth more than ten shillings, but scholars of the universities "may wear such Array as they may," nor does the ordinance extend to judges or soldiers. The provision against long pikes to shoes appears to be considered of importance, for it was re-enacted in 1464. I have searched in vain for a statute relating to hatpins. Again in 1482 there is another long statute concerning apparel which seems to have been considered under the reign of Edward IV quite the most important thing in life. A more manly clause of the statute is concerned with the benefits of archery to England, reciting that "In the Time of the victorious Reign … the King's Subjects have virtuously occupied and used shooting with their Bows, whereby and under the Protection of Almighty God, victorious acts have been done in Defence of this Realm," and the price of long bows of yew is limited to three and four pence. The statutes now begin to be in English.
In 1488 the Isle of Wight is to be repeopled with English people for "defence of the King's auncien ennemyes of the realme of Fraunce."
In 1491 all Scots are to depart the realm within forty days upon pain of forfeiture of all their goods; it is not recorded that any remained in England. In 1491 Henry VII levied an amazingly heavy tax upon personal property, that is to say, two fifteenths and tenths upon all "movable goodes cattales and othre thinges usuelly to suche xvmes and xmes contributory," with the exception of Cambridge and a few other favored towns. In 1495 the famous Oklahoma statute is anticipated by a law regulating abuses in the stuffing of feather beds.
In 1503 a statute recites that the "Longe Bowes hathe ben moche used in this his Realme, wherby Honour & Victorie hathe ben goten … and moche more drede amonge all Cristen Princes by reasone of the same, whiche shotyng is now greatly dekayed." So this mediaeval Kipling laments that they now delight in cross-bows to the great hurt and enfeebling of the Realm and to the comfort of outward enemies, wherefore cross-bows are forbidden except to the lords, on penalty of forfeiture of the bow.
(1509) The reign of Henry VIII was one of personal government; and in those days personal government resulted in a small output of law-making by Parliament. Indeed, after 1523, under Cardinal Wolsey, Parliament was not summoned for seven years. In 1539 the attempt to do without popular legislation is shown in the act already referred to, giving royal proclamations of the king and council the force of law, a definite attempt at personal government which might have resulted in the establishment of an administrative law fashioned by the executive, had it not been for the sturdy opposition of the people under weaker reigns. But under the reign of Henry VIII also the great right of free speech in Parliament was established; and in 1514 the king manumitted two villeins with the significant words "Whereas God created all men free," vulgarly supposed to be original with our Declaration of Independence.
The important principle of a limitation for prosecutions by the government for penal offences dates from the first year of Henry VIII, the period being put, as it still is, at three years; and it is expressed to be for better peace and justice and to avoid the taking up of old charges after the evidence has disappeared.
In 1515 is another act of apparel providing, among other things, that the king only shall wear cloth-of-gold or purple color, or black fur, and that no man under the degree of a knight may wear "pinched Shirts." In this reign also comes the famous Statute of Wills, permitting the disposal of land by devise, the Statute of Uses and other matters primarily of interest to the lawyer; the first Bankruptcy Act and the first legislation recognizing the duty of the secular law to support the poor, perfected only under Queen Elizabeth; but in the latter part of his reign there is little law-making that need concern us. The Statutes of Apparel continue, and the statutes fixing the price of wine, which, indeed, seems to have been the last subject so regulated. There is the "Bloody Statute" against heresy, and the first act against witchcraft, Tindale's translation of the Bible is prohibited, and women and laborers forbidden to read the New Testament. There is the first act for the preservation of the river Thames, and also for the cleaning of the river at Canterbury; and the first game law protecting wild-fowl, and a law "for the breeding of horses" to be over fifteen hands. The king is allowed to make bishops and dissolve monasteries; physicians are required to be licensed. The regrating of wools and fish is again forbidden, and finally there is an act for the true making of Pynnes; that is to say, they are to be double headed and the heads "soudered fast to the Shanke."
We are now approaching the end of our task, for the legislation after James I, with the exception of a few great acts, such as the Statute of Frauds and the Habeas Corpus Act, hardly concerns us as not being part of our inherited common law. The reigns of Elizabeth and James are to us principally notable for the increase of the feeling against monopolies, ending in the great Statute of James I. While we still find restrictions upon trade in market towns or in the city of London, they always appear as local restrictions and are usually soon repealed. The prejudice against regrating, that is to say, middlemen, continues, as is shown in a Statute of Edward VI, providing that no one shall buy butter or cheese unless to sell the same only by retail in open shop. That is to say, there must be no middleman between the producer and the retailer, and a definition of the word "retail" is given. In 1552, the 7th of Edward VI is a celebrated statute called the Assize of Fuel, applied to the city of London, notable because it forbids middlemen and provides that no one shall buy wood or coal except such as will burn or consume the same, "Forasmuche as by the gredye appetite and coveteousnes of divers persons, Fuell Coles and Woodd runethe many times throughe foure or fyve severall handes or moe before it comethe to thandes of them that for their necessite doo burne … the same"—under penalty of treble value.
In 1551 is the last elaborate act against regrators, forestallers, and engrossers, made perpetual by 13 Elizabeth, and only repealed in 1772. It recognizes all previous laws against them, but recites that they have not had good effect, and therefore in the first section gives a precise definition. Forestalling—the buying of victuals or other merchandise on their way to a market or port, or contracting to buy the same before they arrive at such market or city, or making any motion for the enhancing of the price thereof, or to prevent the supply, that is, to induce any person coming to the market, etc., to stay away. Regrating is narrowed to victuals, alive or dead, and to the reselling them at the fair or market where they were bought or within four miles thereof; and engrossing is given a definition very similar to our "buying of futures." That is to say, it is the buying or contracting to buy any corn growing in the fields or any other victuals within the Realm of England with intent to sell the same again. The penalty for all such offences is two months' imprisonment and forfeiture of the value of the goods, but for a third offence the person suffers forfeiture and may be imprisoned. There is an important recognition of modern political economy made in the proviso that persons may engross corn, etc., when it sells at or below a certain price, not, however, forestalling it.
In 1554 is a statute for the relief of weavers, prohibiting "the engrossing of looms," thus anticipating one of the principal doctrines of Lassalle. In the same year, 1st of Philip and Mary, is a statute prohibiting countrymen from retailing goods in cities, boroughs, or market towns, but selling by wholesale is allowed, and they may sell if free of a corporation; and so cloth may be retailed by the maker, and the statute only applies to cloth and grocery wares, not apparently to food.
|
#!/usr/bin/env python
# Module: py_utc_timestamp.py
# Purpose: Python UTC timestamp
# Date: N/A
# Notes:
# 1) ...
# Ref:
# http://stackoverflow.com/questions/8777753/converting-datetime-date-to-utc-timestamp-in-python
#
"""py_utc_timestamp.py: Python UTC timestamp test"""
from __future__ import division
from datetime import datetime, timedelta
import subprocess
def totimestamp(dt, epoch=datetime(1970,1,1)):
    """Return the POSIX timestamp (seconds since *epoch*) for naive UTC *dt*.

    :param dt: a naive :class:`datetime.datetime` assumed to be in UTC.
    :param epoch: reference instant; defaults to the Unix epoch. The default
        is immutable, so sharing one instance across calls is safe.
    :returns: float seconds, with microsecond resolution.
    """
    td = dt - epoch
    # timedelta.total_seconds() (available since Python 2.7) performs exactly
    # the microsecond-resolution computation that was previously spelled out
    # by hand as (us + (s + days*86400) * 10**6) / 10**6.
    return td.total_seconds()
def py_utc_timestamp():
    """py_utc_timestamp.py: run the test"""
    # Show the module banner, the current UTC wall-clock time, and its epoch
    # timestamp as computed by totimestamp().
    print __doc__
    now = datetime.utcnow()
    print now
    print totimestamp(now)
    # Reference instant with a known epoch timestamp (1468371723); the
    # shelled-out GNU `date` call prints the same instant's timestamp so the
    # two values can be compared by eye.  Requires GNU date ('-d' flag).
    ref_dt = datetime(2016, 7, 13, 1, 2, 3, 0)
    subprocess.call("date '+%s' -d '2016/7/13 1:2:3 UTC'", shell=True)
    ref_dt_t = 1468371723
    ref_dt_res = totimestamp(ref_dt)
    # delta should be 0 if totimestamp() agrees with the reference value.
    delta = ref_dt_t - ref_dt_res
    print "ref = {0} t={1} result={2}".format(ref_dt, ref_dt_t, ref_dt_res)
    print "delta={0}".format(delta)
if __name__ == "__main__":
    py_utc_timestamp()
|
Building on the popularity of our award-winning Morrison 2 tent, the Morrison 2 EVO includes a brow pole that adds internal volume and headspace for a roomier night’s sleep in your favorite backcountry destination.
This 2-person tent features 36 sq ft of interior usable space, accommodating 2 adults in comfort. This two-door, two-vestibule design is easy to set up and offers best-in-class waterproof coatings on the floor and fly. The included footprint not only offers an added layer of protection between the floor and the ground, but it can also be set up with the tent fly for the added versatility of a fast fly, lightweight set-up.
|
# -*- coding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2001-2015 Micronaet S.r.l. (<http://www.micronaet.it>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class LognaetMovement(orm.Model):
    ''' Default object for manage movement modifications

        One record per logged change to a stock movement, mirroring the
        data pushed from the external management system.
    '''
    _name = 'lognaet.movement'
    _description = 'Log movement'

    _columns = {
        'name': fields.char('Reference', size=50),
        'cause': fields.selection([
            ('1', 'Delete'),
            ('2', 'Variation (Q)'),
            ('3', 'Variation (Price)'),
            ('4', 'Variation (Discount)'),
            ('5', 'Add'),
            ], 'Cause'),
        'hostname': fields.char('Hostname', size=30),
        'username': fields.char('Username', size=30),
        'document': fields.char('Type of doc.', size=15), # TODO selection!!
        #'series': fields.char('Series', size=2),
        #'number': fields.char('Number', size=15),
        'article': fields.char('Article', size=20),
        'lot': fields.char('Lot', size=18),
        'code': fields.char('Partner code', size=10),
        'date': fields.date('Date'),
        'year': fields.char('Year', size=4),
        'previous': fields.char('Previous', size=15),
        'current': fields.char('Current', size=15),
        'timestamp': fields.datetime('Timestamp'),
        }

    _defaults = {
        # Must be a callable: a plain datetime.now().strftime(...) would be
        # evaluated once when the class is loaded, freezing the default
        # timestamp at server-start time for every record created afterwards.
        'timestamp': lambda *args: datetime.now().strftime(
            DEFAULT_SERVER_DATETIME_FORMAT),
        }
class LognaetOrder(orm.Model):
    ''' Default object for manage order statistics

        One record per logged order insert/modify session coming from the
        external Mexal system.
    '''
    _name = 'lognaet.order'
    _description = 'Log order'

    _columns = {
        'name': fields.char('Reference', size=100),
        'hostname': fields.char('Hostname', size=30),
        'username': fields.char('Username', size=30),
        'user': fields.char('Mexal user', size=30),
        'type': fields.selection([
            ('ModOC', 'Modify'),
            ('InsOC', 'Insert'),
            ], 'Type'),
        'start': fields.char('Start time', size=10),
        'end': fields.char('End time', size=10),
        'date': fields.date('Date'),
        'total': fields.integer('Total row'),
        'timestamp': fields.datetime('Timestamp'),
        }

    _defaults = {
        # Must be a callable: a plain datetime.now().strftime(...) would be
        # evaluated once at class-load time, so every record would get the
        # server-start timestamp instead of its creation time.
        'timestamp': lambda *args: datetime.now().strftime(
            DEFAULT_SERVER_DATETIME_FORMAT),
        }
|
Why fell running?: It's a challenge and fun.
Favourite Race(s) why?: Only done 3 fell races so Stanbury Splash is my favourite at the moment because I was first in my age group.
What is your weekly mileage?: Attend training when I can because I also do swimming training 6 days a week.
Favourite movie / TV programme: Sponge Bob.
Give us an interesting fact / something we wouldn't know about you!: I have won the Yorkshire Swimming long course championships at 50m and 200m Breaststroke for the last 3 years.
Complete the sentence: If I wasn't fell running I would be...: ...Swimming or playing football.
|
from celery import Celery
class HadesCelery(Celery):
    """Celery subclass complying with the Hades RPC API

    This subclass sets a few options in :meth:`__init__` such as the
    default exchange and hooks into :meth:`signature` to set a routing
    key if given.

    :param str routing_key: The routing key to enforce in the options
        given to :meth:`signature`. For unicast messages it is
        usually of the format ``<site>`` or ``<site>.<node>``. If not
        set, behavior of :meth:`signature` is unchanged.
    """
    def __init__(self, *a, routing_key=None, **kw):
        super().__init__(*a, **kw)
        self.routing_key = routing_key
        self.conf['CELERY_DEFAULT_EXCHANGE'] = 'hades.agent.rpc'
        self.conf['CELERY_DEFAULT_EXCHANGE_TYPE'] = 'topic'
        self.conf['CELERY_CREATE_MISSING_QUEUES'] = True
        self.conf['CELERY_TASK_SERIALIZER'] = 'json'
        self.conf['CELERY_EVENT_SERIALIZER'] = 'json'
        self.conf['CELERY_RESULT_SERIALIZER'] = 'json'

    def signature(self, *a, **kw):
        """Create a task signature, forcing ``self.routing_key`` if set.

        Both ``kw`` and its ``options`` entry are copied before being
        modified: the previous implementation shallow-copied ``kw`` but then
        wrote the routing key into the caller-supplied ``options`` dict in
        place (and raised ``TypeError`` when ``options=None`` was passed).
        """
        if self.routing_key is not None:
            kw = kw.copy()
            # Copy (or create) the options mapping so the caller's dict is
            # never mutated; the enforced routing key still wins over any
            # caller-provided one, as before.
            options = dict(kw.get('options') or {})
            options['routing_key'] = self.routing_key
            kw['options'] = options
        return super().signature(*a, **kw)
|
So, you’re organising a catch-up with someone you met at a gig the other night. Or perhaps you’re taking a gamble on that charming oddball from Tinder. Is there a better social unifier than coffee? (Alright, I’ll concede that hitting the booze is a pretty good option, but bear with me here). Whether it’s meeting an old friend, getting to know a business client or chatting someone up, a cup of coffee is remarkably good at getting us to slow down and be in the moment with another person – and this is no less true for those of us who already have someone special in our lives.
Coffee is ubiquitous in our hectic urban life, but apart from exchanging a handful of pleasantries with our regular barista, most of us have little understanding of what goes into turning a bag of roasted beans into our favourite daily stimulant. And given that baristas quite frequently work in teams, what better way to bond with someone than shoulder-to-shoulder at a coffee machine? A three-hour crash course in espresso-making through WeTeachMe, hosted by the outstandingly knowledgeable David from The Espresso School, is a great way to put teamwork to the test with Nelly – my good friend, life coach, strongest critic, fashion adviser, agitator, counsellor, and above all else, significant other.
To start, we’re given a brief run-down on the history of coffee, from its discovery in the 9th century in Ethiopia, its movement through the Middle East and Southeast Asia, to its cultivation throughout the world. Learning of the wide range of species and cultivars was particularly fascinating; for everyday people ordering a coffee at a cafe, we’re not particularly fussed about the species of bean or where it comes from. But following David’s explanation on the extraordinary flavours that can be found in some of the rarer (and naturally, pricier) types of coffee bean, I’m far more inclined to be a little more adventurous with my consumption in the future.
But we weren’t there for a lecture. Before long, David had us standing in front of an espresso machine, allowing us to find out for ourselves how the slightest differences in grind, water volume and the time spent in extraction can make a world of difference. At one point, I had my grind settings way off in the wrong direction, resulting in an espresso that took a mere 11 seconds to pass through the handle, barely half of what would be considered a sufficient length of time. “Try it,” said David. “It’ll probably taste far too sour.” He was right. I tend to defer to Nelly’s more refined sense of taste, but even I could tell it was awful, and certainly not something you would serve up in a cafe.
Of course, not all of us drink our coffee black. Nailing the grind and extraction process will keep the purists happy, but unless you can get the milk just right you’re going to lose a lot of customers. And, much to my surprise, this step was not as difficult as I had imagined. The process of creating the perfect texture with milk involves a two-step process of “stretching” and “rolling”, using the senses of sight (looking for the right gloss in the foam), sound (knowing how deep to place the steam tip) and touch (feeling for temperature, and “rolling” the milk before pouring). If anything, pouring the milk is arguably a trickier affair than heating it up.
Not content to school us merely on technique, David emphasised the importance of workflow. People aren’t keen on waiting for their coffee, so churning out cup after cup of quality brew is a test of efficiency in movement and multitasking. Divide the responsibilities of grinding and extraction to one person, and milk texturing and pouring to another, and you’ve got a recipe to test communication and cooperation between a couple. Nelly and I were certainly able to punch out a couple of decent coffees by the end of the three hours, but it certainly gave us a greater appreciation for the baristas who spend eight hours a day mastering this process.
Whilst going out for a coffee is a no-brainer option when getting to know someone you’re keen on, if that person turns out to be a bit of a caffeine fiend then I wouldn’t hesitate to recommend an espresso course to learn how well you can work together, and above all else, give you an appreciation for the history and preparation of our favourite daily pick-me-up. After all, the only thing that should be bitter after this course is that cup you brewed for far too long!
Learn to appreciate your morning coffee even more!
|
# -*- coding: utf-8 -*-
# Create your views here.
from django.shortcuts import render
from django.http import (HttpResponse, HttpResponseRedirect)
from django.contrib.auth.views import (login, logout)
from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
from django.contrib.auth.decorators import (login_required,
permission_required)
from django.contrib.auth.models import (Permission, Group, User)
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse #reverse函数可以像在模板中使用url那样,在代码中使用
def signin(request):
    # Thin wrapper around django.contrib.auth.views.login that only swaps in
    # the project's template; authentication logic is unchanged.
    return login(request, template_name='signin.html')
def signout(request):
    # Log the current user out, then redirect to the URL named 'home'.
    return logout(request, next_page=reverse('home'))
def signup(request):
    """Register a new user via Django's UserCreationForm.

    On a valid POST the user is created, granted the ``can_vote``
    permission and redirected to the sign-in page; on an invalid POST the
    bound form (with errors) is re-rendered.  GET renders an empty form.
    """
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            content_type = ContentType.objects.get(
                app_label='account', model='user')
            # get_or_create returns an (object, created) tuple; it must be
            # unpacked so the Permission itself -- not the tuple -- is added
            # to the new user's permissions.
            p, _created = Permission.objects.get_or_create(
                codename=u"can_vote", name=u"can vote",
                content_type=content_type)
            new_user = form.save()
            new_user.user_permissions.add(p)
            # The login view is registered under the name 'signin'
            # (reverse('singin') was a typo and raised NoReverseMatch).
            return HttpResponseRedirect(reverse('signin'))
    else:
        form = UserCreationForm()
    return render(request, "signup.html", {
        'form': form,
    })
@login_required
def profile(request):
    # Debug print marking entry into the view (leftover from development).
    print '/////////////////////////////////'
    # NOTE(review): the permission string is checked as 'auth.can_vote', but
    # signup() creates the permission under the 'account' app's content type;
    # has_perm() keys on "<app_label>.<codename>", so this check may never be
    # true -- TODO confirm against the actual Permission row.
    if request.user.has_perm('auth.can_vote'):
        print 'you can vote'
    form = UserCreationForm()
    # Debug dump of the user's groups manager API (development leftover).
    print dir(request.user.groups)
    # print request.user.get_all_permissions()
    return render(request, 'profile.html', {
        'form': form,
    })
@permission_required('auth.can_manage_users', raise_exception=True)
def manage(request):
    """Render the user-management page.

    Access requires the ``auth.can_manage_users`` permission; callers
    lacking it get a PermissionDenied (403) instead of a login redirect.
    """
    context = {'form': UserCreationForm()}
    return render(request, 'manage.html', context)
|
Here is a little story on one of them: We find a nice house that was way over-priced in a great neighborhood. I do a CMA and determine the value. My folks don’t want to make an offer because the asking price was sooo much more than the actual value…..like almost $50k more. A week or so later I get a call from the listing agent telling me that the price is going to be dropped by almost $40k. We write an offer. Well, it appears we weren’t the only ones who wanted the house and thought it was over-priced, because along came somebody else. (If you are a seller, this just goes to show that buyers know what your house is worth and usually don’t like to throw out a realistic offer when you are waaaay over the top.) The fun thing about this house is that there have only been 43 houses in this price range sell in all of Lexington since Jan. 1 of this year. There are 103 active listings on the market right now. You would think that NO house in this price range would have multiple offers, but it just goes to show that everybody always wants the best one they can get. That is why there are “Waits” at nice restaurants and you never see Mercedes-Benz offer 0% financing.
So, if you are a seller, my advice is to price your house realistically and do things to really make it show well. FYI: I have a client right now that has seen dozens of houses and wants me to blog about what a turn off a dirty house is to buyers……and that fresh paint makes a house look so much better. So there, you have had a real buyer in today’s market tell you what little things you can do to try to get a contract on your house.
My advice to a buyer depends on what kind of buyer you are. If you’re the type that doesn’t have to have the most updated and move in ready house on the market, then you have a little more leverage since there aren’t many of you out there like that! If you are the type who wants the WOW! Factor, then you’ll need to act fast since every other like-minded buyer is going to want the same house.
Coming Next week: Ever see a house sell for waaaay more than anybody thinks it is worth? I got a good story about one!
← Real Estate Catch Phrases….EXPOSED!
|
# setup ipython environment
from ipywidgets import interact, fixed
# setup python environment
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# The 'axes.color_cycle' rcParam was deprecated in matplotlib 1.5 and removed
# in 2.0; read the style's colors out of its replacement 'axes.prop_cycle'
# instead (plot_mc4state below indexes into this list, e.g. col[5]).
col = plt.rcParams['axes.prop_cycle'].by_key()['color']
def mc(p, q, tmax=1, x0=0):
    """Simulate a two-state Markov chain, yielding the state at each step.

    State 0 stays put with probability p, state 1 with probability q; one
    uniform draw per step (drawn up front) decides each transition.
    """
    state = x0
    draws = np.random.rand(tmax + 1)
    for step in range(tmax + 1):
        yield state
        if state == 0:
            if draws[step] < 1 - p:
                state = 1
        elif draws[step] < 1 - q:
            state = 0
def plot_mc(p:(0,1,0.1)=0.9, q:(0,1,0.1)=0.9):
    """Print the transition matrix and plot one 100-step sample path."""
    print('P=|%.1f %.1f|\n |%.1f %.1f|' % (p, 1 - p, 1 - q, q))
    trajectory = list(mc(p, q, 100))
    plt.figure(figsize=[9, 4])
    plt.plot(trajectory)
    plt.yticks([0, 1], ['$S_1$, off', '$S_2$, on'])
    plt.xlabel('time')
    plt.ylim(-0.1, 1.1)
    plt.show()
def mc_sol(s1, p, q, t):
    """Exact state distribution of the two-state chain after t steps.

    Starts from [s1, 1-s1] and applies the transition matrix t times;
    returns a length-2 ndarray [P(state 0), P(state 1)].
    """
    initial = np.matrix([s1, 1 - s1])
    transition = np.matrix([[p, 1 - p],
                            [1 - q, q]])
    return np.array(initial * transition ** t)[0]
def plot_mc_sol(s1:(0,1,0.1)=1, p:(0,1,0.1)=0.9, q:(0,1,0.1)=0.9, tmax=fixed(20)):
    """Plot the exact occupation probabilities of both states over time."""
    solution = [mc_sol(s1, p, q, t) for t in range(tmax + 1)]
    off, on = zip(*solution)
    plt.figure(figsize=[9, 4])
    plt.plot(off, label='S_1, off')
    plt.plot(on, label='S_2, on')
    plt.xlabel('time')
    plt.ylabel('proportion of channels')
    plt.ylim(-0.01, 1.01)
    plt.legend()
    plt.show()
def plot_mc_sol2(p:(0,1,0.1)=0.9, q:(0,1,0.1)=0.9,
                 tmax=fixed(20), log_n:(0,10,1)=0):
    """Compare the exact 'on' probability with the average of n sample paths.

    n = exp(log_n) rounded down; the exact solution always starts in state 0.
    """
    solution = [mc_sol(1, p, q, t) for t in range(tmax + 1)]
    _, on = zip(*solution)
    n = int(np.exp(log_n))
    runs = [list(mc(p, q, tmax)) for _ in range(n)]
    sim_av = [np.mean(step) for step in zip(*runs)]
    print("n = %d" % n)
    plt.figure(figsize=[9, 4])
    plt.plot(on)
    plt.plot(sim_av)
    plt.xlabel('time')
    plt.ylabel('proportion of channels on')
    plt.ylim(-0.01, 1.01)
    plt.show()
def red_sim(i, n, tmax=1):
    """Wright-Fisher style resampling of i allele copies in 2n slots.

    Yields the copy number at each generation; the next generation is a
    binomial resample with success probability = current frequency.
    """
    copies = i
    for _ in range(tmax + 1):
        yield copies
        copies = np.random.binomial(2 * n, copies / (2 * n))
def plot_red_sim(log_n:(0,10,1)=7, prop_i:(0,1,0.1)=0.5, n_sim:(1,20,1)=1):
    """Plot n_sim independent 50-generation drift trajectories.

    Population size n = exp(log_n) rounded down; the initial copy count is
    prop_i of the 2n available slots.
    """
    pop = int(np.exp(log_n))
    start = int(2 * pop * prop_i)
    print("n = %d, i0 = %d" % (pop, start))
    plt.figure(figsize=[9, 4])
    for _ in range(n_sim):
        plt.plot(list(red_sim(start, pop, 50)))
    plt.xlabel('time')
    plt.ylabel("number of copies of 'a'")
    plt.ylim(-2 * pop * 0.01, 2 * pop * 1.01)
    plt.show()
def mc4state(p1, p2, tmax=1):
    """Four-state enzyme chain; yields the distribution row at each step.

    Starts fully in state A00 and repeatedly applies the transition matrix;
    each yielded value is a plain 4-element list.
    """
    dist = np.matrix([1, 0, 0, 0])
    trans = np.matrix([
        [1 - p1/2 - p2/2, p1/2, p2/2, 0],
        [0, 1 - p2, 0, p2],
        [0, 0, 1 - p1, p1],
        [1, 0, 0, 0],
    ])
    for _ in range(tmax + 1):
        yield dist.tolist()[0]
        dist = dist * trans
def plot_mc4state(p1:(0,1,0.1)=1, p2:(0,1,0.1)=1, plot_all=False):
    """Plot the A00 occupation over 30 steps; optionally all four states."""
    states = list(zip(*mc4state(p1, p2, 30)))
    plt.figure(figsize=[9, 4])
    plt.plot(states[0], label='A00')
    if plot_all:
        plt.plot(states[1], label='A10')
        plt.plot(states[2], label='A01')
        plt.plot(states[3], label='A11', color=col[5])
    plt.xlabel('time')
    plt.ylabel('proportion of enzymes in state')
    plt.ylim(-0.05, 1.05)
    plt.legend()
    plt.show()
|
A Cook County Jail guard was attacked Friday night by an inmate and punched several times.
The guard was trying to secure an inmate in a cell in maximum security around 9 p.m. and was punched repeatedly in the face and head when he tried to close the cell door, according to Cara Smith, spokeswoman for the Cook County sheriff’s office.
She said he was treated at a hospital, and charges will be filed against the inmate, who was in custody for a gun offense.
The attack came a day after two inmates, Martin Alvarado and Cordarryl Stevenson, took over a housing tier at the jail and held another inmate hostage around 4:30 p.m. Thursday, according to Smith.
|
# Copyright 2020 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Basic dense layers."""
import tensorflow as tf
from layers import base_layers # import seq_flow_lite module
from layers import normalization_layers # import seq_flow_lite module
from layers import quantization_layers # import seq_flow_lite module
class BaseQDense(base_layers.BaseLayer):
  """Quantized encoder dense layers.

  A fully connected layer with optional bias, normalization and activation
  whose kernel and outputs go through the project's quantization layers.
  Inputs of rank 3 or 4 are flattened to rank 2 for the matmul and reshaped
  back afterwards.
  """

  def __init__(self,
               units,
               activation=tf.keras.layers.ReLU(),
               bias=True,
               rank=2,
               normalize=True,
               **kwargs):
    # units: output feature dimension; rank: expected input tensor rank.
    # NOTE(review): the default activation object is shared by every instance
    # constructed with the default -- presumably harmless since ReLU holds no
    # trainable state, but confirm before passing a stateful activation.
    self.units = units
    self.rank = rank
    assert rank >= 2 and rank <= 4
    self.activation = activation
    self.bias = bias
    self.normalize = normalize
    # Quantizer applied to the final layer output (see _dense_r2).
    self.qoutput = quantization_layers.ActivationQuantization(**kwargs)
    self._create_normalizer(**kwargs)
    super(BaseQDense, self).__init__(**kwargs)

  def build(self, input_shapes):
    # Create the quantized kernel [in_units, units] and optional bias.
    assert len(input_shapes) == self.rank
    if self.rank == 4:
      # Rank-4 inputs must have a singleton axis 1 or 2 so they can be
      # collapsed to rank 2 and unambiguously restored (see _dense_r34).
      assert input_shapes[1] == 1 or input_shapes[2] == 1
    self.in_units = input_shapes[-1]
    shape = [self.in_units, self.units]
    self.w = self.add_qweight(shape=shape)
    if self.bias:
      self.b = self.add_bias(shape=[self.units])

  def _create_normalizer(self, **kwargs):
    # Hook overridden by subclasses to swap in a different normalizer
    # (e.g. masked batch norm for variable-length sequences).
    self.normalization = normalization_layers.BatchNormalization(**kwargs)

  def _dense_r2(self, inputs, normalize_method):
    # Core rank-2 path: matmul -> bias -> normalize -> activation -> output
    # quantization.  Normalization deliberately precedes the activation.
    outputs = tf.matmul(inputs, self.w)
    if self.bias:
      outputs = tf.nn.bias_add(outputs, self.b)
    if self.normalize:
      outputs = normalize_method(outputs)
    if self.activation:
      outputs = self.activation(outputs)
    return self.qoutput(outputs)

  def _dense_r34(self, inputs, normalize_method):
    # Flatten rank-3/4 inputs to [-1, in_units], run the rank-2 dense, then
    # restore the original layout with the new feature size.
    bsz = self.get_batch_dimension(inputs)
    outputs = tf.reshape(inputs, [-1, self.in_units])
    outputs = self._dense_r2(outputs, normalize_method)
    if self.rank == 3:
      return tf.reshape(outputs, [bsz, -1, self.units])
    elif inputs.get_shape().as_list()[1] == 1:
      return tf.reshape(outputs, [bsz, 1, -1, self.units])
    else:
      return tf.reshape(outputs, [bsz, -1, 1, self.units])

  def call(self, inputs):

    def normalize_method(tensor):
      return self.normalization(tensor)

    return self._do_call(inputs, normalize_method)

  def _do_call(self, inputs, normalize_method):
    # Dispatch on the configured rank; 3 and 4 share the flatten path.
    if self.rank == 2:
      return self._dense_r2(inputs, normalize_method)
    return self._dense_r34(inputs, normalize_method)

  def quantize_using_output_range(self, tensor):
    # Quantize an external tensor using this layer's learned output range.
    return self.qoutput.quantize_using_range(tensor)
class BaseQDenseVarLen(BaseQDense):
  """Dense on variable length sequence."""

  def _create_normalizer(self, **kwargs):
    # Masked batch norm: padding timesteps must not contribute to the
    # moment estimates, so the plain BatchNormalization is replaced.
    self.normalization = normalization_layers.VarLenBatchNormalization(
        rank=2, **kwargs)

  def call(self, inputs, mask, inverse_normalizer):

    def normalize_method(tensor):
      # The rank-2 dense path flattens time into the batch axis, so the
      # mask is reshaped to one column per flattened row.
      return self.normalization(
          tensor, tf.reshape(mask, [-1, 1]), inverse_normalizer)

    return self._do_call(inputs, normalize_method)
|
Michael Moloney and Matthew Ayre discovered the wreck of the Scottish whaling vessel Nova Zembla off the coast of Baffin Island during an expedition in August 2018 funded by The Royal Canadian Geographical Society and outfitted by MEC. The post-doctoral fellows at the University of Calgary’s Arctic Institute of North America embarked on a One Ocean Expeditions voyage aboard the research vessel Akademik Sergey Vavilov to Pond Inlet and set out in a Zodiac, battling one-and-a-half-metre swells, toward a windblown stretch of beach near Buchan Gulf. Here's a look at the MEC gear that helped them make their momentous discovery.
I’m a little particular with my gear, especially when it comes to shells, which probably explains why I’ve opted to endure my decade-old one despite it falling apart around me. This was fine in my small parcel of northeastern England where yes, it rains (a lot), but it’s not an environment you could consider extreme. Moving to Canada changed this, and I now spend a good portion of my time outside, usually on boats in the Arctic or at the Arctic Institute of North America’s Kluane Lake Research Station in Yukon. I was soon aware that effective layering in these cold climates is key, and that my antique shell would no longer cut it. Still, I wasn’t keen on dropping more than $1,000 for a top of the line jacket.
Travelling with One Ocean Expeditions, I had plenty opportunity to get to know the features of MEC's Synergy Gore-Tex Jacket in the variable conditions we encountered. But it was on the search day itself that I gave it its true test — seven hours of exposure on a small Zodiac in the cold early morning hours off a remote point of the Baffin Island coast.
A stiff breeze blowing and a high sea running with constant spray made for less than ideal search conditions. The search was interspersed with long periods of sitting and watching the sonar, and intense periods readying. I was thankful for the jacket’s slim fit and lack of exposed elastic, reducing the risk of getting tangled on something. And I didn’t find it to be restrictive in any way (I am 5’8” with a 40” chest and the medium fit me perfectly). The Gore-Tex did what it does best and kept the elements at bay, while the exterior of the jacket stood up admirably to the rigors of working in close quarters aboard a ship with no hints of any tears or even scuffs, despite ample opportunity for their occurrence. The hood is expansive enough to be worn with a helmet and has a generous semi-rigid peak that effectively keeps Arctic sea spray away from your face, but can be cinched down when you’re not using it.
The zippers on the jacket are generous, but not excessive. The front zip has a waterproof flap behind it to keep any water ingress at bay on those truly wet days. Bigger zip pulls would have been nice, especially if you are predominately using this jacket in cold climates with bulky gloves on, like I do. The Synergy also has underarm vents — an absolute must, in my opinion, for a final outer shell like this. Smaller waterproof zips are used here for increased flexibility. Though they feel a little stiff at first, they break in after the first wear.
The Synergy has three pockets, two external hand pockets and an internal left chest pocket. The hand pockets are generous and you could easily fit a map in there if needed. The inside pocket has information on the installed Recco avalanche rescue reflector printed onto it, an added benefit for those not familiar with the technology.
|
# -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
import logging
from rest_framework import serializers
logger = logging.getLogger(__name__)
class PassThroughSerializer(serializers.Serializer):  # pylint: disable=abstract-method
    # Identity serializer: used when the nested value is already a
    # JSON-serializable structure and needs no field mapping.
    def to_representation(self, instance):
        return instance
class SearchSerializer(serializers.Serializer):  # pylint: disable=abstract-method
    # Read-only serializer for heterogeneous search results.  Field order
    # here is the output order.
    # 'type' renders the result's ContentType as its model name.
    type = serializers.SlugRelatedField(slug_field='model', read_only=True)
    title = serializers.CharField()
    url = serializers.URLField()
    # The matched object is emitted verbatim; it is expected to already be
    # serializable (see PassThroughSerializer).
    object = PassThroughSerializer()
|
OTEX Removalists is not like the average removal service. We care about you and your belongings and always strive to provide you the solutions which will work for you. Naturally enough, relocation needs vary from person to person. That’s why we offer our customers personalized removal plans, on top of the standard plans. This gives the freedom to the customer to select a removal plan that works best for him. For instance, if you are moving far away from your current home, you can hire our interstate Removalists Wilston professionals. As our Removalists Wilston professionals cover the whole of Australia, we can help you relocate to any part of the country. Besides interstate removal, we offer services like home removal, officer removal, etc. All our Removalists Wilston professionals are highly trained and committed and some have years of experience under their belt. OTEX Removalists uses state-of-the-art removal vans and trucks and has all specialized equipment that our experts might need while doing their work. Our Removalists Wilston professionals are adept at handling all kinds of household and office items, including servers, laptops, etc. Give our Removalists Wilston team a chance to serve you and we promise you will not be disappointed.
There are several advantages of hiring experienced, committed removals Wilston professionals, like the ones working for OTEX Removalists. For one, you have the assurance that good care will be taken of your valuables. This kind of assurance is equally important in case of house and office removal. You wouldn’t want your furniture, electronic items, or piano to be damaged while you are moving houses. Similarly, business owners wouldn’t want their servers or important office equipment to be mishandled. Our removals Wilston professionals have successfully handled thousands of house removal jobs and a few hundred office removal jobs. So they know how to take care of house and office items. Two, a committed team is available on all days and the same is the case with Removalists Wilston professionals working with OTEX Removalists. Our team is available throughout the year, so you can schedule your relocation even in holidays and weekends. Three, a committed team like our Removalists Wilston team provides services across Australia. Whether you are moving to a big city or small town, we will carry your stuff there.
OTEX Removalists offer a full range of removal services to its customers. We are a team of experienced and highly-motivated Removalists Wilston professionals who can handle different removal jobs with utmost ease.
House Removalists Wilston – If you are looking for a Removalists Wilston team to help you move your stuff to a new location quickly and efficiently, look no further than OTEX Removalists.
Office Removalists Wilston – It’s no secret that relocating your office is a hard task. However, we can make things easier for you. We are adept in office relocation and take stress and anxiety out of it.
Furniture Removalists Wilston – Only specialized furniture removalists can take great care of your expensive furniture while relocating it. We have some of the best furniture removalists in our Removalists Wilston team. For hassle-free furniture removal, contact us.
Interstate Removalists Wilston – Are you moving to a new city, far away from your current location, and are worried about how you are going to relocate your stuff? Well, say goodbye to all your worries, because our Removalists Wilston team is there to help you.
Piano Movers Wilston – As you might know, the piano is a delicate item. At the same time, it can also be a heavy item. Relocating it can require a lot of expertise, specialized equipment and even manpower, especially if stairs are involved. We have years of Piano removal experience, so you can count on us to relocate your beloved piano to the new location safely and soundly.
Pool Table Movers Wilston – Our Removalists Wilston team can handle pool table removal better than others, so get in touch with us.
Man with a van Wilston – This is a great option in several situations, like when you need to relocate nearby or when you move only a few items from one place to another.
Top-class Customer Service – At Otex furniture removalists Wilston, we understand better than others how important it is for you to get timely and accurate responses to your queries. Our customer service representatives are courteous and professional. Whenever you contact us for any query, our team will give you all the information you require promptly.
Available on all days of the year – You can schedule the moving day on any day of the week, irrespective of whether it is a holiday or weekend. Our furniture Removalists Wilston professionals are available on all days of the year.
No hidden costs – OTEX Removalists is not like those furniture Removalists Wilston who attach hidden fees to the bills of their customers. We stay true to our initial quote always.
Willingly go the extra mile – Our furniture Removalists Wilston will happily walk the extra distance to serve you better, whether it means re-arranging pickup schedule or making extra stops.
Best team – We have the best team of furniture Removalists Wilston professionals, who are highly experienced and committed.
At OTEX Movers Wilston, we pride ourselves at following a well-structured moving process for each and every relocation job. Careful planning, in our opinion, is what helps us to ensure every relocation job goes smoothly and without hiccups.
Initial Enquiry – If you are planning to relocate, simply contact us and raise an inquiry. Don’t worry if you are confused about the whole thing at this stage, because that’s natural. Our movers Wilston professionals will speak to you and allay your fears and anxieties. They might also ask you probing questions to understand your requirements better.
Decide the moving date – If you decide to go ahead with us, we will request you to book a date with us. Our movers Wilston team works on all days of the year, so you can pick a moving date as per your convenience.
Packing, loading, delivering, and unloading – On the scheduled day, OTEX Movers Wilston team will reach you and start packing your stuff. The foreman will tell you our plan for the day. Once all the items have been packed, our Removalists Wilston professionals will load your stuff. You will get an inventory receipt, containing the list of all items that have been loaded.
Follow-up call – The work of our Movers Wilston doesn’t end at unloading your stuff at your new location. Shortly after delivering your stuff, our team will get in touch with you to check if everything went fine and if you require any additional assistance. Otex Wilston local movers will also request you to complete a feedback form.
Talented Staff – This goes without saying. Our Removalists Wilston professionals are highly experienced, trained, and committed. They are adept in handling all kinds of removal jobs, from house removal to office removal, local to interstate removal, furniture removal to piano or pool table removal.
|
class Character():
    """A non-player character that can be described, talked to, and fought."""

    def __init__(self, char_name, char_description):
        """Create a character with a name and a short description."""
        self.name = char_name
        self.description = char_description
        # Line spoken when talked to; stays None until set_conversation().
        self.conversation = None

    def describe(self):
        """Print a short introduction for this character."""
        print(f"{self.name} is here!")
        print(self.description)

    def set_conversation(self, conversation):
        """Set the line this character speaks when talked to."""
        self.conversation = conversation

    def talk(self):
        """Print the character's conversation line, or a refusal if unset."""
        if self.conversation is None:
            print(self.name + " doesn't want to talk to you")
        else:
            print(f"[{self.name} says]: {self.conversation}")

    def fight(self, combat_item):
        """Refuse combat; base characters never fight back.

        Always returns True (the player 'survives' the encounter).
        """
        print(f"{self.name} doesn't want to fight with you")
        return True
class Enemy(Character):
    """A hostile character that can only be defeated with the right item."""

    def __init__(self, char_name, char_description):
        """Create an enemy; its weakness starts unset."""
        super().__init__(char_name, char_description)
        # Name of the item that defeats this enemy; None until set_weakness().
        self.weakness = None

    def fight(self, combat_item):
        """Fight using combat_item; return True if the enemy is defeated."""
        if combat_item != self.weakness:
            print(f"{self.name} crushes you, puny adventurer!")
            return False
        print(f"You fend {self.name} off with the {combat_item}")
        return True

    def set_weakness(self, item_weakness):
        """Record the item this enemy is vulnerable to."""
        self.weakness = item_weakness

    def get_weakness(self):
        """Return the item this enemy is vulnerable to (or None if unset)."""
        return self.weakness
class Friend(Character):
    """A friendly character whose mood can be set and queried."""

    def __init__(self, char_name, char_description):
        """Create a friend; feelings start unset."""
        super().__init__(char_name, char_description)
        # The friend's current feelings; None until set_feelings().
        self.feelings = None

    def set_feelings(self, character_feelings):
        """Record how this friend feels."""
        self.feelings = character_feelings

    def get_feelings(self):
        """Return this friend's feelings (or None if unset)."""
        return self.feelings
|
Please complete the information below if you would like to access employment services through Buzz Lockleaze.
* 5. What type of support do you need?
* 7. If you're job seeking, what hours of work can you do?
If you selected part-time please tell us the hours of the day and days of the week that you want to work.
* 8. Consent. I agree to share my data with Buzz and for them to hold this data on their secure database.
* 9. I agree for Buzz to contact me with information on jobs, training, events, volunteering and support. Please tick how you would like to be contacted.
|
# -*- coding: utf-8 -*-
# TenderQuestionResourceTest
def patch_tender_question(self):
    """Answer a tender question via PATCH, verify 404 paths for unknown
    ids, and verify answering is forbidden once the tender leaves the
    enquiry period."""
    # Create a question authored by the first bid's tenderer.
    response = self.app.post_json('/tenders/{}/questions'.format(
        self.tender_id), {'data': {'title': 'question title', 'description': 'question description',
        'author': self.test_bids_data[0]['tenderers'][0]}})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    question = response.json['data']
    # The tender owner (acc_token) answers the question.
    response = self.app.patch_json(
        '/tenders/{}/questions/{}?acc_token={}'.format(self.tender_id, question['id'], self.tender_token),
        {"data": {"answer": "answer"}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']["answer"], "answer")
    # Patching a non-existent question id yields a structured 404 error.
    response = self.app.patch_json('/tenders/{}/questions/some_id'.format(self.tender_id),
                                   {"data": {"answer": "answer"}}, status=404)
    self.assertEqual(response.status, '404 Not Found')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': u'Not Found', u'location':
            u'url', u'name': u'question_id'}
    ])
    # Same for a non-existent tender id.
    response = self.app.patch_json('/tenders/some_id/questions/some_id', {"data": {"answer": "answer"}}, status=404)
    self.assertEqual(response.status, '404 Not Found')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': u'Not Found', u'location':
            u'url', u'name': u'tender_id'}
    ])
    # The stored answer is visible via GET.
    response = self.app.get('/tenders/{}/questions/{}'.format(self.tender_id, question['id']))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']["answer"], "answer")
    # Advance time past the enquiry period; per the expected error below,
    # the tender ends up unsuccessful and answers can no longer be changed.
    self.time_shift('active.pre-qualification')
    self.check_chronograph()
    response = self.app.patch_json(
        '/tenders/{}/questions/{}?acc_token={}'.format(self.tender_id, question['id'], self.tender_token),
        {"data": {"answer": "answer"}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"],
                     "Can't update question in current (unsuccessful) tender status")
def answering_question(self):
    """Answer a tender question and verify the answer plus an auto-set
    dateAnswered timestamp are returned."""
    # Create a question authored by the first bid's tenderer.
    response = self.app.post_json('/tenders/{}/questions'.format(
        self.tender_id), {'data': {'title': 'question title', 'description': 'question description',
        'author': self.test_bids_data[0]['tenderers'][0]}})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    question = response.json['data']
    # The tender owner answers; the response carries the answer text
    # and a server-assigned dateAnswered.
    response = self.app.patch_json(
        '/tenders/{}/questions/{}?acc_token={}'.format(self.tender_id, question['id'], self.tender_token),
        {"data": {"answer": "answer"}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']["answer"], "answer")
    self.assertIn('dateAnswered', response.json['data'])
    # Mirror the server-side changes into the local copy of the question.
    question["answer"] = "answer"
    question['dateAnswered'] = response.json['data']['dateAnswered']
    # Advance the tender past the enquiry period.
    self.time_shift('active.pre-qualification')
    self.check_chronograph()
|
Low table. The original solid shape of the marble base in contrast with the lightness of the table top which seems to softly raise up from the base, achieved by tapering the top of the legs and the underside of the table, it gives the impression of weightlessness.
More information about Seesaw 30?
Fill out the form to receive all the information on Seesaw 30!
I would like more information about the product "Seesaw 30" of category "Low Tables".
|
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
"""Generic logging, the way I remember it from scripts gone by.
TODO:
- network logging support.
- log rotation config
"""
from datetime import datetime
import logging
import os
import sys
import traceback
# Define our own FATAL_LEVEL, one notch above CRITICAL; FATAL messages
# force an exit (see BaseLogger.log_message / LogMixin.log).
FATAL_LEVEL = logging.CRITICAL + 10
logging.addLevelName(FATAL_LEVEL, 'FATAL')
# mozharness log levels as strings, least to most severe; IGNORE is a
# special level that suppresses the message entirely.
DEBUG, INFO, WARNING, ERROR, CRITICAL, FATAL, IGNORE = (
    'debug', 'info', 'warning', 'error', 'critical', 'fatal', 'ignore')
# LogMixin {{{1
class LogMixin(object):
    """Mixin giving any object a common logging interface.

    If the host object has a truthy ``log_obj`` (a BaseLogger), messages
    are delegated to it; otherwise they are printed directly, filtered by
    the ``log_level`` found in ``self.config`` (when present).
    """
    def _log_level_at_least(self, level):
        """Return True if `level` is at or above the configured threshold
        (defaulting to INFO when there is no config)."""
        log_level = INFO
        levels = [DEBUG, INFO, WARNING, ERROR, CRITICAL, FATAL]
        if hasattr(self, 'config'):
            log_level = self.config.get('log_level', INFO)
        return levels.index(level) >= levels.index(log_level)
    def _print(self, message, stderr=False):
        """Print `message` to stdout (or stderr), unless console output is
        disabled via config['log_to_console']."""
        if not hasattr(self, 'config') or self.config.get('log_to_console', True):
            if stderr:
                print >> sys.stderr, message
            else:
                print message
    def log(self, message, level=INFO, exit_code=-1):
        """Log `message` at `level`; FATAL raises SystemExit(exit_code).

        NOTE(review): this reads ``self.log_obj`` unconditionally --
        presumably the host class always defines it; confirm.
        """
        if self.log_obj:
            return self.log_obj.log_message(
                message, level=level,
                exit_code=exit_code,
                post_fatal_callback=self._post_fatal,
            )
        # No log_obj: fall back to plain printing with level prefixes.
        if level == INFO:
            if self._log_level_at_least(level):
                self._print(message)
        elif level == DEBUG:
            if self._log_level_at_least(level):
                self._print('DEBUG: %s' % message)
        elif level in (WARNING, ERROR, CRITICAL):
            if self._log_level_at_least(level):
                self._print("%s: %s" % (level.upper(), message), stderr=True)
        elif level == FATAL:
            if self._log_level_at_least(level):
                self._print("FATAL: %s" % message, stderr=True)
                raise SystemExit(exit_code)
    def worst_level(self, target_level, existing_level, levels=None):
        """returns either existing_level or target level.
        This depends on which is closest to levels[0]
        By default, levels is the list of log levels"""
        if not levels:
            # Ordered worst-first, so the first hit is the worse level.
            levels = [FATAL, CRITICAL, ERROR, WARNING, INFO, DEBUG, IGNORE]
        if target_level not in levels:
            self.fatal("'%s' not in %s'." % (target_level, levels))
        for l in levels:
            if l in (target_level, existing_level):
                return l
    # Copying Bear's dumpException():
    # https://hg.mozilla.org/build/tools/annotate/1485f23c38e0/sut_tools/sut_lib.py#l23
    def exception(self, message=None, level=ERROR):
        """Log the current exception's traceback, prefixed by `message`."""
        tb_type, tb_value, tb_traceback = sys.exc_info()
        if message is None:
            message = ""
        else:
            message = "%s\n" % message
        for s in traceback.format_exception(tb_type, tb_value, tb_traceback):
            message += "%s\n" % s
        # Log at the end, as a fatal will attempt to exit after the 1st line.
        self.log(message, level=level)
    # Convenience wrappers, one per log level.
    def debug(self, message):
        self.log(message, level=DEBUG)
    def info(self, message):
        self.log(message, level=INFO)
    def warning(self, message):
        self.log(message, level=WARNING)
    def error(self, message):
        self.log(message, level=ERROR)
    def critical(self, message):
        self.log(message, level=CRITICAL)
    def fatal(self, message, exit_code=-1):
        self.log(message, level=FATAL, exit_code=exit_code)
    def _post_fatal(self, message=None, exit_code=None):
        """ Sometimes you want to create a report or cleanup
        or notify on fatal(); override this method to do so.
        Please don't use this for anything significantly long-running.
        """
        pass
# OutputParser {{{1
class OutputParser(LogMixin):
    """ Helper object to parse command output.
    This will buffer output if needed, so we can go back and mark
    [(linenum - 10):linenum+10] as errors if need be, without having to
    get all the output first.
    linenum+10 will be easy; we can set self.num_post_context_lines to 10,
    and self.num_post_context_lines-- as we mark each line to at least error
    level X.
    linenum-10 will be trickier. We'll not only need to save the line
    itself, but also the level that we've set for that line previously,
    whether by matching on that line, or by a previous line's context.
    We should only log that line if all output has ended (self.finish() ?);
    otherwise store a list of dictionaries in self.context_buffer that is
    buffered up to self.num_pre_context_lines (set to the largest
    pre-context-line setting in error_list.)
    """
    def __init__(self, config=None, log_obj=None, error_list=None, log_output=True):
        """Set up counters and store the error_list to match lines against."""
        self.config = config
        self.log_obj = log_obj
        self.error_list = error_list or []
        self.log_output = log_output
        self.num_errors = 0
        self.num_warnings = 0
        # TODO context_lines.
        # Not in use yet, but will be based off error_list.
        self.context_buffer = []
        self.num_pre_context_lines = 0
        self.num_post_context_lines = 0
        # Most severe level seen so far across all parsed lines.
        self.worst_log_level = INFO
    def parse_single_line(self, line):
        """Match `line` against error_list entries; log it at the matched
        entry's level and bump the error/warning counters."""
        for error_check in self.error_list:
            # TODO buffer for context_lines.
            match = False
            # Each entry matches by plain substring or compiled regex.
            if 'substr' in error_check:
                if error_check['substr'] in line:
                    match = True
            elif 'regex' in error_check:
                if error_check['regex'].search(line):
                    match = True
            else:
                self.warning("error_list: 'substr' and 'regex' not in %s" %
                             error_check)
            if match:
                log_level = error_check.get('level', INFO)
                if self.log_output:
                    message = ' %s' % line
                    if error_check.get('explanation'):
                        message += '\n %s' % error_check['explanation']
                    if error_check.get('summary'):
                        # NOTE(review): add_summary is not defined in this
                        # class or LogMixin -- presumably supplied by a
                        # subclass; confirm before using 'summary' entries.
                        self.add_summary(message, level=log_level)
                    else:
                        self.log(message, level=log_level)
                if log_level in (ERROR, CRITICAL, FATAL):
                    self.num_errors += 1
                if log_level == WARNING:
                    self.num_warnings += 1
                self.worst_log_level = self.worst_level(log_level,
                                                        self.worst_log_level)
                # First matching entry wins; stop scanning error_list.
                break
        else:
            # No entry matched: just echo the line at INFO.
            if self.log_output:
                self.info(' %s' % line)
    def add_lines(self, output):
        """Feed a string or list of strings through parse_single_line,
        skipping empty/whitespace-only lines (Python 2: decodes bytes)."""
        if isinstance(output, basestring):
            output = [output]
        for line in output:
            if not line or line.isspace():
                continue
            line = line.decode("utf-8", 'replace').rstrip()
            self.parse_single_line(line)
# BaseLogger {{{1
class BaseLogger(object):
    """Create a base logging class.
    TODO: status? There may be a status object or status capability in
    either logging or config that allows you to count the number of
    error,critical,fatal messages for us to count up at the end (aiming
    for 0).
    """
    # Map mozharness string levels to stdlib logging numeric levels.
    LEVELS = {
        DEBUG: logging.DEBUG,
        INFO: logging.INFO,
        WARNING: logging.WARNING,
        ERROR: logging.ERROR,
        CRITICAL: logging.CRITICAL,
        FATAL: FATAL_LEVEL
    }
    def __init__(
        self, log_level=INFO,
        log_format='%(message)s',
        log_date_format='%H:%M:%S',
        log_name='test',
        log_to_console=True,
        log_dir='.',
        log_to_raw=False,
        logger_name='',
        append_to_log=False,
    ):
        """Store logging configuration and ensure log_dir exists.

        No handlers are created here; subclasses call new_logger().
        """
        self.log_format = log_format
        self.log_date_format = log_date_format
        self.log_to_console = log_to_console
        self.log_to_raw = log_to_raw
        self.log_level = log_level
        self.log_name = log_name
        self.log_dir = log_dir
        self.append_to_log = append_to_log
        # Not sure what I'm going to use this for; useless unless we
        # can have multiple logging objects that don't trample each other
        self.logger_name = logger_name
        self.all_handlers = []
        self.log_files = {}
        self.create_log_dir()
    def create_log_dir(self):
        """Ensure self.log_dir exists as a directory and cache its
        absolute path in self.abs_log_dir."""
        if os.path.exists(self.log_dir):
            if not os.path.isdir(self.log_dir):
                # A non-directory is squatting on the path; remove it.
                os.remove(self.log_dir)
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)
        self.abs_log_dir = os.path.abspath(self.log_dir)
    def init_message(self, name=None):
        """Log a startup banner with timestamp and working directory."""
        if not name:
            name = self.__class__.__name__
        self.log_message("%s online at %s in %s" %
                         (name, datetime.now().strftime("%Y%m%d %H:%M:%S"),
                          os.getcwd()))
    def get_logger_level(self, level=None):
        """Translate a mozharness level name to a numeric logging level
        (defaults to self.log_level; unknown names map to NOTSET)."""
        if not level:
            level = self.log_level
        return self.LEVELS.get(level, logging.NOTSET)
    def get_log_formatter(self, log_format=None, date_format=None):
        """Build a logging.Formatter, defaulting to this logger's formats."""
        if not log_format:
            log_format = self.log_format
        if not date_format:
            date_format = self.log_date_format
        return logging.Formatter(log_format, date_format)
    def new_logger(self, logger_name):
        """Create a new logger.
        By default there are no handlers.
        """
        self.logger = logging.getLogger(logger_name)
        self.logger.setLevel(self.get_logger_level())
        self._clear_handlers()
        if self.log_to_console:
            self.add_console_handler()
        if self.log_to_raw:
            # Raw log: message text only, no level/date decoration.
            self.log_files['raw'] = '%s_raw.log' % self.log_name
            self.add_file_handler(os.path.join(self.abs_log_dir,
                                               self.log_files['raw']),
                                  log_format='%(message)s')
    def _clear_handlers(self):
        """To prevent dups -- logging will preserve Handlers across
        objects :(
        """
        attrs = dir(self)
        if 'all_handlers' in attrs and 'logger' in attrs:
            for handler in self.all_handlers:
                self.logger.removeHandler(handler)
            self.all_handlers = []
    def __del__(self):
        # Flush/close handlers when this object is garbage collected.
        logging.shutdown()
        self._clear_handlers()
    def add_console_handler(self, log_level=None, log_format=None,
                            date_format=None):
        """Attach a StreamHandler with the given level and format."""
        console_handler = logging.StreamHandler()
        console_handler.setLevel(self.get_logger_level(log_level))
        console_handler.setFormatter(self.get_log_formatter(log_format=log_format,
                                                            date_format=date_format))
        self.logger.addHandler(console_handler)
        self.all_handlers.append(console_handler)
    def add_file_handler(self, log_path, log_level=None, log_format=None,
                         date_format=None):
        """Attach a FileHandler at log_path, truncating an existing file
        first unless append_to_log is set."""
        if not self.append_to_log and os.path.exists(log_path):
            os.remove(log_path)
        file_handler = logging.FileHandler(log_path)
        file_handler.setLevel(self.get_logger_level(log_level))
        file_handler.setFormatter(self.get_log_formatter(log_format=log_format,
                                                         date_format=date_format))
        self.logger.addHandler(file_handler)
        self.all_handlers.append(file_handler)
    def log_message(self, message, level=INFO, exit_code=-1, post_fatal_callback=None):
        """Generic log method.
        There should be more options here -- do or don't split by line,
        use os.linesep instead of assuming \n, be able to pass in log level
        by name or number.
        Adding the IGNORE special level for runCommand.
        """
        if level == IGNORE:
            return
        # Log each line separately so multi-line messages stay readable.
        for line in message.splitlines():
            self.logger.log(self.get_logger_level(level), line)
        if level == FATAL:
            # Run the optional callback, then exit.
            if callable(post_fatal_callback):
                self.logger.log(FATAL_LEVEL, "Running post_fatal callback...")
                post_fatal_callback(message=message, exit_code=exit_code)
            self.logger.log(FATAL_LEVEL, 'Exiting %d' % exit_code)
            raise SystemExit(exit_code)
# SimpleFileLogger {{{1
class SimpleFileLogger(BaseLogger):
    """Log everything to a single file, optionally echoing to the console
    and to a raw log (no prepending of level or date)."""

    def __init__(self,
                 log_format='%(asctime)s %(levelname)8s - %(message)s',
                 logger_name='Simple', log_dir='logs', **kwargs):
        super(SimpleFileLogger, self).__init__(
            logger_name=logger_name, log_format=log_format,
            log_dir=log_dir, **kwargs)
        self.new_logger(self.logger_name)
        self.init_message()

    def new_logger(self, logger_name):
        """Create the logger and attach the single default file handler."""
        super(SimpleFileLogger, self).new_logger(logger_name)
        default_path = os.path.join(self.abs_log_dir, '%s.log' % self.log_name)
        self.log_path = default_path
        self.log_files['default'] = default_path
        self.add_file_handler(default_path)
# MultiFileLogger {{{1
class MultiFileLogger(BaseLogger):
    """Log to one file per enabled log level in log_dir, optionally also
    echoing to the console and a raw log (no prepending of level or date)."""

    def __init__(self, logger_name='Multi',
                 log_format='%(asctime)s %(levelname)8s - %(message)s',
                 log_dir='logs', log_to_raw=True, **kwargs):
        super(MultiFileLogger, self).__init__(
            logger_name=logger_name,
            log_format=log_format,
            log_to_raw=log_to_raw, log_dir=log_dir,
            **kwargs)
        self.new_logger(self.logger_name)
        self.init_message()

    def new_logger(self, logger_name):
        """Create the logger and one file handler per level at or above
        the configured log level."""
        super(MultiFileLogger, self).new_logger(logger_name)
        min_logger_level = self.get_logger_level(self.log_level)
        for level in self.LEVELS.keys():
            # Skip levels below the configured threshold.
            if self.get_logger_level(level) < min_logger_level:
                continue
            file_name = '%s_%s.log' % (self.log_name, level)
            self.log_files[level] = file_name
            self.add_file_handler(os.path.join(self.abs_log_dir, file_name),
                                  log_level=level)
# __main__ {{{1
if __name__ == '__main__':
    # This module is import-only; running it directly is a no-op.
    pass
|
While I did not truly see any play structures deals to mention at Kroger, between scarfing down samples of combined berry healthy smoothies, natural soup, and losing half of my household for rather a very long time in the enormous and hectic shop, I had a look around Kroger visa gift card check balance area and can a minimum of comprehend why folks would like that part of the shop.
It is not a surprise that Kroger has some third-party merchant Kroger visa gift card check balance offered at a discount rate for Kroger members, however this is not really simply a little discount rate, in many cases there are significant discount rates on Kroger visa gift card check balance offered both in-store and online. Let us begin with the one that I bought, 2 $75 Blue Apron gift cards that have a stated value of $150.
Are you searching for a present this Holiday Season for somebody that is challenging to buy? Well, Prepaid Kroger visa gift card check balance might simply be the response! Practical and flexible, a Kroger visa gift card check balance enables the recipient to purchase exactly what they desire, when they desire and who does not take pleasure in that? Kroger Cash Cards are another practical method to patronize our storage facilities, filling station, and online. Kroger visa gift card check balance are best for Valentine Day, birthdays or anniversaries and they can be exchanged straight for meals or hospitality. Kroger visa gift card check balance and coupons are a practical and enjoyable method to delight in a night on the town.
|
from a10sdk.common.A10BaseClass import A10BaseClass
class PriorityPartition(A10BaseClass):
    """Configure a partition to always be polled for techreport.

    Supports CRUD operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.

    :param part_name: Name of the partition to always show in showtech
        (shared is always shown by default). String, 1-14 chars, required.
    :param uuid: uuid of the object. String, 1-64 chars, optional,
        modify-not-allowed.
    :param DeviceProxy: The device proxy for REST operations and session
        handling. Refer to `common/device_proxy.py`.

    URL for this object::

        `https://<Hostname|Ip address>//axapi/v3/techreport/priority-partition/{part_name}`.
    """

    def __init__(self, **kwargs):
        # Defaults expected by A10BaseClass before kwargs are applied.
        self.ERROR_MSG = ""
        self.required = ["part_name"]
        self.b_key = "priority-partition"
        self.a10_url = "/axapi/v3/techreport/priority-partition/{part_name}"
        self.DeviceProxy = ""
        self.part_name = ""
        self.uuid = ""
        # Apply any caller-supplied attribute overrides.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
|
use_path_style_endpoint: (bool) Set to true to send requests to an S3 path style endpoint by default. Can be enabled or disabled on individual operations by setting '@use_path_style_endpoint' to true or false. Note: you cannot use it together with an accelerate endpoint. Before creating the signed URL, you need to check if the file exists directly from the bucket. One way to do that is by requesting the HEAD metadata.
I’m struggling to rename a file within the same bucket with Amazon S3 SDK. I am referring to copy object in the API docs. Here is my call, but it keeps returning “specified bucket does not exist”.
Here's the catch: you need to read the properties of the object you want to maintain, and re-apply them when you replace an object with itself (i.e., an in-place update) because otherwise S3 will either ignore them, or reset the values to default (e.g., the ContentType value).
The method returns a ListBucketsResponse object which will hold the buckets. You’ll see a lot of these Request and Response objects throughout the AWS SDK. Amazon are fond of wrapping the request parameters and response properties into Request and Response objects adhering to the RequestResponse pattern.
Amazon actually provides one. And there are lots of examples on the web of using it. Google is your friend.
|
# --- Day 25: Let It Snow ---
#
# Merry Christmas! Santa is booting up his weather machine; looks like you might get a white Christmas after all.
#
# The weather machine beeps! On the console of the machine is a copy protection message asking you to enter a code from
# the instruction manual. Apparently, it refuses to run unless you give it that code. No problem; you'll just look up
# the code in the--
#
# "Ho ho ho", Santa ponders aloud. "I can't seem to find the manual."
#
# You look up the support number for the manufacturer and give them a call. Good thing, too - that 49th star wasn't
# going to earn itself.
#
# "Oh, that machine is quite old!", they tell you. "That model went out of support six minutes ago, and we just
# finished shredding all of the manuals. I bet we can find you the code generation algorithm, though."
#
# After putting you on hold for twenty minutes (your call is very important to them, it reminded you repeatedly), they
# finally find an engineer that remembers how the code system works.
#
# The codes are printed on an infinite sheet of paper, starting in the top-left corner. The codes are filled in by
# diagonals: starting with the first row with an empty first box, the codes are filled in diagonally up and to the
# right. This process repeats until the infinite paper is covered. So, the first few codes are filled in in this order:
#
# | 1 2 3 4 5 6
# ---+---+---+---+---+---+---+
# 1 | 1 3 6 10 15 21
# 2 | 2 5 9 14 20
# 3 | 4 8 13 19
# 4 | 7 12 18
# 5 | 11 17
# 6 | 16
#
# For example, the 12th code would be written to row 4, column 2; the 15th code would be written to row 1, column 5.
#
# The voice on the other end of the phone continues with how the codes are actually generated. The first code is
# 20151125. After that, each code is generated by taking the previous one, multiplying it by 252533, and then keeping
# the remainder from dividing that value by 33554393.
#
# So, to find the second code (which ends up in row 2, column 1), start with the previous value, 20151125. Multiply it
# by 252533 to get 5088824049625. Then, divide that by 33554393, which leaves a remainder of 31916031. That remainder
# is the second code.
#
# "Oh!", says the voice. "It looks like we missed a scrap from one of the manuals. Let me read it to you." You write
# down his numbers:
#
# | 1 2 3 4 5 6
# ---+---------+---------+---------+---------+---------+---------+
# 1 | 20151125 18749137 17289845 30943339 10071777 33511524
# 2 | 31916031 21629792 16929656 7726640 15514188 4041754
# 3 | 16080970 8057251 1601130 7981243 11661866 16474243
# 4 | 24592653 32451966 21345942 9380097 10600672 31527494
# 5 | 77061 17552253 28094349 6899651 9250759 31663883
# 6 | 33071741 6796745 25397450 24659492 1534922 27995004
#
# "Now remember", the voice continues, "that's not even all of the first few numbers; for example, you're missing the
# one at 7,1 that would come before 6,2. But, it should be enough to let your-- oh, it's time for lunch! Bye!" The call
# disconnects.
#
# Santa looks nervous. Your puzzle input contains the message on the machine's console. What code do you give the
# machine?
#
# --- Part Two ---
#
# The machine springs to life, then falls silent again. It beeps. "Insufficient fuel", the console reads. "Fifty stars
# are required before proceeding. One star is available."
#
# ..."one star is available"? You check the fuel tank; sure enough, a lone star sits at the bottom, awaiting its
# friends. Looks like you need to provide 49 yourself.
#
# You have enough stars to [Start the machine].
#
# You fill the weather machine with fifty stars. It comes to life!
#
# Snow begins to fall.
#
# Congratulations! You've finished every puzzle! I hope you had as much fun solving them as I had making them for you.
# I'd love to hear about your adventure; you can get in touch with me via contact info on my website or through Twitter.
def next_code(current_code):
    """Return the code that follows *current_code* in the machine's sequence.

    Each code is the previous one multiplied by 252533, modulo 33554393
    (as described in the puzzle text above).
    """
    return (current_code * 252533) % 33554393


def code_at(row, col):
    """Return the code at (row, col) of the infinite diagonally-filled grid.

    Codes are laid out along anti-diagonals: (1,1), (2,1), (1,2), (3,1),
    (2,2), (1,3), ...  Position (row, col) lies on diagonal
    d = row + col - 1 and is the n-th code overall, where
    n = d*(d-1)//2 + col.  Using three-argument pow() computes the answer
    in O(log n) instead of iterating millions of predecessors one by one
    (the original loop also left an unused ``max_y`` variable behind).
    """
    diagonal = row + col - 1
    index = diagonal * (diagonal - 1) // 2 + col
    # code(n) = 20151125 * 252533**(n-1)  (mod 33554393)
    return (20151125 * pow(252533, index - 1, 33554393)) % 33554393


target_x = 2981
target_y = 3075
code = code_at(target_x, target_y)
print("{0}, {1} = {2}".format(target_x, target_y, code))
|
Section 9: In this video we examine the primary ways to connect Odoo with other applications, import data, and access the models within Odoo remotely. You will learn how to use XML-RPC to search, write, and update data within Odoo's models.
This course is available on Udemy!
While Odoo's standard CSV import can be useful for bringing in some simple data sets, it has extreme limitations. First off, data coming from other systems is rarely organized in the same way as Odoo organizes data within the framework. Even just importing a simple customer list can be a challenge through standard CSV. A best-case scenario is that you modify the data extensively before importing it so that it fits Odoo's structure. Fortunately, there is a better way.
In this video we explore Odoo's powerful API to integrate Odoo with other systems and specifically to import or update data within Odoo. Using XML-RPC you can use the API to translate and automate the importing of your data. In this video we even demonstrate how you can sync the data so that you can run your update routines over and over, changing only the data you wish to have changed.
|
#!/usr/bin/env python
"""
NAME
read_tree.py - a pyframe example script
SYNOPSIS
./read_tree.py myntuple.root
DESCRIPTION
Demonstrates how to read trees in pyframe and make some histograms.
OPTIONS
-h, --help
Prints this manual and exits.
-o, --output output.hists.root
Output file name.
-t, --tree myntuple
Input tree name.
AUTHOR
Ryan Reece <ryan.reece@cern.ch>
COPYRIGHT
Copyright 2015 Ryan Reece
License: GPL <http://www.gnu.org/licenses/gpl.html>
SEE ALSO
- pyframe <https://github.com/rreece/pyframe/>
- ROOT <http://root.cern.ch>
TO DO
- One.
- Two.
2015-05-26
"""
#------------------------------------------------------------------------------
# imports
#------------------------------------------------------------------------------
## std
import argparse
## ROOT
import ROOT
ROOT.gROOT.SetBatch(True)
## my modules
import pyrootutils
import pyframe
#------------------------------------------------------------------------------
# globals
#------------------------------------------------------------------------------
GeV = 1000.
#------------------------------------------------------------------------------
# options
#------------------------------------------------------------------------------
def options():
    """Parse command-line options for this script.

    Requires at least one positional input file; returns the parsed
    argparse namespace (infiles, input, output, tree).
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('infiles', default=None, nargs='+',
                    help='Input files as separate args.')
    ap.add_argument('-i', '--input', default=None,
                    help='Input files as a comma-separated list.')
    ap.add_argument('-o', '--output', default='output.hists.root',
                    help='Name of output file.')
    ap.add_argument('-t', '--tree', default='myntuple',
                    help='Name of input tree.')
    parsed = ap.parse_args()
    assert parsed.infiles
    return parsed
#------------------------------------------------------------------------------
# main
#------------------------------------------------------------------------------
def main():
    """Script entry point: parse options, configure and run the event loop."""
    ops = options()
    ## get input files and output options
    input_files = list(ops.infiles)
    if ops.input:
        s_input = str(ops.input)
        input_files.extend( s_input.split(',') )
    tree_name = ops.tree
    # NOTE(review): ops.output is parsed but never used below -- the output
    # histogram file name is presumably picked up by pyframe elsewhere;
    # confirm, otherwise the -o option is silently ignored.
    plot_output = ops.output
    ## make an EventLoop
    loop = pyframe.core.EventLoop('TestLoop', tree=tree_name)
    loop.add_input_files(input_files)
    ## schedule algorithms
    loop += PlotsAlg()
    ## run the event loop
    loop.run()
    print 'Done.'
#------------------------------------------------------------------------------
# PlotsAlg
#------------------------------------------------------------------------------
class PlotsAlg(pyframe.core.Algorithm):
    """Per-event algorithm that fills example histograms.

    Fills event-level histograms (event weight, photon multiplicity) and a
    per-photon pt histogram from the input tree branches.
    """
    #__________________________________________________________________________
    def __init__(self, name='PlotsAlg'):
        pyframe.core.Algorithm.__init__(self, name)
    #__________________________________________________________________________
    def execute(self):
        """Called once per event by the pyframe event loop."""
        # NOTE(review): all events are filled with unit weight here; swap in
        # self.chain.w if weighted histograms are wanted.
        weight = 1.0
        ## fill event-level histograms
        self.hist('h_w', "ROOT.TH1F('$', ';w;Events', 20, -2.0, 3.0)").Fill(self.chain.w, weight)
        self.hist('h_ph_n', "ROOT.TH1F('$', ';ph_n;Events', 20, -0.5, 19.5)").Fill(self.chain.ph_n, weight)
        ## build VarProxies for photons
        photons = self.chain.build_var_proxies('ph_', self.chain.ph_n)
        ## fill histograms per photon
        for ph in photons:
            self.hist('h_ph_pt', "ROOT.TH1F('$', ';ph_pt;Events / (10 GeV)', 20, 0.0, 200)").Fill(ph.pt/GeV, weight)
#------------------------------------------------------------------------------
# Script entry point.
if __name__ == '__main__': main()
|
Police investigating fatal crash in New Ulm, Minn.
NEW ULM, Minn. (KMSP) - Police in New Ulm, Minnesota are investigating a fatal accident involving a pedestrian that happened in the area of 6th Street North and Broadway around 4:15 p.m. Thursday afternoon.
Once on scene, authorities found a pedestrian had been struck by a motor vehicle. The pedestrian was transported to a hospital in New Ulm before being airlifted to HCMC, where they later died of their injuries.
The crash remains under investigation and the identities of both the driver and the pedestrian haven’t been released yet.
|
# -*- coding: utf-8 -*-
import classes.level_controller as lc
import classes.game_driver as gd
import classes.extras as ex
import classes.board
import random
import pygame
class Board(gd.BoardGame):
    """Multiplication-table board game.

    Shows a 9x9 grid of products plus a ring of the 36 unique products
    around the board. The player answers ``num1 x num2`` either by typing
    the product into the answer slot or by dragging the matching ring tile
    onto it.
    """
    def __init__(self, mainloop, speaker, config, screen_w, screen_h):
        # 36 boards per level, 6 levels; 23 x 9 grid for the game board.
        self.level = lc.Level(self,mainloop,36,6)
        gd.BoardGame.__init__(self,mainloop,speaker,config,screen_w,screen_h,23,9)
    def create_game_objects(self, level = 1):
        """Build the 9x9 product grid, the outline of answer tiles and the task row."""
        self.board.decolorable = False
        self.vis_buttons = [1,1,1,1,1,1,1,0,0]
        self.mainloop.info.hide_buttonsa(self.vis_buttons)
        # Color setup: randomized hue when using the default scheme,
        # fixed hue/saturation when a custom color scheme is active.
        if self.mainloop.scheme is None:
            s = random.randrange(150, 225, 5)
            v = random.randrange(190, 225, 5)
            h = random.randrange(0, 255, 5)
            color0 = ex.hsv_to_rgb(h,40,230) #highlight 1
            color1 = ex.hsv_to_rgb(h,70,v) #highlight 2
            color2 = ex.hsv_to_rgb(h,s,v) #normal color
            color3 = ex.hsv_to_rgb(h,230,100)
            task_bg_color = (255,255,255)
            task_font_color = (0,0,0)
        else:
            s = 150
            v = 225
            h = 170
            color0 = ex.hsv_to_rgb(h,40,230) #highlight 1
            color1 = ex.hsv_to_rgb(h,70,v) #highlight 2
            color2 = ex.hsv_to_rgb(h,s,v) #normal color
            color3 = ex.hsv_to_rgb(h,230,100)
            task_bg_color = self.mainloop.scheme.u_color
            task_font_color = self.mainloop.scheme.u_font_color
        white = (255,255,255)
        #data = [x_count, y_count, range_from, range_to, max_sum_range, image]
        self.points = 1
        # Higher levels progressively remove the color hints (all colors
        # collapse to one, then to black) and award more points.
        if self.level.lvl == 1:
            data = [23,9]
        elif self.level.lvl == 2:
            data = [23,9]
            color1 = color0
        elif self.level.lvl == 3:
            data = [23,9]
            color1 = color2 = color0
        elif self.level.lvl == 4:
            data = [23,9]
            color1 = color2 = color0
        elif self.level.lvl == 5:
            data = [23,9]
            color0 = (0,0,0)
            self.points = 2
        elif self.level.lvl == 6:
            data = [23,9]
            color2 = color1 = color0 = (0,0,0)
            color3 = (40,40,40)
            self.points = 3
        self.data = data
        self.board.level_start(data[0],data[1],self.layout.scale)
        # Pick the task: num1 x num2; solution[2] is the expected product.
        num1 = random.randrange(1,10)
        num2 = random.randrange(1,10)
        self.solution = [num1,num2,num1 * num2]
        self.digits = ["0","1","2","3","4","5","6","7","8","9"]
        unique = set()
        # 9x9 grid of all products; the task cell gets color0 and the task
        # factors' row/column get color1 as visual hints.
        for i in range(1,10):
            for j in range(1,10):
                if i == num1 and j == num2: color=color0
                elif i == num1 or j == num2: color=color1
                elif self.level.lvl == 2 and (i == num2 or j == num1):color=color1
                else: color = color2
                mul = i*j
                unique.add(mul)
                caption = str(mul)
                self.board.add_unit(i-1,j-1,1,1,classes.board.Label,caption,color,"",2)
        # Vertical divider between the product grid and the rest of the board.
        self.board.add_unit(9,0,1,9,classes.board.Obstacle,"",color3)
        unique = sorted(unique)
        #draw outline with selectable numbers
        self.multi = dict()
        if self.mainloop.scheme is None:
            s = 180
        else:
            s = 80
        v = 240
        h = 7
        # Walk the 36 unique products clockwise around a rectangular outline:
        # right along the top, down the right side, left along the bottom,
        # then up the left side.
        x = 11
        y = 0
        for i in range(36):
            if i < 9: x += 1
            elif i == 9: x = 22
            elif i < 18: y += 1
            elif i == 18: x = 20
            elif i < 27: x -= 1
            elif i == 27: x = 10
            elif i <= 36: y -= 1
            color = ex.hsv_to_rgb(h*i,s,v)
            self.multi[str(unique[i])]=i
            caption = str(unique[i])
            self.board.add_unit(x,y,1,1,classes.board.Letter,caption,color,"",2)
            self.board.ships[-1].audible = False
            if self.lang.lang == "he":
                # Hebrew: pre-compute the spoken form of the number.
                sv = self.lang.n2spk(unique[i])
                self.board.ships[-1].speaker_val = sv
                self.board.ships[-1].speaker_val_update = False
        # Task row: "num1 <multiplication sign> num2 =" labels.
        x=14
        y=4
        captions = [str(num1),chr(215),str(num2),"="]
        if self.level.lvl < 4:
            # Hint: tint the task labels like the answer tile in the outline.
            color = self.board.ships[self.multi[str(self.solution[2])]].initcolor
        else:
            color = (255,255,255)#color4
        for i in range(4):
            self.board.add_unit(x+i,y,1,1,classes.board.Label,captions[i],color,"",2)
        self.outline_all(0,1)
        # Answer slot (a Door) that accepts typed digits or a dropped tile.
        self.board.add_door(18,y,1,1,classes.board.Door,"",task_bg_color,"",font_size = 2)
        self.home_square = self.board.units[86]
        self.home_square.door_outline = True
        self.home_square.font_color = task_font_color
        self.board.all_sprites_list.move_to_front(self.home_square)
    def handle(self,event):
        """Process typing into the answer slot and tile-drag interactions."""
        gd.BoardGame.handle(self, event) #send event handling up
        if self.show_msg == False:
            if event.type == pygame.KEYDOWN and event.key != pygame.K_RETURN:
                lhv = len(self.home_square.value)
                self.changed_since_check = True
                if event.key == pygame.K_BACKSPACE:
                    if lhv > 0:
                        self.home_square.value = self.home_square.value[0:lhv-1]
                elif not self.board.grid[4][18]:
                    # Accept up to two digits while no tile occupies the slot.
                    char = event.unicode
                    if len(char)>0 and lhv < 2 and char in self.digits:
                        self.home_square.value += char
                self.home_square.update_me = True
                self.mainloop.redraw_needed[0] = True
            elif event.type == pygame.MOUSEMOTION and self.drag:
                # Dragging a tile over the slot discards any typed answer.
                if self.board.grid[4][18]:
                    self.home_square.value = ""
                    self.home_square.update_me = True
    def update(self,game):
        """Repaint the board on a white background."""
        game.fill((255,255,255))
        gd.BoardGame.update(self, game) #rest of painting done by parent
    def check_result(self):
        """Validate either the dropped tile or the typed answer."""
        if self.board.grid[4][18]:
            # A tile occupies the slot: it must be the one showing the product.
            sol = self.board.ships[self.multi[str(self.solution[2])]]
            if sol.grid_x == 18 and sol.grid_y == 4:
                self.update_score(self.points)
                self.passed()
            else:
                self.failed()
        else:
            # Otherwise compare the typed value against the expected product.
            if self.home_square.value != "" and (int(self.home_square.value) == self.solution[2]):
                self.update_score(self.points)
                self.quick_passed()
            else:
                self.failed()
    def passed(self):
        """Advance to the next board after a correct drag answer."""
        tts = self.d["Perfect!"]#+" "+str(self.solution[0])+" "+self.d["multiplied by"]+" "+str(self.solution[1])+" "+self.d["equals"]+" "+str(self.solution[2])
        self.level.next_board(tts)
    def quick_passed(self):
        """Advance to the next board after a correct typed answer."""
        tts = self.d["Perfect!"]
        self.level.next_board(tts)
    def failed(self):
        """Let the player retry the current board."""
        self.level.try_again()
|
One of the biggest security issues a system administrator can run into on a default Linux system, is that it allows everyone to directly log in to your system through ssh. When someone wants to hack your server, the first thing the cracker or bot will do is brute forcing your root account. Since you don't have to guess the username, that's becoming a whole lot easier on the cracker.
That's an issue. Brute forcing root accounts through SSH is still very popular, and any sys admin can verify this by checking the logs on a Linux server. This is easy to fix by having a separate account to log in to, and using root on the system itself with sudo or su (whichever you prefer). It's much better to have a separate account that you regularly use and simply sudo to root when necessary. Needless to say, when you edit SSH to disallow root logins, you need to have a separate account that can log in and use su or sudo.
Your Root account is now excluded from brute force attacks.
|
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify it under
# the terms of the (LGPL) GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Library Lesser General Public License
# for more details at ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jurko Gospodnetiæ ( jurko.gospodnetic@pke.hr )
"""
"poor man's tox" development script used on Windows to run the full suds-jurko
test suite using multiple Python interpreter versions.
Intended to be used as a general 'all tests passed' check. To see more detailed
information on specific failures, run the failed test group manually,
configured for greater verbosity than done here.
"""
import os.path
import shutil
import sys
from suds_devel.configuration import BadConfiguration, Config, configparser
from suds_devel.environment import BadEnvironment
import suds_devel.utility as utility
class MyConfig(Config):
    """Script configuration that also reads the environment configuration.

    Converts low-level INI parsing errors into BadConfiguration so callers
    only need to handle the script's own error types.
    """
    def __init__(self, script, project_folder, ini_file):
        """
        Initialize new script configuration.

        External configuration parameters may be specified relative to the
        following folders:
          * script - relative to the current working folder
          * project_folder - relative to the script folder
          * ini_file - relative to the project folder

        Raises BadConfiguration if the environment configuration can not
        be parsed.
        """
        super(MyConfig, self).__init__(script, project_folder, ini_file)
        try:
            self._read_environment_configuration()
        except configparser.Error:
            # sys.exc_info() instead of 'except ... as e' keeps this
            # compatible with very old Python versions; configparser.Error
            # exposes a .message property on both Python 2 and 3.
            raise BadConfiguration(sys.exc_info()[1].message)
def _prepare_configuration():
    """Create the global script configuration from the project's setup.cfg."""
    # We know we are a regular stand-alone script file and not an imported
    # module (either frozen, imported from disk, zip-file, external database or
    # any other source). That means we can safely assume we have the __file__
    # attribute available.
    global config
    config = MyConfig(__file__, "..", "setup.cfg")
def _print_title(env, message_fmt):
separator = "-" * 63
print("")
print(separator)
print("--- " + message_fmt % (env.name(),))
print(separator)
def _report_startup_information():
print("Running in folder: '%s'" % (os.getcwd(),))
def _run_tests(env):
    """Build, install and test suds in the given Python environment.

    Returns True when every step succeeds, False on the first failure.
    """
    # Python 3 builds translated sources into ./build; remove any stale
    # build output before reinstalling.
    if env.sys_version_info >= (3,):
        _print_title(env, "Building suds for Python %s")
        stale_build = os.path.join(config.project_folder, "build")
        if os.path.isdir(stale_build):
            shutil.rmtree(stale_build)
    # Install the project into the target Python environment in editable
    # mode. This actually builds Python 3 sources when the environment is
    # a Python 3 one.
    _, _, status = env.execute(["setup.py", "-q", "develop"],
        cwd=config.project_folder)
    if status != 0:
        return False
    tests_dir = os.path.join(config.project_folder, "tests")
    pytest_args = ["-m", "pytest", "-q", "-x", "--tb=short"]
    _print_title(env, "Testing suds with Python %s")
    _, _, status = env.execute(pytest_args, cwd=tests_dir)
    if status != 0:
        return False
    # Repeat the run with assertions disabled (python -O).
    _print_title(env, "Testing suds with Python %s - no assertions")
    _, _, status = env.execute(["-O"] + pytest_args, cwd=tests_dir)
    return status == 0
def _run_tests_in_all_environments():
    """Run the full test suite in every configured Python environment.

    Raises BadConfiguration when no environments are configured. Returns
    False as soon as any environment fails, True when all of them pass.
    """
    environments = config.python_environments
    if not environments:
        raise BadConfiguration("No Python environments configured.")
    for env in environments:
        # Lazily scan each environment the first time it is used.
        if not env.initial_scan_completed:
            _print_title(env, "Scanning environment Python %s")
            env.run_initial_scan()
        if not _run_tests(env):
            return False
    return True
def main():
    """Script entry point.

    Returns 0 on success, -2 on configuration/environment errors and -3
    when any test run fails.
    """
    try:
        _report_startup_information()
        _prepare_configuration()
        all_passed = _run_tests_in_all_environments()
    except (BadConfiguration, BadEnvironment):
        # sys.exc_info() keeps this compatible with very old Pythons.
        utility.report_error(sys.exc_info()[1])
        return -2
    print("")
    if all_passed:
        print("All tests passed.")
        return 0
    print("Test failed.")
    return -3
# Script entry point; the process exit status encodes the result (see main()).
if __name__ == "__main__":
    sys.exit(main())
|
Large Cut-Away Barrel is rated 4.0 out of 5 by 1.
In a cut-away barrel, you can easily see the imperfections created during the rifling and machining process. Makes a great addition to the Basic Forensic Firearm Identification Kit (item #212151).
In a cut-away barrel, you can easily see the imperfections created during the rifling and machining process. The Large Cut-Away Barrel is oversized (1" diam x 5" L) aluminum that has been cut in half to expose easy-to-see characteristics and markings. Makes a great addition to the Basic Forensic Firearm Identification Kit (item #212151). Note: Barrel is safe for classroom use.
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import re
from .base import Resource
from .users import User
from .pull_requests import PullRequest
class Label(Resource):
    """An issue label resource."""

    @staticmethod
    def is_valid_color(color):
        """Return True when *color* is exactly six hexadecimal digits."""
        pattern = re.compile(r'[0-9abcdefABCDEF]{6}')
        found = pattern.match(color)
        if found is None:
            return False
        # The match must cover the entire string, not just a prefix.
        return found.start() == 0 and found.end() == len(color)

    def __str__(self):
        return '<Label (%s)>' % getattr(self, 'name', '')
class Milestone(Resource):
    """A repository milestone resource."""
    # Attribute names to be treated as dates (presumably parsed by the
    # Resource base class -- confirm against resources/base.py).
    _dates = ('created_at', 'due_on')
    # Nested attributes mapped onto their resource classes.
    _maps = {'creator': User}
    def __str__(self):
        return '<Milestone (%s)>' % getattr(self, 'title', '')
class Issue(Resource):
    """An issue resource, including any associated pull request."""
    # Attribute names to be treated as dates.
    _dates = ('created_at', 'updated_at', 'closed_at')
    # Nested attributes mapped onto their resource classes.
    _maps = {
        'assignee': User,
        'user': User,
        'milestone': Milestone,
        'pull_request': PullRequest
    }
    # Attributes that are lists of nested resources.
    _collection_maps = {'labels': Label}
    def __str__(self):
        return '<Issue (%s)>' % getattr(self, 'number', '')
class Comment(Resource):
    """An issue comment resource."""
    # Attribute names to be treated as dates.
    _dates = ('created_at', 'updated_at')
    # Nested attributes mapped onto their resource classes.
    _maps = {'user': User}
    def __str__(self):
        return '<Comment (%s)>' % (getattr(self, 'user', ''))
class Event(Resource):
    """An issue event resource."""
    # Attribute names to be treated as dates.
    _dates = ('created_at', )
    # Nested attributes mapped onto their resource classes.
    _maps = {'actor': User, 'issue': Issue}
    def __str__(self):
        return '<Event (%s)>' % (getattr(self, 'commit_id', ''))
|
Gangster Pursuit Run and jump over obstacles. Shoot the gangsters as you jump over their cover.
Funny Elevator What goes up must get down and funny. Lift your spirits high with this funny elevator.
|
import os
from aleph.util import checksum
class Archive(object):
    """Base class for content-addressed file storage backends.

    Provides the shared content-hash-based path layout; subclasses supply
    the actual storage operations.
    """

    def _get_file_path(self, meta):
        """Map *meta* to a relative storage path, sharded by content hash.

        Raises ValueError when no content hash is available.
        """
        content_hash = meta.content_hash
        if content_hash is None:
            raise ValueError("No content hash available.")
        # Nest folders by hash prefix to keep directory sizes manageable.
        shard = os.path.join(content_hash[:2], content_hash[2:4],
                             content_hash[4:6], content_hash)
        name = 'data'
        if meta.file_name is not None:
            name = meta.file_name
        elif meta.extension is not None:
            name = '%s.%s' % (name, meta.extension)
        return os.path.join(shard, name)

    def _update_metadata(self, filename, meta):
        """Record the checksum of *filename* on *meta* and return it."""
        meta.content_hash = checksum(filename)
        return meta

    def archive_file(self, filename, meta, move=False):
        """Import the given file into the archive and return an updated
        metadata object. If ``move`` is given, the original file will not
        exist afterwards. (Backend-specific; no-op here.)"""
        pass

    def load_file(self, meta):
        """Fetch the stored file for *meta* (backend-specific; no-op here)."""
        pass

    def cleanup_file(self, meta):
        """Release local resources held for *meta* (backend-specific)."""
        pass

    def generate_url(self, meta):
        """Return a public URL for *meta*, or None when unsupported."""
        return
|
Behind Every Successful Small Business There Is a Family.
It all started nearly four decades ago in St. Louis, Missouri, when a first-time mom started designing hair accessories to tame her daughter’s hair and accentuate her style. Her hobby turned into a business, and the business eventually grew to become America’s best-known brand of infant and girl’s hair accessories – Wee Ones™!
In 2010, first-time entrepreneurs Miles and Gina Faust took over Wee Ones to begin the next chapter of the company’s story. Miles runs the business side of things, while Gina leads the design and marketing efforts at Wee Ones. In 2014, their daughter, Allie, joined the family business to help develop the in-house capability of printing custom designs on their ribbons, taking creativity at Wee Ones to a whole new level.
Working together, this family is committed to the success and growth of Wee Ones for years to come.
Recent innovations include the No Slip Clip — a product improvement designed for active girls who want their bows to stay in place all day long — and the Wee Ones Game Day Collection, a line of officially licensed collegiate hair accessories created in partnership with Divine Creations. Both reflect the craftsmanship and quality that Wee Ones has prided itself on for almost 40 years.
|
#
# Explore
# - The Adventure Interpreter
#
# Copyright (C) 2006 Joe Peterson
#
import sys
import Explore
# Default option values. Several of these flags are recognized on the
# command line but not yet passed to Explore.play(); initializing them all
# here fixes the latent NameError that would occur if later code read a
# flag whose option was absent from argv, and documents the full option set.
filename = None
no_delay = False
trs_compat = False
quiet = False
command = None
resume = None
last_suspend = None
one_shot = False
show_title = True
show_title_only = False

# Minimal hand-rolled argument parsing: an option's value is the next argv
# entry, accepted only when it is present and does not itself start with '-'.
for arg_num in range(len(sys.argv)):
    if sys.argv[arg_num] == "-f":
        if len(sys.argv) > (arg_num + 1) and (len(sys.argv[arg_num + 1]) == 0 or sys.argv[arg_num + 1][0] != '-'):
            filename = sys.argv[arg_num + 1]
        else:
            print >> sys.stderr, "Error: Missing adventure filename"
            sys.exit(1)
    elif sys.argv[arg_num] == "-q":
        quiet = True
    elif sys.argv[arg_num] == "-c":
        if len(sys.argv) > (arg_num + 1) and (len(sys.argv[arg_num + 1]) == 0 or sys.argv[arg_num + 1][0] != '-'):
            command = sys.argv[arg_num + 1]
    elif sys.argv[arg_num] == "-r":
        if len(sys.argv) > (arg_num + 1) and (len(sys.argv[arg_num + 1]) == 0 or sys.argv[arg_num + 1][0] != '-'):
            resume = sys.argv[arg_num + 1]
    elif sys.argv[arg_num] == "-s":
        if len(sys.argv) > (arg_num + 1) and (len(sys.argv[arg_num + 1]) == 0 or sys.argv[arg_num + 1][0] != '-'):
            last_suspend = sys.argv[arg_num + 1]
    elif sys.argv[arg_num] == "--one-shot":
        one_shot = True
    elif sys.argv[arg_num] == "--no-title":
        show_title = False
    elif sys.argv[arg_num] == "--title-only":
        show_title_only = True
    elif sys.argv[arg_num] == "--no-delay":
        no_delay = True
    elif sys.argv[arg_num] == "--trs-compat":
        trs_compat = True

Explore.play(filename, no_delay, trs_compat)
|
Get Telephone Systems - New And Used Here!
Find Telephone Systems - New And Used at Amazon.com and get Free Shipping today!
High-quality new and used phones, phone systems and accessories, including voicemail and more. Meridian Norstar, Samsung, Panasonic, Toshiba, Vantage, AT&T, NEC, TIE and Lucent.
Telephone systems new, used and refurbished equipment.
|
#!/usr/bin/env python3
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import sqlite3
from main_widgets.home_main_widget import home_main_widget
from main_widgets.pvp_main_widget import pvp_main_widget
from main_widgets.pvai_main_widget import pvai_main_widget
from main_widgets.how_to_play import rules_main_widget
from main_widgets.game_history_main_widget import game_history_main_widget
from dialogs.save_dialog import save_dialog
from dialogs.color_select_dialog import color_select_dialog
class main_widget(QWidget):
    """Top-level widget for the connect-four game.

    Holds a QStackedLayout of screens (0 = home, 1 = rules, 2 = player vs
    player, 3 = player vs AI, 4 = saved games) and a navigation footer that
    is hidden on the home screen.
    """
    def __init__(self):
        QWidget.__init__(self)
        self.main_layout = QVBoxLayout(self)
        # # -------- layout declaration -------- # #
        self.stack_layout = QStackedLayout()
        self.footer_layout = QHBoxLayout()
        self.footer_widget = QWidget()
        # # -------- widget declaration -------- # #
        # window widgets
        self.home_widget = home_main_widget()
        self.pvp_widget = pvp_main_widget()
        self.pvai_widget = pvai_main_widget()
        self.rules_widget = rules_main_widget()
        self.game_history_widget = game_history_main_widget()
        # footer widgets
        self.main_menu_push_button = QPushButton("main menu")
        self.rules_push_button = QPushButton("how to play")
        self.pvp_push_button = QPushButton("player v. player")
        self.pvai_push_button = QPushButton("player v. ai")
        self.game_history_push_button = QPushButton("saved games")
        self.quit_push_button = QPushButton("quit")
        # # -------- add to layouts -------- # #
        # stack layout (order defines the indices used by the *_clicked slots)
        self.stack_layout.addWidget(self.home_widget)
        self.stack_layout.addWidget(self.rules_widget)
        self.stack_layout.addWidget(self.pvp_widget)
        self.stack_layout.addWidget(self.pvai_widget)
        self.stack_layout.addWidget(self.game_history_widget)
        # footer layout
        self.footer_layout.addStretch(0)
        self.footer_layout.addWidget(self.main_menu_push_button)
        self.footer_layout.addWidget(self.rules_push_button)
        self.footer_layout.addWidget(self.pvp_push_button)
        self.footer_layout.addWidget(self.pvai_push_button)
        self.footer_layout.addWidget(self.game_history_push_button)
        self.footer_layout.addWidget(self.quit_push_button)
        self.footer_layout.addStretch(0)
        # hiding upon opening bc menu
        self.main_menu_push_button.hide()
        self.pvp_push_button.hide()
        self.pvai_push_button.hide()
        self.rules_push_button.hide()
        self.game_history_push_button.hide()
        self.quit_push_button.hide()
        # main layout
        self.main_layout.addLayout(self.stack_layout)
        self.main_layout.addLayout(self.footer_layout)
        # # -------- actions -------- # #
        # Footer navigation buttons.
        self.main_menu_push_button.clicked.connect(self.main_menu_clicked)
        self.pvp_push_button.clicked.connect(self.pvp_clicked)
        self.pvai_push_button.clicked.connect(self.pvai_clicked)
        self.rules_push_button.clicked.connect(self.rules_clicked)
        self.game_history_push_button.clicked.connect(self.game_history_clicked)
        # Home-screen buttons mirror the footer navigation.
        self.home_widget.rules_push_button.clicked.connect(self.rules_clicked)
        self.home_widget.pvp_push_button.clicked.connect(self.pvp_clicked)
        self.home_widget.pvai_push_button.clicked.connect(self.pvai_clicked)
        self.home_widget.game_history_push_button.clicked.connect(self.game_history_clicked)
        # Saved-games screen: load a game, and refresh the list after a save.
        self.game_history_widget.load_game_button.clicked.connect(self.load_game)
        self.pvp_widget.save_clicked_signal.connect(self.game_history_widget.populate_list)
    def main_menu_clicked(self):
        """Show the home screen and hide the navigation footer."""
        self.stack_layout.setCurrentIndex(0)
        self.hide_footer()
    def pvp_clicked(self):
        """Show the player-vs-player screen."""
        self.stack_layout.setCurrentIndex(2)
        self.show_footer()
        # NOTE(review): this welcome dialog is built but never shown --
        # the exec_() call below is commented out.
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Information)
        msg.setText('welcome to the player vs player game. red goes first, and '
                    'you can simply click on the column where you want to place your piece!')
        msg.setWindowTitle("player vs player")
        msg.setStandardButtons(QMessageBox.Ok)
        # retval = msg.exec_()
    def pvai_clicked(self):
        """Show the player-vs-AI screen."""
        self.stack_layout.setCurrentIndex(3)
        self.show_footer()
        # NOTE(review): the color-selection dialog is created but never
        # executed -- the exec_() call below is commented out.
        self.dialog = color_select_dialog()
        # self.dialog.exec_()
    def rules_clicked(self):
        """Show the how-to-play screen."""
        self.stack_layout.setCurrentIndex(1)
        self.show_footer()
    def game_history_clicked(self):
        """Show the saved-games screen."""
        self.stack_layout.setCurrentIndex(4)
        self.show_footer()
    def undo_clicked(self):
        """Switch to the PvP screen and undo the last move."""
        self.stack_layout.setCurrentIndex(2)
        self.pvp_widget.undo_clicked()
    def reset_clicked(self):
        """Switch to the PvP screen and reset the board."""
        self.stack_layout.setCurrentIndex(2)
        self.pvp_widget.reset_clicked()
    def save_clicked(self):
        """Switch to the PvP screen and save the current game."""
        self.stack_layout.setCurrentIndex(2)
        self.pvp_widget.save_clicked()
    def load_clicked(self):
        """Show the saved-games screen with instructions for loading."""
        self.stack_layout.setCurrentIndex(4)
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Information)
        msg.setText('select a game to load and press the load button at the bottom of page ')
        msg.setWindowTitle("load")
        msg.setStandardButtons(QMessageBox.Ok)
        retval = msg.exec_()
    def hide_footer(self):
        """Hide all footer navigation buttons (used on the home screen)."""
        # self.menu_bar.hide()
        self.main_menu_push_button.hide()
        self.pvp_push_button.hide()
        self.pvai_push_button.hide()
        self.rules_push_button.hide()
        self.game_history_push_button.hide()
        self.quit_push_button.hide()
    def show_footer(self):
        """Show all footer navigation buttons."""
        # self.menu_bar.show()
        self.main_menu_push_button.show()
        self.pvp_push_button.show()
        self.pvai_push_button.show()
        self.rules_push_button.show()
        self.game_history_push_button.show()
        self.quit_push_button.show()
    def load_game(self):
        """Replay a saved game's moves onto a fresh PvP board."""
        self.stack_layout.setCurrentIndex(2)
        moves = self.game_history_widget.moves
        self.pvp_widget.reset_clicked()
        # Re-apply each recorded column click in order to rebuild the board.
        for col in moves:
            self.pvp_widget.column_clicked(0,int(col))
|
The island of Aegina, which lies just 10 nm off the coast of Attica, is a mostly family-oriented destination.
Besides the main town's port, there are quite a few more options for mooring/anchoring such as Aghia Marina, Souvala, Perdika or the adjacent island Moni.
Due to its close proximity to Athens, the island has had a significant development that goes back to the very ancient years.
It is certainly worthwhile to visit the ancient temple of Afaia, located about 1.5 km (1 mi) north of Aghia Marina, as it is one of the best-preserved ancient temples in Greece.
The town of Aegina is getting more and more touristic every year, yet it does do a good job maintaining its traditional colors.
Fresh fish and local specialties are available in every restaurant, while coffee and beverages are served in the numerous cafes at the seafront.
While in Aegina, do not miss the opportunity to get a fresh bag of pistachio, as Aegina is the "pistachio's homeland".
A large variety of sweets made of this famous nut can also be found in most local stores.
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import redis
#import zmq
import time
from uuid import uuid4
from autostack.utils import get_open_port
__author__ = 'Avi Tal <avi3tal@gmail.com>'
__date__ = 'Sep 6, 2015'
def gen_key(name):
    """Return the namespaced Redis key for a queue called *name*."""
    return 'redisqueue:%s' % (name,)
class RedisQueue(object):
    """
    Simple Queue with Redis Backend
    https://redis-py.readthedocs.org/en/latest/
    """
    def __init__(self, name=None, **kwargs):
        """
        The default connection parameters are:
        host='localhost', port=6379, db=0
        """
        self.__db = redis.Redis(**kwargs)
        # Auto-generate a unique key when no queue name is given.
        self.__key = name or gen_key(str(uuid4()))

    def __len__(self):
        """Return the approximate size of the queue."""
        return self.__db.llen(self.key)

    @property
    def key(self):
        """The Redis key backing this queue."""
        return self.__key

    def qsize(self):
        """Return the approximate size of the queue (queue.Queue API).

        BUG FIX: empty() always called self.qsize(), but this method was
        never defined, so empty() raised AttributeError; it now exists.
        """
        return len(self)

    def empty(self):
        """Return True if the queue is empty, False otherwise."""
        return self.qsize() == 0

    def clear(self):
        """Delete the queue and all of its items."""
        self.__db.delete(self.key)

    def put(self, item):
        """Put item into the queue."""
        self.__db.rpush(self.key, item)

    def get(self, block=True, timeout=None):
        """Remove and return an item from the queue.

        If optional args block is true and timeout is None (the default),
        block if necessary until an item is available.
        """
        if block:
            if timeout is None:
                timeout = 0
            item = self.__db.blpop(self.key, timeout=timeout)
            if item is not None:
                # blpop returns a (key, value) pair; keep only the value.
                item = item[1]
        else:
            item = self.__db.lpop(self.key)
        if item is not None:
            # WARNING: eval() on queue payloads executes arbitrary code and
            # is only safe when all producers are fully trusted; consider
            # json or ast.literal_eval instead.
            if isinstance(item, str) and item != 'goodbye':
                item = eval(item)
        return item

    def join(self):
        """Signal consumers to stop by enqueueing a sentinel value."""
        self.put('goodbye')
#class ZeroMQueue(object):
# def __init__(self, name=None, port='5556', host='127.0.0.1'):
# self.topic = name or str(uuid4())
# port = port or get_open_port(host)
#
# subcontext = zmq.Context()
# self._subscriber = subcontext.socket(zmq.PULL)
# self._subscriber.bind('tcp://{}:{}'.format(host, port))
#
# pubcontext = zmq.Context()
# self._publisher = pubcontext.socket(zmq.PUSH)
# self._publisher.connect('tcp://{}:{}'.format(host, port))
#
# def put(self, item):
# self._publisher.send_json(item)
# time.sleep(1)
#
# def get(self, block=True, timeout=None):
# if block:
# item = self._subscriber.recv_json()
# else:
# try:
# item = self._subscriber.recv_json(flags=zmq.NOBLOCK)
# except zmq.Again as e:
# pass
# return item
#
# def join(self):
# self.put('goodbye')
|
Valencia is located in the middle of Europe's most densely developed agricultural region. Originally a Greek settlement, the town was taken over by Romans in 138 BC and turned into a retirement town for old soldiers. The Moors controlled the land for 500 years, and this fertile plain, which today yields three to four crops, was considered to be heaven on earth. El Cid conquered Valencia for Spain in 1094, but it fell back into Moorish hands after his death. Incorporated into Spain in the 15th century, Valencia remains the nation's breadbasket.
|
from django.contrib import admin
from django.conf import settings
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from haystack.views import SearchView
from haystack.query import SearchQuerySet
from .views import (AuthorList, AuthorDetail,
ArticleDetail, ArticleTags,
IssueYear, IssueDetail,
PeriodicalList, PeriodicalDetail,
SeriesList, SeriesDetail)
# Query results with the most recent publication date first.
sqs = SearchQuerySet().order_by('-pub_date')

# NOTE(review): patterns() and string view paths were deprecated in
# Django 1.8 and removed in 1.10; this URLconf targets older Django.
urlpatterns = \
    patterns('',
             # Haystack-backed full-text search over periodicals.
             url(r'^search/',
                 SearchView(load_all=False,
                            template="periodicals/search.html",
                            searchqueryset=sqs,
                            ),
                 name='haystack_search',
                 ),
             # not in sitemap
             url(r'^authors/$',
                 AuthorList.as_view(),
                 name='periodicals_authors_list',
                 ),
             url(r'^authors/(?P<author_slug>[-\w]+)/$',
                 AuthorDetail.as_view(),
                 name='periodicals_author_detail'
                 ),
             url(r'^tags/$',
                 TemplateView.as_view(template_name='periodicals/tags.html'),
                 name='periodicals_tags',
                 ),
             url(r'^tag/(?P<tag>[^/]+)/$',
                 ArticleTags.as_view(template_name='periodicals/article_tag_detail.html'),
                 name='periodicals_article_tag_detail'
                 ),
             )
# Link-submission URLs are optional; expose them only when the feature is
# switched on. getattr() keeps this URLconf importable when the setting is
# absent (the feature then simply stays off) instead of raising
# AttributeError at import time.
if getattr(settings, 'PERIODICALS_LINKS_ENABLED', False):
    urlpatterns += \
        patterns('',
                 # success for adding a link - don't include in sitemap
                 url(r'^links/added/$',
                     TemplateView.as_view(template_name='periodicals/link_success.html'),
                     name='periodicals_add_link_success'
                     ),
                 # add a link to an article - don't include in sitemap
                 url(r'^links/(?P<periodical_slug>[-\w]+)/(?P<issue_slug>[-\w]+)/(?P<article_slug>[-\w]+)/$',
                     'periodicals.views.add_article_link',
                     name='periodicals_add_article_link'
                     ),
                 # add a link to an issue - don't include in sitemap
                 url(r'^links/(?P<periodical_slug>[-\w]+)/(?P<issue_slug>[-\w]+)/$',
                     'periodicals.views.add_issue_link',
                     name='periodicals_add_issue_link'
                     ),
                 # Page showing all periodical Issues and Articles with external links
                 url(r'^links/(?P<periodical_slug>[-\w]+)/$',
                     'periodicals.views.links',
                     name='periodicals_links'
                     ),
                 )
urlpatterns += \
    patterns('',
             # periodical detail including list of periodical's years
             url(r'^(?P<periodical_slug>[-\w]+)/$',
                 PeriodicalDetail.as_view(),
                 name='periodicals_periodical_detail'
                 ),
             # list of periodical's issues and articles viewable online
             url(r'^(?P<periodical_slug>[-\w]+)/online/$',
                 'periodicals.views.read_online',
                 name='periodicals_read_online'
                 ),
             # list of periodical's issues for a year - not in sitemap.
             # NOTE: matched before the issue-detail pattern below, so a
             # purely 4-digit issue slug would be captured here instead.
             url(r'^(?P<periodical_slug>[-\w]+)/(?P<year>\d{4})/$',
                 IssueYear.as_view(),
                 name='periodicals_issue_year'
                 ),
             # list of periodical's series - not in sitemap
             url(r'^(?P<periodical_slug>[-\w]+)/series/$',
                 SeriesList.as_view(),
                 name='periodicals_series_list'
                 ),
             # list of articles in a series - not in sitemap
             url(r'^(?P<periodical_slug>[-\w]+)/series/(?P<series>.+)/$',
                 SeriesDetail.as_view(),
                 name='periodicals_series_detail'
                 ),
             # one periodical issue
             url(r'^(?P<periodical_slug>[-\w]+)/(?P<issue_slug>[-\w]+)/$',
                 IssueDetail.as_view(),
                 name='periodicals_issue_detail'
                 ),
             # one article
             url(r'^(?P<periodical_slug>[-\w]+)/(?P<issue_slug>[-\w]+)/(?P<article_slug>[-\w]+)/$',
                 ArticleDetail.as_view(),
                 name='periodicals_article_detail'
                 ),
             # list of periodicals - not in sitemap.
             # BUGFIX: this was url(r'', ...). An empty, unanchored pattern
             # matches *every* request, so any URL that fell through the
             # patterns above rendered the periodical list instead of
             # returning a 404. Anchoring to r'^$' restricts it to the
             # URLconf root; reverse('periodicals_list') still resolves to
             # the same URL.
             url(r'^$',
                 PeriodicalList.as_view(),
                 name='periodicals_list'
                 ),
             )

admin.autodiscover()
|
Shop for Garden Border Landscape Border with confidence - Garden Border Landscape Border are our specialty and all Garden Border Landscape Border questions are answered.
Widest selection of Garden Border Landscape Border — in stock and with fast shipping — the most exclusive collection of Garden Border Landscape Border from eBay!
Plastic Landscape Border Garden Lawn Flower Bed Edging Solar Lights Brick 20 ft.
Landscape Edging Lawn Garden Yard Border Aluminum Silver 24' x 4"
|
'''
Basic utilities for matching coordinates to the street grid
using the OSRM match endpoint
'''
import logging
import requests
from django.conf import settings
from core.matchers.base import BaseMatcher
logger = logging.getLogger(__name__)


class OsrmMatcher(BaseMatcher):
    """Match raw GPS coordinates to the street grid via the OSRM match endpoint."""

    def _match_output(self):
        """Call the OSRM match endpoint and return the decoded JSON payload.

        Raises:
            IOError: if the response carries no 'tracepoints' key (i.e. OSRM
                returned an error payload instead of a match result).
        """
        coords = self.raw_coords
        # OSRM expects "lon,lat;lon,lat;..." in the URL path.
        coord_string = ';'.join(
            "%s,%s" % (lon, lat) for lon, lat in coords
        )
        # One search radius per input coordinate; fall back to the configured
        # default radius when the caller did not supply any.
        radiuses = self.radiuses or [settings.OSRM.DEFAULT_RADIUS] * len(coords)
        radius_string = ';'.join(map(str, radiuses))
        options = {
            'radiuses': radius_string,
            'geometries': 'geojson',
            # OSRM wants lowercase string booleans, not Python bools.
            'annotations': 'true',
            'overview': 'full',
        }
        request_url = '{}/{}'.format(
            settings.OSRM.MATCH_ENDPOINT,
            coord_string
        )
        logger.debug('Request url: {}'.format(request_url))
        response = requests.get(request_url, params=options)
        output = response.json()
        if 'tracepoints' not in output:
            logger.error('No tracepoints found for {}'.format(output))
            raise IOError(output)
        logger.debug('Match response: {}'.format(output))
        return output

    def _tracepoint_values(self, key):
        # Shared helper: extract one field from every tracepoint, preserving
        # input order; unmatched points are reported as None.
        return [
            tracepoint.get(key) if tracepoint else None
            for tracepoint
            in self.output['tracepoints']
        ]

    def unsnapped_points(self):
        """Return the indices of input coordinates OSRM could not match."""
        unmatched = []
        for index, tracepoint in enumerate(self.output['tracepoints']):
            if not tracepoint:
                logger.warning('Tracepoint index {} not found'.format(index))
                unmatched.append(index)
        return unmatched

    def snapped_points(self):
        """Return each tracepoint's snapped location, or None if unmatched."""
        return self._tracepoint_values('location')

    def snapped_names(self):
        """Return each tracepoint's snapped street name, or None if unmatched."""
        return self._tracepoint_values('name')

    def tracepoint_nodes(self, tracepoint_index):
        """Return the OSM node ids of the leg following one tracepoint.

        Duplicate node ids are dropped while preserving first-seen order.
        Returns [] for unmatched tracepoints and for the final waypoint of a
        matching (which has no following leg).
        """
        node_lookup = set()
        nodes = []
        tracepoint = self.output['tracepoints'][tracepoint_index]
        if tracepoint:
            legs = self.output['matchings'][tracepoint['matchings_index']]['legs']
            # The last waypoint of a matching has no leg after it.
            if len(legs) == tracepoint['waypoint_index']:
                return []
            leg = legs[tracepoint['waypoint_index']]
            for node in leg['annotation']['nodes']:
                if node not in node_lookup:
                    node_lookup.add(node)
                    nodes.append(node)
            return nodes
        else:
            return []

    def _generate_nodes(self):
        """Return (node id, tracepoint index) pairs across all matched
        tracepoints, deduplicating node ids globally (first occurrence wins).
        """
        node_lookup = set()
        nodes = []
        for index, tracepoint in enumerate(self.output['tracepoints']):
            if tracepoint:
                route = self.output['matchings'][tracepoint['matchings_index']]
                legs = route['legs']
                # Final waypoint of a matching: no following leg to walk.
                if tracepoint['waypoint_index'] == len(legs):
                    continue
                leg = legs[tracepoint['waypoint_index']]
                leg_nodes = leg['annotation']['nodes']
                for node in leg_nodes:
                    if node not in node_lookup:
                        node_lookup.add(node)
                        nodes.append((node, index))
        return nodes
|
Quick Disconnect Rail Adapter for Bipods that require sling swivel studs.
The adapter allows the mounting of a Harris or ATK folding bipod to rifles equipped with Picatinny or quad rails. Constructed of hard anodized aircraft grade aluminum.
CALIFORNIA PROPOSITION 65 WARNING: This product can expose you to chemical(s) known to the state of California to cause cancer, birth defects or other reproductive harm. For more information go to www.P65Warnings.ca.gov/product.
|
##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import math
import unittest
import random
import imath
import IECore
import IECoreScene
class SmoothSmoothSkinningWeightsOpTest( unittest.TestCase ) :
	"""Unit tests for IECoreScene.SmoothSmoothSkinningWeightsOp.

	The fixture methods build a small quad mesh and hand-computed
	SmoothSkinningData results for various smoothing-ratio/iteration
	combinations; the test methods compare op output against them.
	"""

	def mesh( self ) :
		# 16-vertex mesh of 14 quads used as the smoothing topology.
		vertsPerFace = IECore.IntVectorData( [ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 ] )
		vertexIds = IECore.IntVectorData( [
			0, 1, 3, 2, 2, 3, 5, 4, 4, 5, 7, 6, 6, 7, 9, 8,
			8, 9, 11, 10, 10, 11, 13, 12, 12, 13, 15, 14, 14, 15, 1, 0,
			1, 15, 13, 3, 3, 13, 11, 5, 5, 11, 9, 7, 14, 0, 2, 12,
			12, 2, 4, 10, 10, 4, 6, 8
		] )

		return IECoreScene.MeshPrimitive( vertsPerFace, vertexIds )

	def createSSD( self, offsets, counts, indices, weights ) :
		# Wrap compressed per-point influence data in a SmoothSkinningData
		# with a fixed 3-joint chain.
		names = IECore.StringVectorData( [ "|joint1", "|joint1|joint2", "|joint1|joint2|joint3" ] )
		poses = IECore.M44fVectorData( [
			imath.M44f( 1, -0, 0, -0, -0, 1, -0, 0, 0, -0, 1, -0, -0, 2, -0, 1 ),
			imath.M44f( 1, -0, 0, -0, -0, 1, -0, 0, 0, -0, 1, -0, -0, 0, -0, 1 ),
			imath.M44f( 1, -0, 0, -0, -0, 1, -0, 0, 0, -0, 1, -0, -0, -2, -0, 1 )
		] )

		return IECoreScene.SmoothSkinningData( names, poses, offsets, counts, indices, weights )

	def original( self ) :
		# Unsmoothed input weights.
		offsets = IECore.IntVectorData( [ 0, 1, 2, 4, 6, 7, 8, 10, 12, 14, 16, 17, 18, 20, 22, 23 ] )
		counts = IECore.IntVectorData( [ 1, 1, 2, 2, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2, 1, 1 ] )
		indices = IECore.IntVectorData( [ 0, 0, 0, 1, 0, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 1, 0, 1, 0, 1, 0, 0 ] )
		weights = IECore.FloatVectorData( [
			1, 1, 0.8, 0.2, 0.8, 0.2, 1, 1, 0.5, 0.5, 0.5, 0.5,
			0.5, 0.5, 0.5, 0.5, 1, 1, 0.8, 0.2, 0.8, 0.2, 1, 1
		] )

		return self.createSSD( offsets, counts, indices, weights )

	def smooth1_50( self ) :
		# Expected result: 1 iteration, smoothingRatio 0.5.
		offsets = IECore.IntVectorData( [ 0, 2, 4, 6, 8, 11, 14, 16, 18, 20, 22, 25, 28, 30, 32, 34 ] )
		counts = IECore.IntVectorData( [ 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2 ] )
		indices = IECore.IntVectorData( [
			0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 0, 1, 2, 1, 2, 1, 2,
			1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 0, 1, 0, 1, 0, 1
		] )
		weights = IECore.FloatVectorData( [
			0.966667, 0.0333333, 0.966667, 0.0333333, 0.725, 0.275, 0.725, 0.275,
			0.1, 0.8375, 0.0625, 0.1, 0.8375, 0.0625, 0.583333, 0.416667,
			0.583333, 0.416667, 0.583333, 0.416667, 0.583333, 0.416667, 0.1, 0.8375,
			0.0625, 0.1, 0.8375, 0.0625, 0.725, 0.275, 0.725, 0.275,
			0.966667, 0.0333333, 0.966667, 0.0333333
		] )

		return self.createSSD( offsets, counts, indices, weights )

	def smooth1_100( self ) :
		# Expected result: 1 iteration, smoothingRatio 1.0.
		offsets = IECore.IntVectorData( [ 0, 2, 4, 6, 8, 11, 14, 16, 18, 20, 22, 25, 28, 30, 32, 34 ] )
		counts = IECore.IntVectorData( [ 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2 ] )
		indices = IECore.IntVectorData( [
			0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 0, 1, 2, 1, 2, 1, 2,
			1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 0, 1, 0, 1, 0, 1
		] )
		weights = IECore.FloatVectorData( [
			0.933333, 0.0666667, 0.933333, 0.0666667, 0.65, 0.35, 0.65, 0.35,
			0.2, 0.675, 0.125, 0.2, 0.675, 0.125, 0.666667, 0.333333,
			0.666667, 0.333333, 0.666667, 0.333333, 0.666667, 0.333333, 0.2, 0.675,
			0.125, 0.2, 0.675, 0.125, 0.65, 0.35, 0.65, 0.35,
			0.933333, 0.0666667, 0.933333, 0.0666667
		] )

		return self.createSSD( offsets, counts, indices, weights )

	def smooth3_30( self ) :
		# Expected result: 3 iterations, smoothingRatio 0.3.
		offsets = IECore.IntVectorData( [ 0, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45 ] )
		counts = IECore.IntVectorData( [ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 ] )
		indices = IECore.IntVectorData( [
			0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2,
			0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2
		] )
		weights = IECore.FloatVectorData( [
			0.933725, 0.0659938, 0.00028125, 0.933725, 0.0659938, 0.00028125, 0.691672, 0.301016,
			0.0073125, 0.691672, 0.301016, 0.0073125, 0.145912, 0.767439, 0.0866484, 0.145912,
			0.767439, 0.0866484, 0.0161625, 0.6094, 0.374438, 0.0161625, 0.6094, 0.374438,
			0.0161625, 0.6094, 0.374438, 0.0161625, 0.6094, 0.374438, 0.145912, 0.767439,
			0.0866484, 0.145912, 0.767439, 0.0866484, 0.691672, 0.301016, 0.0073125, 0.691672,
			0.301016, 0.0073125, 0.933725, 0.0659938, 0.00028125, 0.933725, 0.0659938, 0.00028125
		] )

		return self.createSSD( offsets, counts, indices, weights )

	def smoothSelectVerts( self ) :
		# Expected result: 1 iteration, ratio 0.5, only verts 2-4 and 10-12 smoothed.
		offsets = IECore.IntVectorData( [ 0, 1, 2, 4, 6, 9, 10, 12, 14, 16, 18, 21, 24, 26, 28, 29 ] )
		counts = IECore.IntVectorData( [ 1, 1, 2, 2, 3, 1, 2, 2, 2, 2, 3, 3, 2, 2, 1, 1 ] )
		indices = IECore.IntVectorData( [
			0, 0, 0, 1, 0, 1, 0, 1, 2, 1, 1, 2, 1, 2, 1, 2,
			1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 0, 1, 0, 0
		] )
		weights = IECore.FloatVectorData( [
			1, 1, 0.725, 0.275, 0.725, 0.275, 0.1, 0.8375, 0.0625,
			1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.1,
			0.8375, 0.0625, 0.1, 0.8375, 0.0625, 0.725, 0.275, 0.8, 0.2, 1, 1
		] )

		return self.createSSD( offsets, counts, indices, weights )

	def smoothWithLocks( self ) :
		# Expected result: 3 iterations, ratio 0.3, with joint1 locked.
		offsets = IECore.IntVectorData( [ 0, 1, 2, 5, 8, 10, 12, 14, 16, 18, 20, 22, 24, 27, 30, 31 ] )
		counts = IECore.IntVectorData( [ 1, 1, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 1, 1 ] )
		indices = IECore.IntVectorData( [
			0, 0, 0, 1, 2, 0, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2,
			1, 2, 1, 2, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 0
		] )
		weights = IECore.FloatVectorData( [
			1, 1, 0.8, 0.193898, 0.00610161, 0.8, 0.193898, 0.00610161,
			0.902086, 0.0979137, 0.902086, 0.0979137, 0.624712, 0.375288, 0.624712, 0.375288,
			0.624712, 0.375288, 0.624712, 0.375288, 0.902086, 0.0979137, 0.902086, 0.0979137,
			0.8, 0.193898, 0.00610161, 0.8, 0.193898, 0.00610161, 1, 1
		] )

		return self.createSSD( offsets, counts, indices, weights )

	def testTypes( self ) :
		""" Test SmoothSmoothSkinningWeightsOp types"""

		ssd = self.original()
		op = IECoreScene.SmoothSmoothSkinningWeightsOp()
		self.assertEqual( type(op), IECoreScene.SmoothSmoothSkinningWeightsOp )
		self.assertEqual( op.typeId(), IECoreScene.TypeId.SmoothSmoothSkinningWeightsOp )
		# non-SmoothSkinningData input must be rejected
		op.parameters()['input'].setValue( IECore.IntData(1) )
		self.assertRaises( RuntimeError, op.operate )

	def testSmooth1_0( self ) :
		""" Test SmoothSmoothSkinningWeightsOp with 1 iteration and 0.0 smooth-ratio"""

		ssd = self.original()

		op = IECoreScene.SmoothSmoothSkinningWeightsOp()
		op.parameters()['input'].setValue( ssd )
		op.parameters()['mesh'].setValue( self.mesh() )
		op.parameters()['smoothingRatio'].setValue( 0.0 )
		op.parameters()['iterations'].setValue( 1 )
		op.parameters()['applyLocks'].setValue( False )
		result = op.operate()
		# a zero ratio must leave the weights completely unchanged
		self.assertEqual( result.influenceNames(), ssd.influenceNames() )
		self.assertEqual( result.influencePose(), ssd.influencePose() )
		self.assertEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
		self.assertEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
		self.assertEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
		self.assertEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
		self.assertEqual( result, ssd )

	def testSmooth1_100( self ) :
		""" Test SmoothSmoothSkinningWeightsOp with 1 iteration and 1.0 smooth-ratio"""

		ssd = self.original()

		op = IECoreScene.SmoothSmoothSkinningWeightsOp()
		op.parameters()['input'].setValue( ssd )
		op.parameters()['mesh'].setValue( self.mesh() )
		op.parameters()['smoothingRatio'].setValue( 1.0 )
		op.parameters()['iterations'].setValue( 1 )
		op.parameters()['applyLocks'].setValue( False )
		result = op.operate()
		self.assertEqual( result.influenceNames(), ssd.influenceNames() )
		self.assertEqual( result.influencePose(), ssd.influencePose() )
		self.assertNotEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
		self.assertNotEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
		self.assertNotEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
		self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
		self.assertNotEqual( result, ssd )

		smooth = self.smooth1_100()
		self.assertEqual( result.influenceNames(), smooth.influenceNames() )
		self.assertEqual( result.influencePose(), smooth.influencePose() )
		self.assertEqual( result.pointIndexOffsets(), smooth.pointIndexOffsets() )
		self.assertEqual( result.pointInfluenceCounts(), smooth.pointInfluenceCounts() )
		self.assertEqual( result.pointInfluenceIndices(), smooth.pointInfluenceIndices() )
		# weights are floats, so compare with tolerance rather than equality
		resultWeights = result.pointInfluenceWeights()
		smoothWeights = smooth.pointInfluenceWeights()
		for i in range( 0, resultWeights.size() ) :
			self.assertAlmostEqual( resultWeights[i], smoothWeights[i], 6 )

	def testSmooth1_50( self ) :
		""" Test SmoothSmoothSkinningWeightsOp with 1 iteration and 0.5 smooth-ratio"""

		ssd = self.original()

		op = IECoreScene.SmoothSmoothSkinningWeightsOp()
		op.parameters()['input'].setValue( ssd )
		op.parameters()['mesh'].setValue( self.mesh() )
		op.parameters()['smoothingRatio'].setValue( 0.5 )
		op.parameters()['iterations'].setValue( 1 )
		op.parameters()['applyLocks'].setValue( False )
		result = op.operate()
		self.assertEqual( result.influenceNames(), ssd.influenceNames() )
		self.assertEqual( result.influencePose(), ssd.influencePose() )
		self.assertNotEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
		self.assertNotEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
		self.assertNotEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
		self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
		self.assertNotEqual( result, ssd )

		smooth = self.smooth1_50()
		self.assertEqual( result.influenceNames(), smooth.influenceNames() )
		self.assertEqual( result.influencePose(), smooth.influencePose() )
		self.assertEqual( result.pointIndexOffsets(), smooth.pointIndexOffsets() )
		self.assertEqual( result.pointInfluenceCounts(), smooth.pointInfluenceCounts() )
		self.assertEqual( result.pointInfluenceIndices(), smooth.pointInfluenceIndices() )
		resultWeights = result.pointInfluenceWeights()
		smoothWeights = smooth.pointInfluenceWeights()
		for i in range( 0, resultWeights.size() ) :
			self.assertAlmostEqual( resultWeights[i], smoothWeights[i], 6 )

	def testSmooth3_30( self ) :
		""" Test SmoothSmoothSkinningWeightsOp with 3 iterations and 0.3 smooth-ratio"""

		ssd = self.original()

		op = IECoreScene.SmoothSmoothSkinningWeightsOp()
		op.parameters()['input'].setValue( ssd )
		op.parameters()['mesh'].setValue( self.mesh() )
		op.parameters()['smoothingRatio'].setValue( 0.3 )
		op.parameters()['iterations'].setValue( 3 )
		op.parameters()['applyLocks'].setValue( False )
		result = op.operate()
		self.assertEqual( result.influenceNames(), ssd.influenceNames() )
		self.assertEqual( result.influencePose(), ssd.influencePose() )
		self.assertNotEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
		self.assertNotEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
		self.assertNotEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
		self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
		self.assertNotEqual( result, ssd )

		smooth = self.smooth3_30()
		self.assertEqual( result.influenceNames(), smooth.influenceNames() )
		self.assertEqual( result.influencePose(), smooth.influencePose() )
		self.assertEqual( result.pointIndexOffsets(), smooth.pointIndexOffsets() )
		self.assertEqual( result.pointInfluenceCounts(), smooth.pointInfluenceCounts() )
		self.assertEqual( result.pointInfluenceIndices(), smooth.pointInfluenceIndices() )
		resultWeights = result.pointInfluenceWeights()
		smoothWeights = smooth.pointInfluenceWeights()
		for i in range( 0, resultWeights.size() ) :
			self.assertAlmostEqual( resultWeights[i], smoothWeights[i], 6 )

	def testLocks( self ) :
		""" Test SmoothSmoothSkinningWeightsOp locking mechanism"""

		ssd = self.original()

		op = IECoreScene.SmoothSmoothSkinningWeightsOp()
		op.parameters()['input'].setValue( ssd )
		op.parameters()['mesh'].setValue( self.mesh() )
		op.parameters()['smoothingRatio'].setValue( 0.3 )
		op.parameters()['iterations'].setValue( 3 )
		op.parameters()['applyLocks'].setValue( True )
		op.parameters()['influenceLocks'].setValue( IECore.BoolVectorData( [ True, False, False ] ) )
		result = op.operate()
		self.assertEqual( result.influenceNames(), ssd.influenceNames() )
		self.assertEqual( result.influencePose(), ssd.influencePose() )
		self.assertNotEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
		self.assertNotEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
		self.assertNotEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
		self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
		self.assertNotEqual( result, ssd )

		smooth = self.smoothWithLocks()
		self.assertEqual( result.influenceNames(), smooth.influenceNames() )
		self.assertEqual( result.influencePose(), smooth.influencePose() )
		self.assertEqual( result.pointIndexOffsets(), smooth.pointIndexOffsets() )
		self.assertEqual( result.pointInfluenceCounts(), smooth.pointInfluenceCounts() )
		self.assertEqual( result.pointInfluenceIndices(), smooth.pointInfluenceIndices() )
		resultWeights = result.pointInfluenceWeights()
		smoothWeights = smooth.pointInfluenceWeights()
		for i in range( 0, resultWeights.size() ) :
			self.assertAlmostEqual( resultWeights[i], smoothWeights[i], 6 )

		# make sure locked weights did not change
		dop = IECoreScene.DecompressSmoothSkinningDataOp()
		dop.parameters()['input'].setValue( result )
		decompressedResult = dop.operate()
		dop.parameters()['input'].setValue( ssd )
		decompressedOrig = dop.operate()
		resultIndices = decompressedResult.pointInfluenceIndices()
		resultWeights = decompressedResult.pointInfluenceWeights()
		origWeights = decompressedOrig.pointInfluenceWeights()
		for i in range( 0, resultWeights.size() ) :
			if resultIndices[i] == 0 :
				self.assertAlmostEqual( resultWeights[i], origWeights[i], 6 )

		# make sure the result is normalized
		nop = IECoreScene.NormalizeSmoothSkinningWeightsOp()
		nop.parameters()['input'].setValue( result )
		normalized = nop.operate()
		self.assertEqual( result.influenceNames(), normalized.influenceNames() )
		self.assertEqual( result.influencePose(), normalized.influencePose() )
		self.assertEqual( result.pointIndexOffsets(), normalized.pointIndexOffsets() )
		self.assertEqual( result.pointInfluenceCounts(), normalized.pointInfluenceCounts() )
		self.assertEqual( result.pointInfluenceIndices(), normalized.pointInfluenceIndices() )
		resultWeights = result.pointInfluenceWeights()
		normalizedWeights = normalized.pointInfluenceWeights()
		for i in range( 0, resultWeights.size() ) :
			self.assertAlmostEqual( resultWeights[i], normalizedWeights[i], 6 )

	def testVertexSelection( self ) :
		""" Test SmoothSmoothSkinningWeightsOp using selected vertices"""

		ssd = self.original()

		op = IECoreScene.SmoothSmoothSkinningWeightsOp()
		op.parameters()['input'].setValue( ssd )
		op.parameters()['mesh'].setValue( self.mesh() )
		op.parameters()['smoothingRatio'].setValue( 0.5 )
		op.parameters()['iterations'].setValue( 1 )
		op.parameters()['applyLocks'].setValue( False )
		op.parameters()['vertexIndices'].setFrameListValue( IECore.FrameList.parse( "2-4,10-12" ) )
		result = op.operate()
		self.assertEqual( result.influenceNames(), ssd.influenceNames() )
		self.assertEqual( result.influencePose(), ssd.influencePose() )
		self.assertNotEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
		self.assertNotEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
		self.assertNotEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
		self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
		self.assertNotEqual( result, ssd )

		smooth = self.smoothSelectVerts()
		self.assertEqual( result.influenceNames(), smooth.influenceNames() )
		self.assertEqual( result.influencePose(), smooth.influencePose() )
		self.assertEqual( result.pointIndexOffsets(), smooth.pointIndexOffsets() )
		self.assertEqual( result.pointInfluenceCounts(), smooth.pointInfluenceCounts() )
		self.assertEqual( result.pointInfluenceIndices(), smooth.pointInfluenceIndices() )
		resultWeights = result.pointInfluenceWeights()
		smoothWeights = smooth.pointInfluenceWeights()
		for i in range( 0, resultWeights.size() ) :
			self.assertAlmostEqual( resultWeights[i], smoothWeights[i], 6 )

		# make sure only selected vertices changed
		dop = IECoreScene.DecompressSmoothSkinningDataOp()
		dop.parameters()['input'].setValue( result )
		decompressedResult = dop.operate()
		dop.parameters()['input'].setValue( ssd )
		decompressedOrig = dop.operate()
		resultOffsets = decompressedResult.pointIndexOffsets()
		resultCounts = decompressedResult.pointInfluenceCounts()
		resultIndices = decompressedResult.pointInfluenceIndices()
		resultWeights = decompressedResult.pointInfluenceWeights()
		origWeights = decompressedOrig.pointInfluenceWeights()
		nonSelectedVerts = [ x for x in range( 0, resultOffsets.size() ) if x not in op.parameters()['vertexIndices'].getFrameListValue().asList() ]
		for i in nonSelectedVerts :
			for j in range( 0, resultCounts[i] ) :
				current = resultOffsets[i] + j
				self.assertAlmostEqual( resultWeights[current], origWeights[current], 6 )

	def testErrorStates( self ) :
		""" Test SmoothSmoothSkinningWeightsOp with various error states"""

		ssd = self.original()

		op = IECoreScene.SmoothSmoothSkinningWeightsOp()
		op.parameters()['input'].setValue( ssd )

		# bad mesh
		op.parameters()['mesh'].setValue( IECore.IntData(1) )
		self.assertRaises( RuntimeError, op.operate )

		# wrong number of verts
		op.parameters()['mesh'].setValue( op.parameters()['mesh'].defaultValue )
		self.assertRaises( RuntimeError, op.operate )

		# wrong number of locks
		op.parameters()['mesh'].setValue( self.mesh() )
		op.parameters()['applyLocks'].setValue( True )
		op.parameters()['influenceLocks'].setValue( IECore.BoolVectorData( [ True, False, True, False ] ) )
		self.assertRaises( RuntimeError, op.operate )

		# invalid vertex ids
		op.parameters()['applyLocks'].setValue( False )
		op.parameters()['vertexIndices'].setFrameListValue( IECore.FrameList.parse( "10-18" ) )
		self.assertRaises( RuntimeError, op.operate )
# Allow running this test file directly.
if __name__ == "__main__":
	unittest.main()
|
On this website we recommend many pictures about Barista Duties For Resume that we have collected from various sites, including kayakmedia.ca, and of course what we recommend are the most excellent pictures for Barista Duties For Resume. If you like the pictures on our website, please do not hesitate to visit again and get inspiration from our website.
And if you want to see more images more we recommend the gallery below, you can see the picture as a reference design from your Barista Duties For Resume.
Thank you for seeing gallery of Barista Duties For Resume, we would be very happy if you come back.
Similar Keyword Barista Duties For Resume : barista duties for resume, barista job duties for resume, and more.
|
from django.db.models.signals import post_syncdb
from django.conf import settings
from django.core import management
import os
import re
FIXTURE_RE = re.compile(r'^[^.]*.json$')
def load_data(sender, **kwargs):
"""
Loads fixture data after loading the last installed app
"""
if kwargs['app'].__name__ == settings.INSTALLED_APPS[-1] + ".models":
fixture_files = []
for loc in settings.INITIAL_FIXTURE_DIRS:
loc = os.path.abspath(loc)
if os.path.exists(loc):
fixture_files += os.listdir(loc)
fixture_files = filter(lambda v: FIXTURE_RE.match(v), fixture_files)
fixture_files = [os.path.join(loc, f) for f in fixture_files]
if len(fixture_files) > 0:
print "Initializing Fixtures:"
for fixture in fixture_files:
print " >> %s" % (fixture)
management.call_command('loaddata', fixture, verbosity=0)
# Update the index
print 'Generating Index'
management.call_command('index', 'all', flush=True, verbosity=1)
post_syncdb.connect(load_data)
|
Small Business Saturday is THIS SATURDAY…..are you ready?
Small Business Saturday is THIS Saturday, November 29th!!! In 2010, American Express founded Small Business Saturday to help businesses with their most pressing need — getting more customers. The day encourages people to shop at small businesses on the Saturday after Thanksgiving. The single day has grown into a powerful movement, and more people are … Continue reading Small Business Saturday is THIS SATURDAY…..are you ready?
“Shake it Off” by Taylor Swift is my ringtone.
What? A 40+ year old Black woman with locs can’t like a little Pop? I love music. All types of music. My Pandora has everything from Elton John and Coldplay Radio to Lil Wayne and Future when I’m feeling extra ratchet. I’m usually tuned in to the local pop station, especially with my son … Continue reading “Shake it Off” by Taylor Swift is my ringtone.
|
# -*- coding: utf-8 -*-
""" Python API for language and translation management. """
from collections import namedtuple
from django.conf import settings
from django.utils.translation import ugettext as _
from openedx.core.djangoapps.dark_lang.models import DarkLangConfig
from openedx.core.djangoapps.site_configuration.helpers import get_value
# Named tuples can be referenced using object-like attribute
# dereferencing, making the use of tuples more readable by
# eliminating the need to see the context of the tuple packing.
Language = namedtuple('Language', 'code name')
def header_language_selector_is_enabled():
    """Return true if the header language selector has been enabled via settings or site-specific configuration."""
    current = get_value(
        'SHOW_HEADER_LANGUAGE_SELECTOR',
        settings.FEATURES.get('SHOW_HEADER_LANGUAGE_SELECTOR', False)
    )
    # The SHOW_LANGUAGE_SELECTOR setting is deprecated, but might still be in use on some installations.
    legacy = get_value(
        'SHOW_LANGUAGE_SELECTOR',
        settings.FEATURES.get('SHOW_LANGUAGE_SELECTOR', False)
    )
    return current or legacy
def footer_language_selector_is_enabled():
    """Return true if the footer language selector has been enabled via settings or site-specific configuration."""
    # Site-specific configuration (get_value) takes the FEATURES flag as its fallback default.
    return get_value('SHOW_FOOTER_LANGUAGE_SELECTOR', settings.FEATURES.get('SHOW_FOOTER_LANGUAGE_SELECTOR', False))
def released_languages():
    """Retrieve the list of released languages.

    Constructs a list of Language tuples by intersecting the
    list of valid language tuples with the list of released
    language codes.

    Returns:
       list of Language: Languages in which full translations are available.

    Example:
        >>> print released_languages()
        [Language(code='en', name=u'English'), Language(code='fr', name=u'Français')]

    """
    released_codes = DarkLangConfig.current().released_languages_list
    # The platform default language is always considered released.
    default_code = settings.LANGUAGE_CODE
    if default_code not in released_codes:
        released_codes.append(default_code)
    released_codes.sort()

    # Keep only the configured languages that have been released,
    # preserving the ordering of settings.LANGUAGES.
    released = []
    for code, name in settings.LANGUAGES:
        if code in released_codes:
            released.append(Language(code, name))
    return released
def all_languages():
    """Retrieve the list of all languages, translated and sorted.

    Returns:
        list of (language code (str), language name (str)): the language names
        are translated in the current activated language and the results sorted
        alphabetically.

    """
    translated = [
        (code, _(name))  # pylint: disable=translation-of-non-string
        for code, name in settings.ALL_LANGUAGES
    ]
    # Sort alphabetically by the translated display name.
    translated.sort(key=lambda pair: pair[1])
    return translated
def get_closest_released_language(target_language_code):
    """
    Return the language code that most closely matches the target and is fully
    supported by the LMS, or None if there are no fully supported languages that
    match the target.
    """
    released = released_languages()

    # An exact code match always wins, regardless of position in the list.
    for language in released:
        if language.code == target_language_code:
            return language.code

    # Otherwise fall back to the first language sharing the two-letter prefix
    # (e.g. 'fr' for 'fr-ca').
    for language in released:
        if language.code[:2] == target_language_code[:2]:
            return language.code

    return None
|
Google has published a new webmaster help video about putting content in the body of webpages. Naturally, this features Matt Cutts’ head floating in the air.
How To Use The Google Keyword Planner?
Google has published a new video about how to use the Keyword Planner.
|
from itertools import chain
import warnings
class writeonce(object):
    """Write-once attribute, usable as a descriptor or as a class decorator.

    As a descriptor: ``x = writeonce(default)`` defines an attribute that may
    be assigned at most once per instance; a second assignment of a different
    value raises TypeError (re-assigning the *same* value only warns).

    As a class decorator: ``@writeonce('a', b=1)`` adds one writeonce
    descriptor per listed name (keyword form supplies a default value).

    Keyword-only options: ``writeonce_doc`` sets the property docstring,
    ``writeonce_msg`` customizes the overwrite error message.

    NOTE(review): per-instance values are stored on the instance under a key
    containing id(instance); this is Python 2 code (iterkeys/iteritems).
    """

    def __init__(self, *args, **kwargs):
        # Resolved lazily in get_name() by scanning the owner's MRO.
        self.name = None
        self.__doc__ = kwargs.pop('writeonce_doc', None)
        self.msg = kwargs.pop('writeonce_msg', None)
        self.args = args  # for class decorator case
        self.kwargs = kwargs
        if args:  # for property case
            self.default = args[0]

    def __call__(self, klass):
        # Class decorator entry point: add one writeonce property per name.
        for attr_name in chain(self.args, self.kwargs.iterkeys()):
            if hasattr(klass, attr_name):
                raise TypeError(u'%s already has "%s" attribute: unable to add writeonce property' % (klass, attr_name))
            default_args = []
            if attr_name in self.kwargs:
                # Keyword form carries the default value for this attribute.
                default_args.append(self.kwargs[attr_name])
            setattr(klass, attr_name, type(self)(*default_args))
        return klass

    @staticmethod
    def iter_bases_attrs(klass):
        # Iterate (name, value) pairs over the full MRO, base classes first.
        iterables = []
        for base in reversed(type.mro(klass)):
            iterables.append(base.__dict__.iteritems())
        return chain(*iterables)

    def get_name(self, obj):
        # Discover (and cache) the attribute name this descriptor is bound
        # to by searching the owner class hierarchy for this very object.
        if not self.name:
            for attr_name, attr_value in self.iter_bases_attrs(obj.__class__):
                if attr_value is self:
                    self.name = attr_name
        return self.name

    def __get__(self, instance, owner):
        if instance is None:
            # Class-level access returns the descriptor itself.
            return self
        key = '__writeonce_%s_%s' % (id(instance), self.get_name(instance))
        if hasattr(instance, key):
            return getattr(instance, key)
        elif hasattr(self, 'default'):
            return self.default
        else:
            raise AttributeError(u"%s has no attribute '%s'" % (instance, self.get_name(instance)))

    def __set__(self, instance, value):
        key = '__writeonce_%s_%s' % (id(instance), self.get_name(instance))
        if not hasattr(instance, key):
            # First assignment: store the value on the instance.
            setattr(instance, key, value)
        elif getattr(instance, key) is value:
            # Rewriting the identical object is tolerated with a warning.
            warnings.warn(u"Same value overwritten in writeonce attribute '%s' of '%s'" % (self.get_name(instance), instance))
        else:
            raise TypeError(
                (self.msg or u"immutable property '%(name)s' of %(instance)s can't be modified") % {
                    'name': self.get_name(instance),
                    'instance': instance,
                    'old_value': getattr(instance, key),
                    'value': value})

    def __delete__(self, instance):
        # Deletion is never permitted for write-once attributes.
        raise TypeError(u"immutable property %s of %s can't be deleted" % (self.get_name(instance), instance))
|
Dear readers: we are home!! And I don’t know what to tell you about first.
Josie, jumping joyously in her own bed for ten minutes straight, yelling, “Jump my bed! Jump my bed!” with the exuberance of a toddler liberated from the pack-n-play for good?
Phoebe’s sudden urge to dress as though she wants to wear all of her clothes—unpacked at last after two months—at once?
The stab of happiness I get every time I walk into the kitchen and see not a wall but a real dining room so big and pretty it makes our table—even with both leaves installed—look small?
I could tell you about the two-month adventure that went from intense to really intense when we learned that our church of thirteen years was dissolving. I could tell you about sharing a twin bed with Mitch for two weeks, or about learning to cook in six different kitchens, or about how ridiculously well the construction itself went, or about how thankful we are for everyone who hosted, fed, prayed for and/or helped us in the past two months.
But for now, I will tell you about a book.
We brought a lot of books with us on the road, mostly because we like options and we don’t like leaving books behind, but there are a few that we read daily and that lent structure to our otherwise structure-less lives. What Every Child Should Know About Prayer is one of those.
Even though half of my girls are well outside the recommended age range for this book, we started reading through What Every Child Should Know About Prayer together because this is the sort of subject I fumble through, either over-explaining or overlooking the fact that it needs explanation. And so I’m glad for Nancy Guthrie’s help here. I’m glad for her direct explanations and for the conversations they generate at our table.
But for now, friends, it is good to be home. We still have a crazy amount of work to do—there are rough drywall edges everywhere and we’re living on the subfloor—but still. We are reveling right now in the amount of work already done.
|
from erukar.system.engine.commands import Map, Inventory, Skills, Stats
class DifferentialMessageEngine:
    '''
    The Differential Message Engine takes messages that need to be sent
    to real life players and minimizes them before they are sent out.
    This should reduce bandwidth drastically.

    The last payload sent for each (node, state type) pair is cached in
    `self.state`, so only changed fields are transmitted on later turns.
    '''

    # Top-level map-state fields diffed together as one payload
    MapStateParams = [
        'minX',
        'minY',
        'height',
        'width',
        'pixel',
        'coordinates'
    ]

    # Map sub-payloads that each get their own differential channel
    MapTypesForDiff = ['rooms', 'actions', 'lighting']

    def __init__(self):
        # node -> {state type -> last payload sent to that node}
        self.state = {}

    def messages_for(self, instance, node, log):
        '''Yield every (message name, payload) update for a player node.'''
        yield from self.game(instance, node, log)
        yield from self.map(node)
        yield from self.skills(node)
        yield from self.vitals(node)
        yield from self.inventory(node)

    def game(self, inst, node, log):
        '''Yield the full (non-differential) game state update.'''
        char = node.lifeform()
        game_state = {
            'wealth': char.wealth,
            'log': log,
            'location': inst.dungeon.overland_location.alias(),
            'movementPoints': char.movement_allowed,
            'actionPoints': {
                'current': char.current_action_points,
                'reserved': char.reserved_action_points
            },
            'turnOrder': inst.turn_manager.frontend_readable_turn_order()[:4],
            'interactions': inst.get_interaction_results(node)
        }
        yield 'update state', game_state

    def map(self, node):
        '''Yield differential map updates, plus the full actor list.'''
        cmd = node.create_command(Map)
        new = cmd.execute().result_for(node.uid)[0]
        yield from self.map_state_diff(node, new)
        for _type in self.MapTypesForDiff:
            yield from self._type_diff(node, _type, new[_type])
        actors = new['actors']
        yield 'update actors', actors

    def map_state_diff(self, node, new):
        '''Yield a map-state update if any of MapStateParams changed.'''
        map_state = {kw: new[kw] for kw in self.MapStateParams}
        state = self.get(node, 'map_state', {})
        coord_diff = self.diff(node, 'map_state', map_state, state)
        if coord_diff:
            yield 'update map state', coord_diff

    def _type_diff(self, node, _type, new):
        '''Yield an 'update <type>' message when the payload changed.'''
        state = self.get(node, _type, {})
        diff = self.diff(node, _type, new, state)
        if diff:
            yield 'update {}'.format(_type), diff

    def inventory(self, node):
        '''Yield an inventory update containing only the changed entries.'''
        state = self.get(node, 'inventory', {})
        cmd = node.create_command(Inventory)
        new = cmd.execute().result_for(node.uid)[0]
        diff = self.diff(node, 'inventory', new, state)
        if diff:
            yield 'set inventory', diff

    def skills(self, node):
        '''Yield a skills update containing only the changed entries.'''
        cmd = node.create_command(Skills)
        new = cmd.execute().result_for(node.uid)[0]
        state = self.get(node, 'skills', {})
        skills_diff = self.diff(node, 'skills', new, state)
        if skills_diff:
            yield 'update skills', skills_diff

    def vitals(self, node):
        '''Yield a full vitals update whenever the Stats payload changed.'''
        cmd = node.create_command(Stats)
        new = cmd.execute().result_for(node.uid)[0]
        # BUG FIX: previously cached under 'skills', which clobbered the
        # skills cache and compared vitals against skills data.
        state = self.get(node, 'vitals', {})
        vitals_diff = self.diff(node, 'vitals', new, state)
        if vitals_diff:
            # Vitals clients expect the complete payload, not just the diff.
            yield 'update vitals', new

    def get(self, node, state_type, default):
        '''Return the cached payload for (node, state_type), creating
        the cache slots (with `default`) when missing.'''
        node_state = self.state.get(node, {})
        if not node_state:
            self.state[node] = {}
        specific_state = node_state.get(state_type, default)
        if not specific_state:
            self.state[node][state_type] = default
        return specific_state

    def diff(self, node, _type, new, state):
        '''Compute the recursive diff of `new` against `state`, store the
        merged state back into the cache, and return the diff.'''
        msg, state = self._dict_diffgen(node, new, state)
        self.state[node][_type] = state
        return msg

    def _dict_diffgen(self, node, msg, state):
        '''Recursively diff dict `msg` against `state`.

        Returns (diff, state): `diff` holds only changed keys (nested dicts
        diffed recursively); `state` is updated in place to match `msg`.'''
        state = state or {}
        diff = {}
        for key in msg:
            # New key, or the value's type changed: send and store whole value.
            if key not in state or not isinstance(msg[key], type(state[key])):
                state[key] = msg[key]
                diff[key] = msg[key]
                continue
            if isinstance(msg[key], dict):
                _diff, _state = self._dict_diffgen(node, msg[key], state[key])
                if _diff:
                    diff[key] = _diff
                state[key] = _state
                continue
            if msg[key] != state[key]:
                diff[key] = msg[key]
                state[key] = msg[key]
        return diff, state
|
Extract and replace substrings from a character vector.
str_sub will recycle all arguments to be the same length as the longest argument. If any arguments are of length 0, the output will be a zero length character vector.
Two integer vectors. start gives the position of the first character (defaults to first), end gives the position of the last (defaults to last character). Alternatively, pass a two-column matrix to start.
Negative values count backwards from the last character.
Single logical value. If TRUE, missing values in any of the arguments provided will result in an unchanged input.
A character vector of substrings from start to end (inclusive). It will be the length of the longest input argument.
Substrings are inclusive - they include the characters at both start and end positions. str_sub(string, 1, -1) will return the complete substring, from the first character to the last.
#> "a" "e" "i" "a"
#> "Hadley Wickham" "adley Wickham" "dley Wickham" "ley Wickham"
#> "ey Wickham" "y Wickham" " Wickham" "Wickham"
#> "ickham" "ckham" "kham" "ham"
#> "H" "Ha" "Had" "Hadl"
#> "Hadle" "Hadley" "Hadley " "Hadley W"
#> "Hadley Wi" "Hadley Wic" "Hadley Wick" "Hadley Wickh"
#> "Hadley Wickha" "Hadley Wickham"
x1 <- x2 <- x3 <- x4 <- "AAA"
str_sub(x1, 1, NA) <- "B"
str_sub(x3, 1, NA, omit_na = TRUE) <- "B"
|
###########################################################################
# #
# physical_validation, #
# a python package to test the physical validity of MD results #
# #
# Written by Pascal T. Merz <pascal.merz@me.com> #
# Michael R. Shirts <michael.shirts@colorado.edu> #
# #
# Copyright (c) 2017-2021 University of Colorado Boulder #
# (c) 2012 The University of Virginia #
# #
###########################################################################
r"""
lammps_parser.py
"""
import numpy as np
from ..util import error as pv_error
from . import (
ObservableData,
SimulationData,
SystemData,
TrajectoryData,
UnitData,
parser,
)
class LammpsParser(parser.Parser):
    """
    LammpsParser

    Parses LAMMPS input, data, log and dump files into a SimulationData
    object. Only the LAMMPS 'units real' unit style is supported.
    """

    def units(self):
        """Return the UnitData matching the unit style read from the input file.

        Returns
        -------
        UnitData

        Raises
        ------
        NotImplementedError
            If the unit style is anything other than 'real'.
        """
        if self.__unit == "real":
            return UnitData(
                # kB in kcal/mol/K: kJ/mol/K divided by the thermochemical calorie
                kb=8.314462435405199e-3 / 4.18400,
                energy_str="kcal/mol",
                energy_conversion=4.18400,
                length_str="A",
                length_conversion=0.1,
                volume_str="A^3",
                volume_conversion=1e-3,
                temperature_str="K",
                temperature_conversion=1,
                pressure_str="atm",
                pressure_conversion=1.01325,
                time_str="fs",
                time_conversion=1e-3,
            )
        else:
            raise NotImplementedError("Only LAMMPS 'units real' is implemented.")

    def __init__(self):
        # Unit style; overwritten by the value found in the input file.
        self.__unit = "lj"
        # Maps physical_validation observable names to LAMMPS thermo keywords.
        self.__lammps_energy_names = {
            "kinetic_energy": "KinEng",
            "potential_energy": "PotEng",
            "total_energy": "TotEng",
            "volume": "Vol",
            "pressure": "Press",
            "temperature": "Temp",
            "constant_of_motion": "TotEng",
        }
        # BETA warning
        print(
            "###########################################################################"
        )
        print(
            "# WARNING: The LAMMPS parser is an experimental feature under current #"
        )
        print(
            "# development. You can help us to improve it by reporting errors #"
        )
        print(
            "# at https://github.com/shirtsgroup/physical_validation #"
        )
        print(
            "# Thank you! #"
        )
        print(
            "###########################################################################"
        )

    def get_simulation_data(
        self, ensemble=None, in_file=None, log_file=None, data_file=None, dump_file=None
    ):
        """Assemble a SimulationData object from the given LAMMPS files.

        Parameters
        ----------
        ensemble: EnsembleData, optional
        in_file: str, optional
            LAMMPS input script; determines the unit style.
        log_file: str, optional
            Thermo log; provides the observable trajectories.
        data_file: str, optional
            Data file; provides topology (masses, molecules).
        dump_file: str, optional
            Dump file; provides positions, velocities and box bounds.

        Returns
        -------
        result: SimulationData
        """
        # input file
        input_dict = None
        if in_file is not None:
            input_dict = self.__read_input_file(in_file)
        if input_dict is not None:
            self.__unit = input_dict["units"][0]
        # data file
        data_dict = None
        if data_file is not None:
            data_dict = self.__read_data_file(data_file)
        # log file
        log_dict = None
        if log_file is not None:
            log_dict = self.__read_log_file(log_file)
        # dump file
        dump_dict = None
        if dump_file is not None:
            dump_dict = self.__read_dump_file(dump_file)
        # Create SimulationData object
        result = SimulationData()
        result.units = self.units()
        # Ensemble must be provided
        if ensemble is not None:
            result.ensemble = ensemble
        # trajectory data from dump
        if dump_dict is not None:
            result.trajectory = TrajectoryData(
                dump_dict["position"], dump_dict["velocity"]
            )
        # system data
        if data_dict is not None:
            system = SystemData()
            system.natoms = data_dict["Header"]["atoms"]
            masses = data_dict["Masses"]
            mass = []
            molecule_idx = []
            molec = -1
            for atom in data_dict["Atoms"]:
                # Masses are keyed by atom type; the lookup works even though
                # the type was parsed as float (hash(1.0) == hash(1)).
                mass.append(float(masses[atom["type"]][0]))
                if molec != atom["molec"]:
                    molec = atom["molec"]
                    molecule_idx.append(atom["n"])
            system.mass = mass
            system.molecule_idx = molecule_idx
            system.nconstraints = 0
            system.nconstraints_per_molecule = np.zeros(len(system.molecule_idx))
            system.ndof_reduction_tra = 0
            system.ndof_reduction_rot = 0
            if input_dict is not None:
                # BUG FIX: previously tested `"rattle" in input_dict["rattle"]`
                # (wrong key, raising KeyError) and indexed input_dict["fix"]
                # unconditionally although the key only exists if at least
                # one fix command was found.
                fixes = input_dict.get("fix", {})
                if "shake" in fixes or "rattle" in fixes:
                    print(
                        "NOTE: Found `fix shake` or `fix rattle`. Reading of\n"
                        " constraints is currently not implemented.\n"
                        " Please set system.nconstraints manually."
                    )
                # center of mass constraining
                if "recenter" in fixes:
                    system.ndof_reduction_tra = 3
            result.system = system
        # observable data
        if log_dict is not None:
            result.observables = ObservableData()
            for key, lammpskey in self.__lammps_energy_names.items():
                if lammpskey in log_dict:
                    result.observables[key] = log_dict[lammpskey]
            if self.__lammps_energy_names["volume"] not in log_dict:
                # Volume was not logged - reconstruct it from the box
                # bounds found in the dump file, if available.
                if dump_dict is not None:
                    vol = []
                    for b in dump_dict["box"]:
                        vol.append(b[0] * b[1] * b[2])
                    if len(vol) == 1:
                        vol = vol * result.observables.nframes
                    # If frame counts disagree but the box is constant,
                    # broadcast the constant volume to all frames.
                    if len(vol) != result.observables.nframes and np.allclose(
                        [vol[0]] * len(vol), vol
                    ):
                        vol = [vol[0]] * result.observables.nframes
                    key = "volume"
                    result.observables[key] = vol
        return result

    @staticmethod
    def __read_input_file(name):
        """Parse a LAMMPS input script.

        Returns
        -------
        dict
            Maps each command name to a list of its argument strings;
            'fix' commands are instead collected per fix style, each as a
            dict with keys 'ID', 'group-ID', 'style' and 'args'.
        """
        # parse input file
        input_dict = {}
        with open(name) as f:
            for line in f:
                # strip trailing comments and surrounding whitespace
                line = line.split("#")[0].strip()
                if not line:
                    continue
                parts = line.split(maxsplit=1)
                option = parts[0].strip()
                # Commands without arguments (e.g. `clear`) get an empty value.
                value = parts[1].strip() if len(parts) > 1 else ""
                if option == "fix":
                    if "fix" not in input_dict:
                        input_dict["fix"] = {}
                    line = line.split()
                    style = line[3]
                    if style not in input_dict["fix"]:
                        input_dict["fix"][style] = []
                    input_dict["fix"][style].append(
                        {
                            "ID": line[1],
                            "group-ID": line[2],
                            "style": style,
                            "args": line[4:],
                        }
                    )
                elif option == "unfix":
                    del_id = line.split()[1]
                    # .get(): tolerate `unfix` appearing before any `fix`
                    for style in input_dict.get("fix", {}):
                        input_dict["fix"][style] = [
                            fix
                            for fix in input_dict["fix"][style]
                            if fix["ID"] != del_id
                        ]
                elif option in input_dict:
                    input_dict[option].append(value)
                else:
                    input_dict[option] = [value]
        return input_dict

    @staticmethod
    def __read_data_file(name):
        """Parse a LAMMPS data file into a dict of named blocks.

        Returns
        -------
        dict
            'Header' maps header keywords to values; coefficient blocks map
            type ids to coefficient lists; 'Atoms', 'Velocities' and the
            topology blocks are lists of per-entry dicts.
        """
        # > available blocks
        blocks = [
            "Header",  # 0
            "Masses",  # 1
            "Nonbond Coeffs",
            "Bond Coeffs",
            "Angle Coeffs",
            "Dihedral Coeffs",
            "Improper Coeffs",
            "BondBond Coeffs",
            "BondAngle Coeffs",
            "MiddleBondTorsion Coeffs",
            "EndBondTorsion Coeffs",
            "AngleTorsion Coeffs",
            "AngleAngleTorsion Coeffs",
            "BondBond13 Coeffs",
            "AngleAngle Coeffs",
            "Atoms",  # 15
            "Velocities",  # 16
            "Bonds",  # 17
            "Angles",
            "Dihedrals",
            "Impropers",
        ]
        file_blocks = {}
        # > read file
        with open(name) as f:
            # header section must appear first in file
            block = "Header"
            file_blocks["Header"] = []
            # 1st 2 lines are ignored
            next(f)
            next(f)
            for line in f:
                line = line.strip()
                if not line:
                    continue
                if line in blocks:
                    block = line
                    file_blocks[block] = []
                    continue
                file_blocks[block].append(line)
        data_dict = {}
        # > handle header
        block = "Header"
        header_single = [
            "atoms",
            "bonds",
            "angles",
            "dihedrals",
            "impropers",
            "atom types",
            "bond types",
            "angle types",
            "dihedral types",
            "improper types",
        ]
        header_double = ["xlo xhi", "ylo yhi", "zlo zhi"]
        # default values
        data_dict[block] = {hs: 0 for hs in header_single}
        data_dict[block].update({hd: [0.0, 0.0] for hd in header_double})
        # read out
        for line in file_blocks[block]:
            if line.split(maxsplit=1)[1] in header_single:
                hs = line.split(maxsplit=1)[1]
                data_dict[block][hs] = int(line.split(maxsplit=1)[0])
            elif line.split(maxsplit=2)[2] in header_double:
                hs = line.split(maxsplit=2)[2]
                data_dict[block][hs] = [
                    float(line.split(maxsplit=2)[0]),
                    float(line.split(maxsplit=2)[1]),
                ]
            else:
                raise pv_error.FileFormatError(name, "Unknown header line")
        # > handle coeffs
        # N type coeff1 coeff2 ...
        for block in blocks[1:15]:
            if block not in file_blocks:
                continue
            data_dict[block] = {}
            for line in file_blocks[block]:
                line = line.split()
                data_dict[block][int(line[0])] = [line[1]] + [
                    float(c) for c in line[2:]
                ]
        # > handle atoms
        # n molecule-tag atom-type q x y z nx ny nz
        block = blocks[15]
        data_dict[block] = []
        for line in file_blocks[block]:
            line = line.split()
            if len(line) == 7:
                data_dict[block].append(
                    {
                        "n": int(line[0]),
                        "molec": int(line[1]),
                        "type": float(line[2]),
                        "q": float(line[3]),
                        "x": float(line[4]),
                        "y": float(line[5]),
                        "z": float(line[6]),
                    }
                )
            else:
                # Otherwise the image flags nx ny nz are assumed present.
                data_dict[block].append(
                    {
                        "n": int(line[0]),
                        "molec": int(line[1]),
                        "type": float(line[2]),
                        "q": float(line[3]),
                        "x": float(line[4]),
                        "y": float(line[5]),
                        "z": float(line[6]),
                        "nx": float(line[7]),
                        "ny": float(line[8]),
                        "nz": float(line[9]),
                    }
                )
        # > handle velocities
        # N vx vy vz
        block = blocks[16]
        if block in file_blocks:
            data_dict[block] = []
            for line in file_blocks[block]:
                line = line.split()
                data_dict[block].append(
                    {
                        "n": int(line[0]),
                        "vx": float(line[1]),
                        "vy": float(line[2]),
                        "vz": float(line[3]),
                    }
                )
        # > handle bonds etc
        # N bond-type atom-1 atom-2 ...
        for block in blocks[17:]:
            if block not in file_blocks:
                continue
            data_dict[block] = []
            for line in file_blocks[block]:
                line = line.split()
                data_dict[block].append(
                    {"n": int(line[0]), "atoms": [int(c) for c in line[1:]]}
                )
        # return dictionary
        return data_dict

    @staticmethod
    def __read_log_file(name):
        """Parse a LAMMPS log file and return the thermo trajectories.

        Handles both one-line ('thermo_style one'/'custom') tables and
        multi-line ('thermo_style multi') output. If several runs are
        found, only the last one is kept (earlier runs are assumed to be
        equilibration).

        Returns
        -------
        dict
            Maps each thermo field name to a list of float values.
        """

        # parse log file
        def start_single(line1, line2):
            # A single-line table starts with a header line followed by a
            # line of the same width containing only numbers.
            if not line1.split():
                return False
            if len(line1.split()) != len(line2.split()):
                return False
            try:
                [float(nn) for nn in line2.split()]
            except ValueError:
                return False
            return True

        def end_single(line, length):
            # The table ends when the width changes or a field is non-numeric.
            if len(line.split()) != length:
                return True
            try:
                [float(nn) for nn in line.split()]
            except ValueError:
                return True
            return False

        def start_multi(line):
            if "---- Step" in line and "- CPU =" in line:
                return True
            return False

        def end_multi(line):
            line = line.split()
            # right length (is it actually always 9??)
            if len(line) == 0 or len(line) % 3 != 0:
                return True
            # 2nd, 5th, 8th, ... entry must be '='
            for eq in line[1::3]:
                if eq != "=":
                    return True
            # 3rd, 6th, 9th, ... entry must be numeric
            try:
                [float(nn) for nn in line[2::3]]
            except ValueError:
                return True
            return False

        ene_traj = {}
        nreads = 0
        with open(name) as f:
            read_single = False
            read_multi = False
            continued = False
            old_line = ""
            fields = []
            for new_line in f:
                if read_single:
                    if end_single(new_line, len(fields)):
                        read_single = False
                        continued = True
                    else:
                        for field, n in zip(fields, new_line.split()):
                            ene_traj[field].append(float(n))
                if read_multi:
                    if end_multi(new_line):
                        read_multi = False
                        continued = True
                    else:
                        for field, n in zip(
                            new_line.split()[0::3], new_line.split()[2::3]
                        ):
                            if field not in ene_traj:
                                ene_traj[field] = []
                            ene_traj[field].append(float(n))
                if not (read_single or read_multi):
                    if start_multi(new_line):
                        # `continued` marks a table resuming within the same
                        # run; only a fresh run resets the trajectories.
                        if not continued:
                            ene_traj = {}
                            nreads += 1
                        read_multi = True
                        old_line = new_line
                    if start_single(old_line, new_line):
                        if not continued:
                            ene_traj = {}
                            nreads += 1
                        read_single = True
                        fields = new_line.split()
                        for field in fields:
                            if field not in ene_traj:
                                ene_traj[field] = []
                    old_line = new_line
                    continued = False
        if nreads > 1:
            print(
                "NOTE: Multiple runs found in log file. Assumed prior runs\n"
                " were equilibration runs and used only last run."
            )
        return ene_traj

    @staticmethod
    def __read_dump_file(name):
        """Parse a LAMMPS dump file.

        Returns
        -------
        dict
            'position': per-frame lists of [x, y, z];
            'velocity': per-frame lists of [vx, vy, vz] (empty if absent);
            'box': per-frame [bx, by, bz] box edge lengths.
        """
        # parse dump file
        # the dictionary to be filled
        dump_dict = {"position": [], "velocity": [], "box": []}

        # helper function checking line items
        def check_item(line_str, item):
            item = "ITEM: " + item
            if not line_str.startswith(item):
                raise pv_error.FileFormatError(name, "dump file: was expecting " + item)
            return line_str.replace(item, "")

        with open(name) as f:
            line = f.readline()
            while line:
                check_item(line, "TIMESTEP")
                f.readline()  # the timestep value itself is not used
                line = f.readline()
                check_item(line, "NUMBER OF ATOMS")
                natoms = int(f.readline())
                line = f.readline()
                line = check_item(line, "BOX BOUNDS")
                bx = 0
                by = 0
                bz = 0
                if len(line.split()) == 3:
                    # orthogonal (rectangular)
                    # xx yy zz, where each of them one of
                    # p = periodic, f = fixed, s = shrink wrap,
                    # or m = shrink wrapped with a minimum value
                    line = f.readline().split()
                    bx = float(line[1]) - float(line[0])
                    line = f.readline().split()
                    by = float(line[1]) - float(line[0])
                    line = f.readline().split()
                    bz = float(line[1]) - float(line[0])
                elif len(line.split()) == 6:
                    # triclinic
                    # xy xz yz xx yy zz, where xy xz yz indicates
                    # 3 tilt factors will be included, and
                    # xx yy zz being each one of
                    # p = periodic, f = fixed, s = shrink wrap,
                    # or m = shrink wrapped with a minimum value
                    # (the previous message wrongly called this "orthogonal")
                    raise NotImplementedError("Triclinic box reading is not implemented.")
                line = f.readline()
                line = check_item(line, "ATOMS").split()
                if "x" not in line or "y" not in line or "z" not in line:
                    raise pv_error.FileFormatError(name, "No positions in dump file.")
                irx = line.index("x")
                iry = line.index("y")
                irz = line.index("z")
                has_velocities = False
                ivx = None
                ivy = None
                ivz = None
                if "vx" in line and "vy" in line and "vz" in line:
                    has_velocities = True
                    ivx = line.index("vx")
                    ivy = line.index("vy")
                    ivz = line.index("vz")
                positions = []
                velocities = []
                for n in range(natoms):
                    line = f.readline().split()
                    positions.append([float(line[idx]) for idx in [irx, iry, irz]])
                    if has_velocities:
                        velocities.append([float(line[idx]) for idx in [ivx, ivy, ivz]])
                dump_dict["position"].append(positions)
                dump_dict["velocity"].append(velocities)
                dump_dict["box"].append([bx, by, bz])
                # end of dump loop
                line = f.readline()
        return dump_dict
|
(800) 488-3238. If you reside in or near Manitou Springs, Colorado, are coping with an unplanned pregnancy, and are searching for adoption services, then you have come to the right place! Some birth parents may have simply typed the words “putting a baby up for adoption” or “giving a child up for adoption” into a web search and are starting to think about options. Others may have been thinking about giving a child up for adoption for a while now and are ready to pick a program and a family. Forever After Adoptions can help you put an adoption plan together, whether you just found out about the pregnancy or just delivered the baby. No case is too early or too late for our staff. If you are not able to parent, you can still plan for your child’s future by choosing a safe, loving family for your child. You can receive photos and updates of the child you place. The Forever After Adoptions staff will discuss all of your options and provide adoption support so that you will feel comfortable with whatever choice you make. If you are a birth parent, you can find descriptions of the different adoption types here, and you can count on us to work with you to find the right plan and the right family so that you can feel good about the choices you make. For most birth mothers, we are able to help with costs of living, housing, medical bills, and any other expenses related to the pregnancy and the placement of the baby for adoption. If you are in need of adoption support, contact us – we can have a Colorado licensed adoption attorney call you or meet you in person in Manitou Springs, Colorado.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.