text
stringlengths 29
850k
|
|---|
# Plot of the Lorenz Attractor based on Edward Lorenz's 1963 "Deterministic
# Nonperiodic Flow" publication.
# http://journals.ametsoc.org/doi/abs/10.1175/1520-0469%281963%29020%3C0130%3ADNF%3E2.0.CO%3B2
#
# Note: Because this is a simple non-linear ODE, it would be more easily
# done using SciPy's ode solver, but this approach depends only
# upon NumPy.
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def lorenz(x, y, z, s=10, r=13, b=2.667):
    """Return the Lorenz-system derivatives (dx/dt, dy/dt, dz/dt).

    ``s``, ``r`` and ``b`` are the classic Lorenz parameters (Prandtl
    number, Rayleigh number, geometric factor); defaults are the values
    this script plots with.
    """
    dx = s*(y - x)
    dy = r*x - y - x*z
    dz = x*y - b*z
    return (dx, dy, dz)
dt = 0.01
stepCnt = 10000

# Need one more slot than steps for the initial values.
xs = np.empty((stepCnt + 1,))
ys = np.empty((stepCnt + 1,))
zs = np.empty((stepCnt + 1,))
ws = np.empty((stepCnt + 1,))  # elapsed time at each sample

# Setting initial values.
xs[0], ys[0], zs[0], ws[0] = (0., 1., 1.05, 0.)

# Stepping through "time" with a forward-Euler integration.
for i in range(stepCnt):
    # Derivatives of the X, Y, Z state
    x_dot, y_dot, z_dot = lorenz(xs[i], ys[i], zs[i])
    xs[i + 1] = xs[i] + (x_dot * dt)
    ys[i + 1] = ys[i] + (y_dot * dt)
    zs[i + 1] = zs[i] + (z_dot * dt)
    # Bug fix: sample i+1 corresponds to time (i+1)*dt. The old
    # `ws[i + 1] = i * dt` repeated t=0 at index 1 and lagged the
    # whole time axis by one step.
    ws[i + 1] = (i + 1) * dt

plt.figure(1)

# Top panel: x-y phase portrait of the attractor.
plt.subplot(2, 1, 1)
plt.plot(xs, ys)
plt.yscale('linear')
plt.title('xy')
plt.grid(True)
plt.gca().set_aspect('equal')

# Bottom panel: z against elapsed time.
plt.subplot(2, 1, 2)
plt.plot(ws, zs)
plt.yscale('linear')
plt.title('wz')
plt.grid(True)
plt.gca().set_aspect('equal')

plt.show()

print(ws[0:10])
print(ys)
print(ws)
|
Southern First Bancshares, Inc. reported net income available to common shareholders of $5.2 million, or $0.67 per diluted share, for the first quarter of 2018.
The company's $6 million investment is projected to create more than 150 new jobs over the next five years.
The Greenville Chamber and CommunityWorks have partnered to host two events during National Small Business Week to recognize small businesses.
The deal with Clemson University marks MHI Vestas Offshore Wind’s first major investment in the United States.
Donan has served public and private clients in the global industrial manufacturing, automotive, healthcare, transportation, and technology industries.
This grant program focuses on projects that are outcome-oriented and aim to build stronger entrepreneurs and companies.
The Greater Greer Chamber of Commerce recognized Guy Furay and The Insurance Source as the April 2018 Small Business of the Quarter.
FUEL received the 2018 Interactive Marketing Award for “Best Integrated Campaign” for the work they did on the Blue Ridge Mountain Club SOAR Campaign.
Since its inception, the event has raised more than $2.5 million for human-help agencies in Anderson, Greenville, Oconee, and Pickens counties.
It is Bon Secours’ seventh consecutive year receiving this award.
|
"""
.. module:: brane
:platform: Unix
:synopsis: Simulate effect of anisotropic scattering.
.. moduleauthor:: Katherine Rosenfeld <krosenf@gmail.com>
.. moduleauthor:: Michael Johnson
Default settings are appropriate for Sgr A*
Resources:
Bower et al. (2004, 2006)
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import RectBivariateSpline
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import zoom,rotate
from numpy import (pi,sqrt,log,sin,cos,exp,ceil,arange,
min,abs,ones,radians,dot,transpose,
zeros_like,clip,empty,empty,empty_like,reshape)
from numpy.fft import fft2,fftfreq
from astropy.constants import au,pc
from astropy import units
import logging
from scatterbrane import utilities
__all__ = ["Brane"]
class Brane(object):
    """
    Scattering simulation object.

    :param model: ``(n, n)``
      Numpy array of the source image.
    :param dx: scalar
      Resolution element of model in uas.
    :param nphi: (optional) ``(2, )`` or scalar
      Number of pixels in a screen. This may be a tuple specifying the dimensions of a rectangular screen.
    :param screen_res: (optional) scalar
      Size of a screen pixel in units of :math:`R_0`.
    :param wavelength: (optional) scalar
      Observing wavelength in meters.
    :param dpc: (optional) scalar
      Observer-Source distance in parsecs.
    :param rpc: (optional) scalar
      Observer-Scatterer distance in parsecs.
    :param r0: (optional) scalar
      Phase coherence length along major axis as preset string or in km.
    :param r_outer: (optional) scalar
      Outer scale of turbulence in units of :math:`R_0`.
    :param r_inner: (optional) scalar
      Inner scale of turbulence in units of :math:`R_0`.
    :param alpha: (optional) string or scalar
      Preset string or float to set power-law index for turbulence scaling (e.g., Kolmogorov has :math:`\\alpha= 5/3`)
    :param anisotropy: (optional) scalar
      Anisotropy for screen is the EW / NS elongation.
    :param pa: (optional) scalar
      Orientation of kernel's major axis, East of North, in degrees.
    :param live_dangerously: (optional) bool
      Skip the parameter checks?
    :param think_positive: (optional) bool
      Should we enforce that the source image has no negative pixel values?

    :returns: An instance of a scattering simulation.

    :Example:

    .. code-block:: python

      s = Brane(m,dx,nphi=2**12,screen_res=5.,wavelength=3.0e-3,dpc=8.4e3,rpc=5.8e3)

    where ``s`` is the class instance, ``m`` is the image array, ``nphi`` is the number of screen pixels,
    ``wavelength`` is the observing wavelength.

    .. note:: :math:`R_0` is the phase coherence length and Sgr A* defaults are from Bower et al. (2006).
    """

    def __init__(self,model,dx,nphi=2**12,screen_res=2,\
                 wavelength=1.3e-3,dpc=8400,rpc=5800,r0 = 'sgra',\
                 r_outer=10000000,r_inner=12,alpha='kolmogorov',\
                 anisotropy=2.045,pa=78,match_screen_res=False,live_dangerously=False,think_positive=False):

        # set initial members
        self.logger = logging.getLogger(self.__class__.__name__)
        self.live_dangerously = live_dangerously
        self.think_positive = think_positive

        self.wavelength = wavelength*1e-3  # observing wavelength in km (input is meters)
        self.dpc = float(dpc)              # earth-source distance in pc
        self.rpc = float(rpc)              # R: source-scatterer distance in pc
        self.d = self.dpc - self.rpc       # D: earth-scatterer distance in pc
        self.m = self.d/self.rpc           # magnification factor (D/R)
        if r0 == 'sgra':
            # major axis (EW) phase coherence length in km
            # (Sgr A* preset; scales linearly with wavelength)
            self.r0 = 3136.67*(1.3e-6/self.wavelength)
        else:
            try:
                self.r0 = float(r0)
            except:
                raise ValueError('Bad value for r0')
        self.anisotropy = anisotropy  # anisotropy for screen = (EW / NS elongation)
        self.pa = pa                  # orientation of major axis, E of N (or CCW of +y)

        # Fresnel scale in km
        self.rf = sqrt(self.dpc*pc.to(units.km).value / (2*pi / self.wavelength) * self.m / (1+self.m)**2)

        # compute pixel scale for image
        if match_screen_res:
            # one screen pixel per image pixel
            self.ips = 1
            self.screen_dx = screen_res * self.r0
        else:
            self.screen_dx = screen_res * self.r0  # size of a screen pixel in km
            self.ips = int(ceil(1e-6*dx*self.d*au.to(units.km).value/self.screen_dx))  # image pixel / screen pixel

        # image arrays
        self.dx = 1e6 * self.ips * (self.screen_dx / (au.to(units.km).value * self.d))  # image pixel scale in uas
        self.nx = int(ceil(model.shape[-1] * dx / self.dx))  # number of image pixels
        self.model = model      # source model
        self.model_dx = dx      # source model resolution
        self.iss = np.array([],dtype=np.float64)   # scattered image (filled by scatter())
        self.isrc = np.array([],dtype=np.float64)  # source image at same scale as scattered image

        # screen parameters
        if type(nphi) == int:
            self.nphi = (nphi,nphi)  # size of screen array
        else:
            self.nphi = nphi
        self.nphi = np.asarray(self.nphi)
        self.r_inner = r_inner  # inner turbulence scale in r0
        self.r_outer = r_outer  # outer turbulence scale in r0
        #self.qmax = 1.*screen_res/r_inner # 1 / inner turbulence scale in pix
        #self.qmin = 1.*screen_res/r_outer # 1 / outer tubulence scale in pix
        if alpha == 'kolmogorov':
            self.alpha = 5./3
        else:
            try:
                self.alpha = float(alpha)
            except:
                raise ValueError('Bad value for alpha')

        # use logger to report
        self.chatter()

        # includes sanity check
        self.setModel(self.model,self.model_dx,think_positive=self.think_positive)

    def _checkSanity(self):
        '''
        Check that screen parameters are consistent.
        '''
        # sanity check: is screen large enough?
        assert np.ceil(self.nx*self.ips)+2 < np.min(self.nphi), \
            "screen is not large enough: {0} > {1}".\
            format(int(np.ceil(self.ips*self.nx)+2),np.min(self.nphi))

        # sanity check: is image square?
        assert self.model.shape[-1] == self.model.shape[-2], \
            'source image must be square'

        # sanity check: integer number of screen pixels / image pixel?
        assert self.ips % 1 == 0, 'image/screen pixels should be an integer'

        # is inner turbulence scale larger than r0?
        #assert 1./self.qmax > self.r0/self.screen_dx, 'turbulence inner scale < r0'
        assert self.r_inner > 1., 'turbulence inner scale < r0'

        # check image smoothness: warn if the source carries significant
        # power at spatial frequencies beyond the inner turbulence scale.
        V = fft2(self.isrc)
        freq = fftfreq(self.nx,d=self.dx*radians(1.)/(3600*1e6))
        u = dot(transpose([np.ones(self.nx)]),[freq])
        v = dot(transpose([freq]),[ones(self.nx)])
        try:
            if max(abs(V[sqrt(u*u+v*v) > (1.+self.m)*self.r_inner*self.r0/self.wavelength])) / self.isrc.sum() > 0.01:
                self.logger.warning('image is not smooth enough: {0:g} > 0.01'.format(max(abs(V[sqrt(u*u+v*v) > (1.+self.m)*self.r_inner*self.r0/self.wavelength])) / self.isrc.sum()))
        except ValueError:
            # the boolean mask selected no pixels
            self.logger.warning('r_inner is too large to test smoothness')

        # is screen pixel smaller than inner turbulence scale?
        #assert 1./self.qmax >= 1, 'screen pixel > turbulence inner scale'
        assert self.r_inner*self.r0/self.screen_dx >= 1, 'screen pixel > turbulence inner scale'
        if (self.rf*self.rf/self.r0/(self.ips*self.screen_dx) < 3):
            self.logger.warning('WARNING: image resolution is approaching Refractive scale')

    def chatter(self):
        '''
        Print information about the current scattering simulation where many parameters are cast as integers.
        '''
        fmt = "{0:32s} :: "
        self.logger.info( (fmt + "{1:g}").format('Observing wavelength [mm]',1e6*self.wavelength) )
        self.logger.info( (fmt + "{1:d}").format('Phase coherence length [km]',int(self.r0)) )
        self.logger.info( (fmt + "{1:d}").format('Fresnel scale [km]',int(self.rf)) )
        self.logger.info( (fmt + "{1:d}").format('Refractive scale [km]',int(self.rf**2/self.r0)) )
        #self.logger.info( (fmt + "{1:d}").format('Inner turbulence scale [km]',int(self.screen_dx/self.qmax)))
        self.logger.info( (fmt + "{1:d}").format('Inner turbulence scale [km]',int(self.r_inner*self.r0)))
        self.logger.info( (fmt + "{1:d}").format('Screen resolution [km]',int(self.screen_dx)))
        self.logger.info( (fmt + "{1:d} {2:d}").format('Linear filling factor [%,%]',*map(int,100.*self.nx*self.ips/self.nphi)) )
        self.logger.info( (fmt + "{1:g}").format('Image resolution [uas]',self.dx))
        self.logger.info( (fmt + "{1:d}").format('Image size',int(self.nx)))

    def _generateEmptyScreen(self):
        '''
        Create an empty (all-zero phase) screen.
        '''
        if not self.live_dangerously: self._checkSanity()
        self.phi = np.zeros(self.nphi)

    def setModel(self,model,dx,think_positive=False):
        '''
        Set new model for the source.

        :param model: ``(n, n)``
          Numpy image array.
        :param dx: scalar
          Pixel size in microarcseconds.
        :param think_positive: (optional) bool
          Should we enforce that the source image has no negative pixel values?
        '''
        self.nx = int(ceil(model.shape[-1] * dx / self.dx))  # number of image pixels
        self.model = model    # source model
        self.model_dx = dx    # source model resolution

        # load source image that has size and resolution compatible with the screen.
        self.isrc = np.empty(2*(self.nx,))
        self.think_positive = think_positive

        M = self.model.shape[1]  # size of original image array
        # interpolate the model onto the screen-compatible pixel grid
        f_img = RectBivariateSpline(self.model_dx/self.dx*(np.arange(M) - 0.5*(M-1)),\
                                    self.model_dx/self.dx*(np.arange(M) - 0.5*(M-1)),\
                                    self.model)
        xx_,yy_ = np.meshgrid((np.arange(self.nx) - 0.5*(self.nx-1)),\
                              (np.arange(self.nx) - 0.5*(self.nx-1)),indexing='xy')
        m = f_img.ev(yy_.flatten(),xx_.flatten()).reshape(2*(self.nx,))
        self.isrc = m * (self.dx/self.model_dx)**2  # rescale for change in pixel size

        if self.think_positive:
            # clip negative interpolation artifacts
            self.isrc[self.isrc < 0] = 0

        if not self.live_dangerously: self._checkSanity()

    def generatePhases(self,seed=None,save_phi=None):
        '''
        Generate screen phases.

        :param seed: (optional) scalar
          Seed for random number generator
        :param save_phi: (optional) string
          To save the screen, set this to the filename.
        '''
        # check setup
        if not self.live_dangerously: self._checkSanity()

        # seed the generator
        if seed != None:
            np.random.seed(seed=seed)

        # include anisotropy: squared frequency grids for the rfft layout
        qx2 = dot(transpose([np.ones(self.nphi[0])]),[np.fft.rfftfreq(self.nphi[1])**2])
        qy2 = dot(transpose([np.fft.fftfreq(self.nphi[0])**2*self.anisotropy**2]),[np.ones(self.nphi[1]//2+1)])
        rr = qx2+qy2
        rr[0,0] = 0.02  # arbitrary normalization (avoids the q=0 singularity)

        # generating phases with given power spectrum
        size = rr.shape
        qmax2 = (self.r_inner*self.r0/self.screen_dx)**-2
        qmin2 = (self.r_outer*self.r0/self.screen_dx)**-2
        phi_t = (1/sqrt(2) * sqrt(exp(-1./qmax2*rr) * (rr + qmin2)**(-0.5*(self.alpha+2.)))) \
                * (np.random.normal(size=size) + 1j * np.random.normal(size=size))

        # calculate phi (real-space phases via inverse real FFT)
        self.phi = np.fft.irfft2(phi_t)

        # normalize structure function so D(r0) matches the definition of r0
        nrm = self.screen_dx/(self.r0*sqrt(self._getPhi(1,0)))
        self.phi *= nrm

        # save screen
        if save_phi != None:
            np.save(save_phi,self.phi)

    def _checkDPhi(self,nLag=5):
        '''
        Report the phase structure function for various lags.

        :param nLag: (optional) int
          Number of lags to report starting with 0.
        '''
        self.logger.info( "\nEstimates of the phase structure function at various lags:")
        for i in np.arange(nLag):
            # NOTE(review): logger.info is called print()-style with multiple
            # positional args; logging treats them as lazy %-format args, so
            # the numbers will not appear in the message -- confirm intent.
            self.logger.info( "lag ",i, self._getPhi(i,0), self._getPhi(0,i), self._getPhi(i,i))

    def _getPhi(self,lag_x,lag_y):
        '''
        Empirical estimate for the phase structure function at the given lag.

        :param lag_x: int
          Screen pixels to lag in x direction.
        :param lag_y: int
          Screen pixels to lag in y direction.
        '''
        # NOTE(review): this guard checks lag_x against both screen dimensions
        # but never checks lag_y -- presumably an oversight; confirm.
        assert (lag_x < self.nphi[0]) and (lag_x < self.nphi[1]), "lag choice larger than screen array"
        # Y,X
        if (lag_x == 0 and lag_y == 0):
            return 0.
        if (lag_x == 0):
            return 1.*((self.phi[:-1*lag_y,:] - self.phi[lag_y:,:])**2).sum()/(self.nphi[0]*(self.nphi[1]-lag_y))
        if (lag_y == 0):
            return 1.*((self.phi[:,:-1*lag_x] - self.phi[:,lag_x:])**2).sum()/((self.nphi[0]-lag_x)*self.nphi[1])
        else:
            return (1.*((self.phi[:-1*lag_y,:-1*lag_x] - self.phi[lag_y:,lag_x:])**2).sum()/((self.nphi[1]-lag_y)*(self.nphi[0]-lag_x)))

    def readScreen(self,filename):
        '''
        Read in screen phases from a file (raw float64, reshaped to nphi).

        :param filename: string
          File containing the screen phases.
        '''
        self.phi = np.fromfile(filename,dtype=np.float64).reshape(self.nphi)

    def _calculate_dphi(self,move_pix=0):
        '''
        Calculate the screen gradient.

        :param move_pix: (optional) int
          Number of pixels to roll the screen (for time evolution).
        :returns: ``(nx, nx)``, ``(nx, nx)``
          numpy arrays containing the dx,dy components of the gradient vector.

        .. note:: Includes factors of the Fresnel scale and the result is in units of the source image.
        '''
        ips = self.ips  # number of screen pixels per image pixel
        # -- when this != 1, some sinusoidal signal
        #    over time with period of image_resolution
        nx = self.nx                      # number of image pixels
        rf = self.rf / self.screen_dx     # Fresnel scale in screen pixels
        assert move_pix < (self.nphi[1] - self.nx*self.ips), 'screen is not large enough'
        # central differences over a 2-screen-pixel baseline, strided to the
        # image pixel grid
        dphi_x = (0.5 * rf * rf / ips ) * \
                 (self.phi[0:ips*nx:ips,2+move_pix:ips*nx+2+move_pix:ips] -
                  self.phi[0:ips*nx:ips,0+move_pix:ips*nx+move_pix :ips])
        dphi_y = (0.5 * rf * rf / ips ) * \
                 (self.phi[2:ips*nx+2:ips,0+move_pix:ips*nx+move_pix:ips] -
                  self.phi[0:ips*nx :ips,0+move_pix:ips*nx+move_pix:ips])
        self.logger.debug('{0:d},{1:d}'.format(*dphi_x.shape))
        return dphi_x,dphi_y

    def scatter(self,move_pix=0,scale=1):
        '''
        Generate the scattered image which is stored in the ``iss`` member.

        :param move_pix: (optional) int
          Number of pixels to roll the screen (for time evolution).
        :param scale: (optional) scalar
          Scale factor for gradient. To simulate the scattering effect at another
          wavelength this is (lambda_new/lambda_old)**2
        '''
        M = self.model.shape[-1]  # size of original image array
        N = self.nx               # size of output image array

        #if not self.live_dangerously: self._checkSanity()

        # calculate phase gradient
        dphi_x,dphi_y = self._calculate_dphi(move_pix=move_pix)

        if scale != 1:
            dphi_x *= scale
            dphi_y *= scale

        xx_,yy = np.meshgrid((np.arange(N) - 0.5*(N-1)),\
                             (np.arange(N) - 0.5*(N-1)),indexing='xy')

        # check whether we care about PA of scattering kernel
        if self.pa != None:
            f_model = RectBivariateSpline(self.model_dx/self.dx*(np.arange(M) - 0.5*(M-1)),\
                                          self.model_dx/self.dx*(np.arange(M) - 0.5*(M-1)),\
                                          self.model)

            # apply rotation
            theta = -(90 * pi / 180) + np.radians(self.pa)  # rotate CW 90 deg, then CCW by PA
            xx_ += dphi_x
            yy += dphi_y
            xx = cos(theta)*xx_ - sin(theta)*yy
            yy = sin(theta)*xx_ + cos(theta)*yy
            self.iss = f_model.ev(yy.flatten(),xx.flatten()).reshape((self.nx,self.nx))

            # rotate back and clip for positive values for I
            if self.think_positive:
                self.iss = clip(rotate(self.iss,-1*theta/np.pi*180,reshape=False),a_min=0,a_max=1e30) * (self.dx/self.model_dx)**2
            else:
                self.iss = rotate(self.iss,-1*theta/np.pi*180,reshape=False) * (self.dx/self.model_dx)**2

        # otherwise do a faster lookup rather than the expensive interpolation.
        else:
            # NOTE(review): np.int was removed in NumPy >= 1.24; these lines
            # need plain int on modern NumPy -- confirm supported version.
            yyi = np.rint((yy+dphi_y+self.nx/2)).astype(np.int) % self.nx
            xxi = np.rint((xx_+dphi_x+self.nx/2)).astype(np.int) % self.nx
            if self.think_positive:
                self.iss = clip(self.isrc[yyi,xxi],a_min=0,a_max=1e30)
            else:
                self.iss = self.isrc[yyi,xxi]

    def _load_src(self,stokes=(0,),think_positive=True):
        '''
        Load the source image from model (changes size and resolution to match the screen).

        :param stokes: (optional) tuple
          Stokes parameters to consider.
        :param think_positive: (optional) bool
          Should we enforce that the source image has no negative pixel values?
        '''
        M = self.model.shape[1]  # size of original image array
        N = self.nx              # size of output image array
        if len(self.model.shape) > 2:
            self.isrc = np.empty((self.model.shape[-1],N,N))
        else:
            # promote a single 2D image to a one-plane stack
            self.isrc = np.empty((1,N,N))
            self.model = np.reshape(self.model,(1,M,M))
        for s in stokes:
            f_img = RectBivariateSpline(self.model_dx/self.dx*(np.arange(M) - 0.5*(M-1)),\
                                        self.model_dx/self.dx*(np.arange(M) - 0.5*(M-1)),\
                                        self.model[s,:,:])
            xx_,yy_ = np.meshgrid((np.arange(N) - 0.5*(N-1)),\
                                  (np.arange(N) - 0.5*(N-1)),indexing='xy')
            m = f_img.ev(yy_.flatten(),xx_.flatten()).reshape((self.nx,self.nx))
            res = m * (self.dx/self.model_dx)**2  # rescale for change in pixel size
            if s == 0 and think_positive:
                # only total intensity (Stokes I) is forced non-negative
                res[res < 0] = 0
            self.isrc[s,:,:] = res
        self.model = np.squeeze(self.model)
        self.isrc = np.squeeze(self.isrc)

    def saveSettings(self,filename):
        '''
        Save screen settings to a plain-text, tab-separated file.

        :param filename: string
          settings filename
        '''
        # NOTE(review): file handle is closed manually; a `with` block would
        # be safer if any write raises.
        f = open(filename,"w")
        f.write("wavelength \t {0}\n".format(self.wavelength))
        f.write("dpc \t {0}\n".format(self.dpc))
        f.write("rpc \t {0}\n".format(self.rpc))
        f.write("d \t {0}\n".format(self.d))
        f.write("m \t {0}\n".format(self.m))
        f.write("r0 \t {0}\n".format(self.r0))
        f.write("anisotropy \t {0}\n".format(self.anisotropy))
        f.write("pa \t {0}\n".format(self.pa))
        f.write("nphix \t {0}\n".format(self.nphi[0]))  # size of phase screen
        f.write("nphiy \t {0}\n".format(self.nphi[1]))  # size of phase screen
        f.write("screen_dx \t {0}\n".format(self.screen_dx))
        f.write("rf \t {0}\n".format(self.rf))
        f.write("ips \t {0}\n".format(self.ips))
        f.write("dx \t {0}\n".format(self.dx))
        f.write("nx \t {0}\n".format(self.nx))
        f.write("qmax \t {0}\n".format(self.r_inner))  # inner turbulence scale in r0
        f.write("qmin \t {0}\n".format(self.r_outer))  # outer turbulence scale in r0
        #f.write("qmax \t {0}\n".format(self.qmax)) # 1./inner turbulence scale in screen pixels
        #f.write("qmin \t {0}\n".format(self.qmin)) # 1./inner turbulence scale in screen pixels
        f.close()
|
Mourne Mountains on November morning.
Took this shot of the front of the Mourne Mountains the other morning, a few fields away from my house. Just discovered this location while out on a ramble with the dog.
|
import itertools
import math
import os
import time
from collections import defaultdict
import numpy as np
from urh.cythonext import awre_util
from urh.signalprocessing.FieldType import FieldType
class Preprocessor(object):
    """
    This class preprocesses the messages in the following ways
    1) Identify preamble / length of preamble
    2) Identify sync word(s)
    3) Align all given messages on the identified preamble information
    """
    _DEBUG_ = False

    def __init__(self, bitvectors: list, existing_message_types: dict = None):
        # raw message bits, one numpy array per message
        self.bitvectors = bitvectors  # type: list[np.ndarray]
        # mapping of message index -> already-labeled message type (may carry
        # PREAMBLE/SYNC labels that short-circuit detection below)
        self.existing_message_types = existing_message_types if existing_message_types is not None else dict()

    def preprocess(self) -> tuple:
        """Detect preamble starts/lengths and the sync word length.

        Returns (preamble_starts, preamble_lengths, sync_len).
        """
        raw_preamble_positions = self.get_raw_preamble_positions()
        existing_sync_words = self.__get_existing_sync_words()
        if len(existing_sync_words) == 0:
            sync_words = self.find_possible_syncs(raw_preamble_positions)
        else:
            # NOTE: This does not cover the case if protocol has multiple sync words and not all of them were labeled
            sync_words = existing_sync_words
        preamble_starts = raw_preamble_positions[:, 0]
        preamble_lengths = self.get_preamble_lengths_from_sync_words(sync_words, preamble_starts=preamble_starts)
        sync_len = len(sync_words[0]) if len(sync_words) > 0 else 0
        return preamble_starts, preamble_lengths, sync_len

    def get_preamble_lengths_from_sync_words(self, sync_words: list, preamble_starts: np.ndarray):
        """
        Get the preamble lengths based on the found sync words for all messages.
        If there should be more than one sync word in a message, use the first one.

        :param sync_words: candidate sync words (strings of '0'/'1')
        :param preamble_starts: per-message preamble start indices
        :return: numpy array of preamble lengths, one per message (0 = not found)
        """
        # If there should be varying sync word lengths we need to return an array of sync lengths per message
        assert all(len(sync_word) == len(sync_words[0]) for sync_word in sync_words)
        byte_sync_words = [bytes(map(int, sync_word)) for sync_word in sync_words]
        result = np.zeros(len(self.bitvectors), dtype=np.uint32)
        for i, bitvector in enumerate(self.bitvectors):
            preamble_lengths = []
            bits = bitvector.tobytes()
            for sync_word in byte_sync_words:
                sync_start = bits.find(sync_word)
                if sync_start != -1:
                    if sync_start - preamble_starts[i] >= 2:
                        preamble_lengths.append(sync_start - preamble_starts[i])
                    # Consider case where sync word starts with preamble pattern
                    sync_start = bits.find(sync_word, sync_start + 1, sync_start + 2 * len(sync_word))
                    if sync_start != -1:
                        if sync_start - preamble_starts[i] >= 2:
                            preamble_lengths.append(sync_start - preamble_starts[i])
            preamble_lengths.sort()
            if len(preamble_lengths) == 0:
                result[i] = 0
            elif len(preamble_lengths) == 1:
                result[i] = preamble_lengths[0]
            else:
                # consider all indices not more than one byte before first one
                preamble_lengths = list(filter(lambda x: x < preamble_lengths[0] + 7, preamble_lengths))
                # take the smallest preamble_length, but prefer a greater one if it is divisible by 8 (or 4)
                preamble_length = next((pl for pl in preamble_lengths if pl % 8 == 0), None)
                if preamble_length is None:
                    preamble_length = next((pl for pl in preamble_lengths if pl % 4 == 0), None)
                if preamble_length is None:
                    preamble_length = preamble_lengths[0]
                result[i] = preamble_length
        return result

    def find_possible_syncs(self, raw_preamble_positions=None):
        """Find sync word candidates from preamble positions and message differences."""
        difference_matrix = self.get_difference_matrix()
        if raw_preamble_positions is None:
            raw_preamble_positions = self.get_raw_preamble_positions()
        return self.determine_sync_candidates(raw_preamble_positions, difference_matrix, n_gram_length=4)

    @staticmethod
    def merge_possible_sync_words(possible_sync_words: dict, n_gram_length: int):
        """
        Merge possible sync words by looking for common prefixes

        :param possible_sync_words: dict of possible sync words and their frequencies
        :param n_gram_length: minimum prefix length for two words to be merged
        :return: dict of merged sync words and accumulated frequencies
        """
        result = defaultdict(int)
        if len(possible_sync_words) < 2:
            return possible_sync_words.copy()
        for sync1, sync2 in itertools.combinations(possible_sync_words, 2):
            # commonprefix also works on plain strings, not only paths
            common_prefix = os.path.commonprefix([sync1, sync2])
            if len(common_prefix) > n_gram_length:
                result[common_prefix] += possible_sync_words[sync1] + possible_sync_words[sync2]
            else:
                result[sync1] += possible_sync_words[sync1]
                result[sync2] += possible_sync_words[sync2]
        return result

    def determine_sync_candidates(self,
                                  raw_preamble_positions: np.ndarray,
                                  difference_matrix: np.ndarray,
                                  n_gram_length=4) -> list:
        """Score candidate sync words and return them ordered by frequency."""
        possible_sync_words = awre_util.find_possible_sync_words(difference_matrix, raw_preamble_positions,
                                                                 self.bitvectors, n_gram_length)
        self.__debug("Possible sync words", possible_sync_words)
        if len(possible_sync_words) == 0:
            return []
        possible_sync_words = self.merge_possible_sync_words(possible_sync_words, n_gram_length)
        self.__debug("Merged sync words", possible_sync_words)
        scores = self.__score_sync_lengths(possible_sync_words)
        sorted_scores = sorted(scores, reverse=True, key=scores.get)
        estimated_sync_length = sorted_scores[0]
        # prefer a byte-aligned sync length close to the best-scoring one
        if estimated_sync_length % 8 != 0:
            for other in filter(lambda x: 0 < estimated_sync_length-x < 7, sorted_scores):
                if other % 8 == 0:
                    estimated_sync_length = other
                    break
        # Now we look at all possible sync words with this length
        sync_words = {word: frequency for word, frequency in possible_sync_words.items()
                      if len(word) == estimated_sync_length}
        self.__debug("Sync words", sync_words)
        additional_syncs = self.__find_additional_sync_words(estimated_sync_length, sync_words, possible_sync_words)
        if additional_syncs:
            self.__debug("Found additional sync words", additional_syncs)
            sync_words.update(additional_syncs)
        result = []
        for sync_word in sorted(sync_words, key=sync_words.get, reverse=True):
            # Convert bytes back to string
            result.append("".join(str(c) for c in sync_word))
        return result

    def __find_additional_sync_words(self, sync_length: int, present_sync_words, possible_sync_words) -> dict:
        """
        Look for additional sync words, in case we had varying preamble lengths and multiple sync words
        (see test_with_three_syncs_different_preamble_lengths for an example)

        :param sync_length:
        :type present_sync_words: dict
        :type possible_sync_words: dict
        :return:
        """
        np_syn = [np.fromiter(map(int, sync_word), dtype=np.uint8, count=len(sync_word))
                  for sync_word in present_sync_words]
        # messages where none of the present sync words occurs
        messages_without_sync = [i for i, bv in enumerate(self.bitvectors)
                                 if not any(awre_util.find_occurrences(bv, s, return_after_first=True) for s in np_syn)]
        result = dict()
        if len(messages_without_sync) == 0:
            return result
        # Is there another sync word that applies to all messages without sync?
        additional_candidates = {word: score for word, score in possible_sync_words.items()
                                 if len(word) > sync_length and not any(s in word for s in present_sync_words)}
        for sync in sorted(additional_candidates, key=additional_candidates.get, reverse=True):
            if len(messages_without_sync) == 0:
                break
            score = additional_candidates[sync]
            # truncate candidate to the common sync length
            s = sync[:sync_length]
            np_s = np.fromiter(s, dtype=np.uint8, count=len(s))
            matching = [i for i in messages_without_sync
                        if awre_util.find_occurrences(self.bitvectors[i], np_s, return_after_first=True)]
            if matching:
                result[s] = score
                for m in matching:
                    messages_without_sync.remove(m)
        return result

    def get_raw_preamble_positions(self) -> np.ndarray:
        """
        Return a 2D numpy array where first column is the start of preamble
        second and third columns are lower and upper bound for preamble length by message, respectively
        """
        result = np.zeros((len(self.bitvectors), 3), dtype=np.uint32)
        for i, bitvector in enumerate(self.bitvectors):
            if i in self.existing_message_types:
                preamble_label = self.existing_message_types[i].get_first_label_with_type(FieldType.Function.PREAMBLE)
            else:
                preamble_label = None
            if preamble_label is None:
                start, lower, upper = awre_util.get_raw_preamble_position(bitvector)
            else:
                # If this message is already labeled with a preamble we just use it's values
                start, lower, upper = preamble_label.start, preamble_label.end, preamble_label.end
            result[i, 0] = start
            result[i, 1] = lower - start
            result[i, 2] = upper - start
        return result

    def get_difference_matrix(self) -> np.ndarray:
        """
        Return a matrix of the first difference index between all messages
        :return:
        """
        return awre_util.get_difference_matrix(self.bitvectors)

    def __score_sync_lengths(self, possible_sync_words: dict):
        """Accumulate candidate frequencies per sync word length."""
        sync_lengths = defaultdict(int)
        for sync_word, score in possible_sync_words.items():
            sync_lengths[len(sync_word)] += score
        self.__debug("Sync lengths", sync_lengths)
        return sync_lengths

    def __get_existing_sync_words(self) -> list:
        """Collect sync bit strings from messages already labeled with SYNC."""
        result = []
        for i, bitvector in enumerate(self.bitvectors):
            if i in self.existing_message_types:
                sync_label = self.existing_message_types[i].get_first_label_with_type(FieldType.Function.SYNC)
            else:
                sync_label = None
            if sync_label is not None:
                result.append("".join(map(str, bitvector[sync_label.start:sync_label.end])))
        return result

    def __debug(self, *args):
        # gated debug printing; enable via the _DEBUG_ class attribute
        if self._DEBUG_:
            print("[PREPROCESSOR]", *args)

    @staticmethod
    def get_next_multiple_of_n(number: int, n: int):
        # smallest multiple of n that is >= number
        return n * int(math.ceil(number / n))

    @staticmethod
    def lower_multiple_of_n(number: int, n: int):
        # largest multiple of n that is <= number
        return n * int(math.floor(number / n))

    @staticmethod
    def get_next_lower_multiple_of_two(number: int):
        # number itself if even, otherwise the next lower even number
        return number if number % 2 == 0 else number - 1
|
What are the attributes of Creativity?
An artist puts their whole self into their work.
Taboos - mostly cultural. Societal norms affect the way artists think. Examples: Rules about where art should be, playfulness is for children, problems are serious, humor is out of the question.
|
from django.db import models
from django.contrib.auth.models import User
from django.contrib import admin
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin as AuthUserAdmin
from django.http import HttpRequest
class Member(models.Model):
    """Per-user club profile attached to Django's auth User."""
    # Deleting the User also deletes the Member row.
    user = models.ForeignKey(User,on_delete=models.CASCADE)
    # NOTE(review): stored as an integer -- confirm net IDs are always numeric.
    netid = models.IntegerField(default=0)
    role = models.CharField(max_length=200)
    major = models.CharField(max_length=200)
    points = models.IntegerField(default=0)
    def __str__(self):
        # Display members by the linked user's full name.
        return self.user.get_full_name()
class Team(models.Model):
    """A team with an optional lead and a many-to-many set of member users."""
    title = models.CharField(max_length=200)
    # Lead is nulled out (not deleted) if the leading User account is removed.
    lead = models.ForeignKey(User, related_name="team_lead",null=True, on_delete=models.SET_NULL)
    members = models.ManyToManyField(User, related_name='team_members', blank=True)
    def __str__(self):
        return self.title
class Event(models.Model):
    """An event hosted by a team, with attendees tracked as Users."""
    # Deleting the Team also deletes its events.
    team = models.ForeignKey(Team,on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    location = models.CharField(max_length=200)
    dateTime = models.DateTimeField()
    description = models.CharField(max_length=200)
    members = models.ManyToManyField(User, related_name='members', blank=True)
    def __str__(self):
        return self.title
class TeamAdmin(admin.ModelAdmin):
    """Admin page for Team; members edited via a vertical filter widget."""
    model=Team
    filter_vertical = ('members',)
class EventAdmin(admin.ModelAdmin):
    """Admin page for Event; attendees edited via a vertical filter widget."""
    model=Event
    filter_vertical = ('members',)
class TeamUserInline(admin.StackedInline):
    """Inline editor for a user's Team membership (through table), one row."""
    model = Team.members.through
    max_num = 1
class MemberInline(admin.StackedInline):
    """Inline editor for the user's Member profile, at most one per user."""
    model=Member
    max_num=1
class UserAdmin(AuthUserAdmin):
    """Stock auth UserAdmin extended with Member and Team inlines."""
    inlines=(MemberInline, TeamUserInline)

    def get_inline_instances(self, request, obj=None):
        # Show the inlines only when editing an existing user;
        # the "add user" form gets none.
        return super().get_inline_instances(request, obj) if obj else list()
|
Nintendo surprised more than a few people when they said the new Zelda would be the only playable game they bring to E3 2016. Whether or not that was the best approach for them, or overall, will be the subject of many thinkpieces for some time to come, I imagine. But one thing’s for sure: impressions of The Legend of Zelda: Breath of the Wild are highly sought after by show goers and series fans alike.
of the game, and y“all definitely liked the initial trailer too. Here on the west coast -- well -- the line completely filled up in six minutes. It took six hours of rigorous standing for me to finally reach the collective experience of Nintendo“s booth. And gosh, is it ever a sight to behold. But, I“m here to talk about how the game plays, not the journey it took for me to get there, right?
I“ll get to that. But I think it“s important that I first address a series of prejudices I have, so you know what perspective I“m coming from. I rarely couple hands-on previews with caveats, but I feel like I was and still am in the minority of people -- who reacted to Tuesday morning“s big reveal with trepidation, not unrivaled glee.
Here’s the thing: the Internet affectionately referred to Breath of the Wild as “The Legend of Zelda: Skyrim” before we learned a lot about it. However, I’ve never once played Skyrim, or ever completed a single Western-developed open-world “sandbox game” like what inspired Breath of the Wild because -- almost everything about how open-world RPGs work has consistently intimidated, overwhelmed, and ultimately shunned me as a player. They don’t respect my time, or my level of anxiety.
The basic philosophy of modernizing the first The Legend of Zelda on NES to create a seamless, living world to explore forever is not why I play Zelda games, and not something I was attracted to from the onset. I waited for as long as I did not because of unrivaled excitement, but because of unrivaled worry.
The Legend of Zelda, as a franchise, is something I can usually turn to in order to help introduce non-gaming folks to how great games can be. I can pick any title from the entire series and heartily recommend it to anyone who“s never played a video game before in their lives. Because of how they instruct the player from the beginning and teach them how the basic controls work each time -- any given Zelda title is simple to learn, but hard to master.
The reason the first Zelda worked so well as an introduction to gaming was because... well... the NES controller had two buttons, and the world -- while ultimately open for exploration and map-drawing -- was pretty tiny and manageable. This one has a billion buttons and things to do -- and just the area you could explore in the E3 demo was but a tiny spec of dust in the grand scheme of things. This is definitely the first Zelda in thirty years to leave newcomers completely in the dust. Its lack of direction and ultra-focused realism is terrifying to me, in ways that most modern open-world games are. It didn“t sit well with me in the reveal, and it still doesn“t sit well with me after I“ve played the demo. So it goes.
And that“s where my perspective comes from. I don“t play these games for the same reasons most of my Twitter feed seems to. I enjoy the sense of exploration and figuring stuff out that most of Link“s adventures provide, but there“s always been a certain degree of linearity to tell me where to go when I“m done. If the guiding hand that leads you forward isn“t specific enough -- it could lead to folks getting lost in this hugely vast Hyrule, where literally everything you see is a place you can go.
I think there are two types of Zelda players: the ones who enjoy the more 2D, Link to the Past-style Zeldas where both the world and narrative are small, manageable, and enjoyable -- and those who absolutely pine for "The Legend of Zelda: Skyrim" to be a reality. As you probably gather by now, I“m in the former camp. And there are many people who are like-minded here; I“m not on an island. Plenty of the 3D entries have provided a perfect balance of linearity and complexity. But gosh, if it“s not too careful, Breath of the Wild could leave this type of person behind -- leave me behind.
The collective “experience†in Nintendo“s booth is the summation of two demos. They gave me fifteen minutes of being dropped in a world with no direction or place to go, so I could just explore and see what happens. Then, I got to play from the very beginning of the game, where Link wakes up and first begins his new adventure. Now that my prejudices are out of the way -- I“m just going to tell you what happened during each of my sessions, not necessarily how I feel about them (yet).
Despite my fears -- there is something immensely satisfying about taking a Bokoblin“s club and beating his friends with it. Everything you“ve seen from Treehouse Live is as fun as it seems. The enemies are more alive than we“ve ever previously seen in a Zelda game. The sounds you make will tip them off. They“ll summon their friends and make your life really difficult, really fast. You“ve got to micromanage even the tiniest bits of exploration you do if you“re not confident about your combat skills... because there were no hearts to be seen in the demo, only food to find and eat.
The standard skull-type enemies that used to haunt the nights in Ocarina of Time can now be chopped apart, and they summon the rest of their body and put themselves back together if you fail to destroy the head. You“ve got to make sure you completely eradicate your foes if you don“t want an overwhelming situation. Breath of the Wild is definitely not going to be “too easy†-- far from it.
I wandered the earth for a bit, and didn“t really discover anything too noteworthy. Collected a few materials, dispatched a few foes, scaled a cliff or two. One thing about the basic gameplay, for those who haven“t really paid much attention to all the streams: the systems first introduced in Skyward Sword, like stamina and weapon durability, are back. You“ve got to keep every single aspect of Link“s health in mind if you wanna survive for longer than five minutes. In previous games, falling from a cliff might lose you a heart or two. In this one -- if you scale to the top of a super-high cliff, then lose your footing because you run out of stamina -- you“ll die. It“s a big bad world to explore -- the big is evident, but the mercilessness didn“t really sink in for me until I played the demo.
So yeah, back to my wandering: I was minding my own business, chucking bombs at things because I wanted to see how satisfying the explosions were and I honestly felt like some of the simpler weapons I picked up didn“t get the job done (especially when it came to destroying the heads of those dang persistent skeletons). And then, a gigantic rock titan boss appeared. My peers playing the demo around me didn“t find that, so I all of a sudden had an audience -- and I didn“t have the means to kill him since I“d wasted all my bombs! It was an opportunity lost, as my “Exploration Demo†ended.
The second demo started you off at the very beginning of the game. A practically naked Link wakes up after being submerged in water to find himself in a deserted temple. You find some clothes in a few chests and can choose whether to put them on or not. I used the Sheikah Slate to find my way to the outside world -- and with very little words exchanged, the title appeared on the top right corner as Link ran to the edge of the cliff, as seen in the initial trailer.
It“s extremely reminiscent of NES Zelda -- seeing that in action will delight series fans in every way; that can“t be overstated. You even get to follow an old man to a cave, like in the first game. He seemed pretty indignant, and he scolded me (at first) when I snatched an apple from the stick he was roasting on an open flame. If every NPC reacts the same way the old man did, I can surmise that this Zelda will have just as memorable characters as ones that came before it, despite being heavily inspired by a game whose narrative was ultimately held back by hardware constraints.
What little story I did see gives me the impression that the narrative could end up being relatively solid. I was a little worried they might “phone in†the story, after hearing things like “you can skip right to the end, if you wantâ€. But it seems like the story“s there if you“re willing to follow the game“s lead -- it“s not necessarily something you“ll have to dig out, like some quests in Xenoblade Chronicles and games of the same ilk. That“s definitely comforting to me, since narrative is always an important part of my “personal†Zelda experience. Rest easy if we“re in the same boat.
Here“s the thing about following the game“s lead, though. I got lost, right from the beginning. As soon as you discover the Temple of Time (that“s noticeable, and the game points you towards it from the onset), the guiding voice tells you to “follow the Sheikah Tabletâ€, which marks an objective spot on your map. There were two objective spots marked on my Gamepad -- one, I assume, was to continue the narrative, and the other must have led to something else -- or would have.
I worked my way over to the first marked spot on the map, which led me to a mountain with a curious structure poking on top of it. I inspected the poked out structure, looking for a way to interact with it. And when I found nothing, I gave up and went to the other marked spot on my map, assuming my objective was there instead. The person working the booth had to tell me where to go, and when I went back towards the poking structure, I saw the giant cave underneath the mountain that I“d climbed from the other side before.
I“m not dumb -- I“ve played every single game in the series. The objective point of “follow the Shekiah Tablet†wasn“t specific enough. I missed my mark, and wasted what precious little time I had with the demo wandering aimlessly back and forth. Without more specific directions for folks who don“t wish to wander -- it could leave many feeling like their time“s been wasted. I know I was sad, and I kind of wanted a do-over. But that“s the way the ball bounces.
Something like that can be an easy fix during localization, though. “Follow the Sheikah Tablet... to the cave” gives you something to look out for, as you explore. It’s not too late for them to consider changing something like that, so the folks who approach this brand new kind of Zelda scared out of their darned minds can feel a little more at ease when they know exactly what it is they’re looking for.
I“ve fully outlined how aware I am that The Legend of Zelda: Breath of the Wild and I don“t agree philosophically, at all. I know I“m an outlier when it comes to how my impressions read overall. But I hope I“ve clearly articulated my point of view, after all this.
The new Zelda terrifies me, and as a result of that -- I got way lost and squandered away my limited demo time with the beginning of the game. If the development team (and particularly localization) doesn“t work extra hard to provide a much better sense of signposting to the objectives at hand, it could sour someone“s experience of what the game is trying to accomplish.
I know the game is trying to articulate a sense of harsh realism to make Hyrule feel more alive than ever before. But the objectives in a Zelda game should be crystal clear, so that folks who prefer to take this gigantic experience in more manageable chunks don“t get lost and waste time along the way.
That“s the end of my experience. If you“ve got something to say or questions to ask, I“m more than happy to hear you out. Please, please share your thoughts below.
In case you didn“t know, The Legend of Zelda: Breath of the Wild is slated for release on Wii U and “NX†sometime in 2017. We“ll offer more information as it comes.
|
from django.db import models
from xbrowse_server.base.models import Project, Individual
from xbrowse.core import genomeloc
class BreakpointFile(models.Model):
    """Filesystem path of a breakpoint data file belonging to a project."""
    project = models.ForeignKey(Project, blank=True)
    file_path = models.CharField(max_length=500, default="", blank=True)
    class Meta:
        db_table="base_breakpointfile"
class Breakpoint(models.Model):
    """A structural-variant breakpoint observed in one individual of a project."""
    project = models.ForeignKey(Project, null=False)
    individual = models.ForeignKey(Individual, null=False)
    # Genome-wide encoded position; decoded via genomeloc.get_chr_pos().
    xpos = models.BigIntegerField(db_index=True)
    # depth cscore partner genes cdsdist
    obs = models.IntegerField(db_index=True)
    sample_count = models.IntegerField(db_index=True)
    consensus = models.FloatField()
    partner = models.TextField(blank=True, null=True)
    class Meta:
        db_table="base_breakpoint"
    def toList(self):
        """Return the breakpoint as a positional list for tabular export."""
        genes = [{ 'gene' : bg.gene_symbol, 'cds_dist': bg.cds_dist } for bg in self.breakpointgene_set.all()]
        # `chrom` (not `chr`) so the builtin chr() is not shadowed.
        chrom, pos = genomeloc.get_chr_pos(self.xpos)
        return [
            self.xpos,
            chrom,
            pos,
            self.obs,
            self.sample_count,
            self.consensus,
            self.partner,
            self.individual.indiv_id,
            genes,
        ]
    def toDict(self):
        """Return the breakpoint as a JSON-serializable dict."""
        genes = [{ 'gene' : bg.gene_symbol, 'cds_dist': bg.cds_dist } for bg in self.breakpointgene_set.all()]
        # Renamed local only; the 'chr' dictionary key is part of the API.
        chrom, pos = genomeloc.get_chr_pos(self.xpos)
        return {
            'xpos' : self.xpos,
            'chr' : chrom,
            'pos' : pos,
            'obs' : self.obs,
            'sample_count' : self.sample_count,
            'consensus' : self.consensus,
            'indiv_id' : self.individual.indiv_id,
            'genes' : genes,
        }
class BreakpointMetaData(models.Model):
    """Free-form classification and tags attached to a breakpoint."""
    breakpoint = models.ForeignKey(Breakpoint, null=False)
    type = models.TextField(blank=True, default="")
    tags = models.TextField(blank=True, default="")
    class Meta:
        db_table="base_breakpointmetadata"
    def toDict(self):
        """Return a JSON-serializable dict; the breakpoint is identified by xpos."""
        return {
            'breakpoint_id' : self.breakpoint.xpos,
            'type' : self.type,
            'tags' : self.tags
        }
class BreakpointGene(models.Model):
    """A gene affected by a breakpoint, with its distance to the CDS."""
    breakpoint = models.ForeignKey(Breakpoint, null=False)
    gene_symbol = models.CharField(db_index=True,max_length=20) # HGNC symbol
    cds_dist = models.IntegerField()  # distance (bp) to the coding sequence
    class Meta:
        db_table="base_breakpointgene"
|
[22:23] - Flight KAC538 is ready to depart HESH with a final destination of OKBK.
[00:45] - Landed at -192fpm. With 4580kgs of fuel onboard and a pitch angle of 2deg.
[00:47] - Arrived safely at OKBK. We hope you enjoyed your flight!
[00:48] - Arrived safely at OKBK. We hope you enjoyed your flight!
|
import sys
sys.path.append('../..')
from pyramid.config import Configurator
from pyramid.session import UnencryptedCookieSessionFactoryConfig
from sqlalchemy import engine_from_config
from social.apps.pyramid_app.models import init_social
from .models import DBSession, Base
def main(global_config, **settings):
    """This function returns a Pyramid WSGI application."""
    # Bind SQLAlchemy to the engine described by 'sqlalchemy.*' settings.
    engine = engine_from_config(settings, 'sqlalchemy.')
    DBSession.configure(bind=engine)
    Base.metadata.bind = engine
    # NOTE(review): hardcoded, unsigned/unencrypted session secret — fine for
    # a demo, but it must come from deployment config before real use.
    session_factory = UnencryptedCookieSessionFactoryConfig('thisisasecret')
    config = Configurator(settings=settings,
                          session_factory=session_factory,
                          autocommit=True)
    config.include('pyramid_chameleon')
    config.add_static_view('static', 'static', cache_max_age=3600)
    # Expose the authenticated user lazily as request.user.
    config.add_request_method('example.auth.get_user', 'user', reify=True)
    config.add_route('home', '/')
    config.add_route('done', '/done')
    # Order matters: local_settings overrides settings, then the social-auth
    # app is wired up against the shared declarative base/session.
    config.include('example.settings')
    config.include('example.local_settings')
    config.include('social.apps.pyramid_app')
    init_social(config, Base, DBSession)
    config.scan()
    config.scan('social.apps.pyramid_app')
    return config.make_wsgi_app()
|
Welcome to the archive (everything up to 2016) for ‘Our Daily Bread’; Monolith Cocktail’s weekly bulletin for reviews, news and notable releases.
001 – 130: Vieux Farka Toure to The Magic Lantern.
131 – 202: The Magic Lantern to Sonic Dispatches From Traditional Mali.
|
# -*- coding: utf-8 -*-
from duralex.AbstractVisitor import AbstractVisitor
from . import template
from duralex.alinea_parser import *
import gitlab
class AddGitLabIssueVisitor(AbstractVisitor):
    """Mirror each 'article' node as a GitLab issue and record the issue
    link (and a commit message) on the visited nodes."""
    def __init__(self, args):
        # Authenticated client for gitlab.com using the user's private token.
        self.gitlab = gitlab.Gitlab('https://gitlab.com', args.gitlab_token)
        self.repo_name = args.gitlab_repository
        self.repo = self.gitlab.projects.get(self.repo_name)
        # Only open issues are matched against article titles below.
        self.issues = self.repo.issues.list(state='opened')
        self.current_issue_number = -1
        self.current_issue_link = None
        super(AddGitLabIssueVisitor, self).__init__()
    def visit_edit_node(self, node, post):
        if post:
            return
        # Edits inherit the issue of the enclosing article (set in visit_node
        # before its children are visited).
        node['gitlabIssue'] = self.current_issue_link
        node['commitMessage'] = template.template_string('gitlab/commit_message.j2', {'edit': node})
    def visit_node(self, node):
        if 'type' in node and node['type'] == 'article':
            title = template.template_string('gitlab/issue_title.j2', {'article': node})
            description = template.template_string('gitlab/issue_description.j2', {'article': node})
            found = False
            for issue in self.issues:
                if issue.title == title:
                    found = True
                    self.current_issue_number = issue.iid
                    if issue.description != description:
                        # NOTE(review): current python-gitlab's Issue.save()
                        # takes no title/description kwargs — attributes are
                        # assigned then save() is called. Confirm against the
                        # pinned python-gitlab version.
                        issue.save(title=title, description=description)
            if not found:
                # NOTE(review): gl.project_issues.create(..., project_id=...)
                # is the legacy (<1.x) python-gitlab API; modern releases use
                # self.repo.issues.create({...}). Verify the pinned version.
                issue = self.gitlab.project_issues.create(
                    {
                        'title': title,
                        'description': description
                    },
                    project_id=self.repo.id
                )
                self.current_issue_number = issue.iid
            self.current_issue_link = 'https://gitlab.com/' + self.repo_name + '/issues/' + str(self.current_issue_number)
            node['gitlabIssue'] = self.current_issue_link
        super(AddGitLabIssueVisitor, self).visit_node(node)
|
Wage and employment benefits requirements by political subdivisions; restrictions.
(a) “Employee” means any natural person who is entitled under state or federal law to receive a state or federal minimum wage.
(b) “Employer” means any person who is required under state or federal law to pay a state or federal minimum wage to the person’s employees.
(c) “Employer contracting to provide goods or services for the political subdivision” means a person contracting with the political subdivision to provide goods or services to, for the benefit of, or on behalf of, the political subdivision in exchange for valuable consideration, and includes a person leasing or subleasing real property owned by the political subdivision.
(d) “Employment benefits” means anything of value that an employee may receive from an employer in addition to wages and salary. The term includes, but is not limited to, health benefits; disability benefits; death benefits; group accidental death and dismemberment benefits; paid or unpaid days off for holidays, sick leave, vacation, and personal necessity; retirement benefits; and profit-sharing benefits.
(e) “Federal minimum wage” means a minimum wage required under federal law, including the federal Fair Labor Standards Act of 1938, as amended, 29 U.S.C. ss. 201 et seq.
(f) “Political subdivision” means a county, municipality, department, commission, district, board, or other public body, whether corporate or otherwise, created by or under state law.
(g) “Wage” means that compensation for employment to which any state or federal minimum wage applies.
(2) Except as otherwise provided in subsection (3), a political subdivision may not establish, mandate, or otherwise require an employer to pay a minimum wage, other than a state or federal minimum wage, to apply a state or federal minimum wage to wages exempt from a state or federal minimum wage, or to provide employment benefits not otherwise required by state or federal law.
3. For the employees of an employer receiving a direct tax abatement or subsidy from the political subdivision, as a condition of the direct tax abatement or subsidy.
(b) Apply to a domestic violence or sexual abuse ordinance, order, rule, or policy adopted by a political subdivision.
(4) If it is determined by the officer or agency responsible for distributing federal funds to a political subdivision that compliance with this act would prevent receipt of those federal funds, or would otherwise be inconsistent with federal requirements pertaining to such funds, then this act does not apply, but only to the extent necessary to allow receipt of the federal funds or to eliminate the inconsistency with such federal requirements.
(5) This section does not prohibit a federally authorized and recognized tribal government from requiring employment benefits for a person employed within a territory over which the tribe has jurisdiction.
History.—s. 1, ch. 2003-87; s. 1, ch. 2013-200; s. 5, ch. 2015-3; s. 4, ch. 2015-98.
|
"""
Django settings for locallibrary project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = '2!o+4zkrcvvwhj65wph4bb=dkloys+l5br)m8^ih_xp52^1^6i'
# In production the key comes from the environment; the literal fallback is
# for local development only and must never serve real traffic.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '2!o+4zkrcvvwhj65wph4bb=dkloys+l5br)m8^ih_xp52^1^6i')
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
# bool(os.environ.get('DJANGO_DEBUG', True)) is wrong for env vars: any
# non-empty string — including the string "False" — is truthy, so DEBUG
# could never be switched off via the environment. Compare the string:
# DEBUG stays on unless DJANGO_DEBUG is set to exactly 'False'.
DEBUG = os.environ.get('DJANGO_DEBUG', '') != 'False'
ALLOWED_HOSTS = ['secret-reef-21077.herokuapp.com', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project app (the library catalog).
    'catalog.apps.CatalogConfig',
]
# Standard Django 1.11 middleware stack; order is significant.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'locallibrary.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory, plus per-app 'templates/' dirs.
        'DIRS': ['./templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'locallibrary.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Local default is SQLite; on Heroku, dj_database_url reads DATABASE_URL
# and the update() below overrides these settings with the remote database.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
# Django's four stock validators, with their default options.
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# The absolute path to the directory where collectstatic will collect static files for deployment.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
# NOTE(review): 'whitenoise.django.GzipManifestStaticFilesStorage' is the
# pre-4.0 import path; newer whitenoise uses
# 'whitenoise.storage.CompressedManifestStaticFilesStorage' — confirm the
# pinned whitenoise version.
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# Redirect to home URL after login (Default redirects to /accounts/profile/)
LOGIN_REDIRECT_URL = '/'
# Development email backend: messages are printed to the console.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
|
During our snowbird season traveling the South West Desert areas this year I have been hard at work on my pet project developing an RVing Community website called Love Your RV Forum. In the last while I have been improving the look of the site with an upgraded theme and adding many cool new features to enhance the member’s experience. One nice feature of the site is it is mobile friendly so you can access it on a smart phone or tablet and it looks and operates nicely on the smaller screen size. I’ve also recently installed some new photo / file upload and viewing capabilities plus a points (which we call “Cheers”) and member rank system.
There are a couple hundred members now and growing. My goal is to make it like a virtual happy hour where we gather to chat, make friends and share the adventure of the RV lifestyle. The site has a lot of great features. My aim from day one has been to make a social website just for RVers, easy to use but at the same time a powerful tool to share and communicate with each other online. Take a look at this video I just uploaded to YouTube giving you an overview of the site and what you can do on it.
If you’re new to RVing this is a terrific place to meet and mingle with others who love the RV life. I invite everyone of whatever level of RVing knowledge to join up, even if you don’t even have an RV yet. Hopefully we more experienced veteran RVers can share our knowledge and help out the newbies out there. Registration is quick, free and simple and you can easily delete your account at any time if it is not for you.
|
from __future__ import absolute_import
import six
from . import backend as K
from .utils.generic_utils import serialize_keras_object
from .utils.generic_utils import deserialize_keras_object
class Regularizer(object):
    """Base class for all regularizers.

    Subclasses override ``__call__`` to return a penalty term computed
    from ``x``; this base implementation contributes no penalty.
    """

    def __call__(self, x):
        # The neutral regularizer: zero penalty for any input.
        return 0.0

    @classmethod
    def from_config(cls, config):
        """Re-create a regularizer from its ``get_config()`` dictionary."""
        return cls(**config)
class L1L2(Regularizer):
    """Regularizer applying both L1 and L2 penalties.

    # Arguments
        l1: Float; L1 regularization factor.
        l2: Float; L2 regularization factor.
    """

    def __init__(self, l1=0., l2=0.):
        # Cast factors to the backend's float type once, up front.
        self.l1 = K.cast_to_floatx(l1)
        self.l2 = K.cast_to_floatx(l2)

    def __call__(self, x):
        # Accumulate only the penalties whose factor is non-zero.
        penalty = 0.
        if self.l1:
            penalty += K.sum(self.l1 * K.abs(x))
        if self.l2:
            penalty += K.sum(self.l2 * K.square(x))
        return penalty

    def get_config(self):
        """Serialize the two factors as plain Python floats."""
        return {'l1': float(self.l1), 'l2': float(self.l2)}
# Aliases.
def l1(l=0.01):
    """Shortcut for an L1-only regularizer with factor ``l``."""
    return L1L2(l1=l)
def l2(l=0.01):
    """Shortcut for an L2-only regularizer with factor ``l``."""
    return L1L2(l2=l)
def l1_l2(l1=0.01, l2=0.01):
    """Shortcut for a combined L1+L2 regularizer."""
    return L1L2(l1=l1, l2=l2)
def serialize(regularizer):
    """Serialize a regularizer instance into a JSON-compatible structure."""
    return serialize_keras_object(regularizer)
def deserialize(config, custom_objects=None):
    """Instantiate a regularizer from its serialized config dictionary."""
    return deserialize_keras_object(
        config,
        module_objects=globals(),
        custom_objects=custom_objects,
        printable_module_name='regularizer')
def get(identifier):
    """Resolve ``identifier`` (None, dict, string, or callable) to a regularizer."""
    if identifier is None:
        return None
    if isinstance(identifier, dict):
        # Already a serialized config.
        return deserialize(identifier)
    if isinstance(identifier, six.string_types):
        # Bare class name: wrap it in a minimal config.
        config = {'class_name': str(identifier), 'config': {}}
        return deserialize(config)
    if callable(identifier):
        # Custom callables are used as-is.
        return identifier
    raise ValueError('Could not interpret regularizer identifier:',
                     identifier)
|
Can anyone suggest a chiral nucleophile so this prochiral eta-3 complex becomes chiral? how would it attach?
Which complex, can you draw it and post the structure?
sorry forgot to click upload!
How about an amino acid? Cysteine.
could i use a grignard reagent? how would it add to the complex?
It's got to be chiral; I need to modify the eta structure. It should be a simple chiral nucleophile to add — any other ideas?
How about a chiral boron reagent?
that could work, would it add to one side of the multiple bonds leaving the other side a double bond to the other ph group? would the metal un-complex?
It may do that I'm not sure. You have an allylic double bond so I assume it would add normally.
I have no idea if it would un-complex the metal. If the C=C is gone then it may do.
It could indeed un-complex to leave a double bond. A simple chiral nucleophile would be a modified Lithium diisopropylamide (LDA) Just modify it so you form a chiral carbon on one of the alkyl parts. The good thing about using Nitrogen as the bonding point is that you need not worry about having two chiral centers next to one another as nitrogen cannot form a chiral centre. Reacting a chiral nucleophile with a PROCHIRAL electrophilic multihapto complex would yield a pair of epimers.
|
from pyelt.datalayers.dwh import Dwh
__author__ = 'hvreenen'
# class FieldTransformationType():
# INLINE = 'INLINE' #type: str
# FUNCTION = 'FUNCTION'#type: str
#
# class FunctionLanguage():
# PLSQL = 'PLSQL'#type: str
# PLPYTHON = 'PLPYTHON'#type: str
class FieldTransformation():
    """An SQL transformation applied to a field, composed of ordered steps.

    Each step is a SQL fragment; get_sql() nests the fragments into one
    expression using the '{fld}' / '{stepN}' placeholders.
    """
    def __init__(self, name: str = '', sql: str = '') -> None:
        self.name = name #type: str
        self.field_name = 'id' #type: str  # placeholder substituted into step SQL
        self.table = ''
        self.descr = '' #type: str
        self.filter = '' #type: str
        # self.type = FieldTransformationType.INLINE #type: str
        # Steps keyed by their 1-based sort order.
        self.steps = {} #type: Dict[int, FieldTransformStep]
        if sql:
            # self.parse_sql(sql)
            step = self.new_step(sql)
    def get_table(self):
        return self.table
    def parse_sql(self, sql: str):
        """Split 'func(inner)' and register the inner SQL as a step."""
        pos_start = sql.find('(')
        pos_end = sql.rfind(')')
        func_name = sql[:pos_start]
        func_inner = sql[pos_start + 1:pos_end]
        # new_step() already stores the step; this re-assignment is redundant
        # but harmless (same key, same object).
        step = self.new_step(func_inner)
        self.steps[step.sort_order] = step
    def new_step(self, sql: str) -> 'FieldTransformStep':
        """Create a step from raw SQL, append it, and return it."""
        step = FieldTransformStep(sql=sql)
        step.sort_order = len(self.steps) + 1
        self.steps[step.sort_order] = step
        return step
    def get_sql(self, alias: str='')->str:
        """Compose all steps (in sort order) into a single SQL expression.

        The first step has '{fld}' replaced by the field name; each later
        step wraps the accumulated SQL via its '{fld}' or '{stepN}' marker.
        """
        sql = ''
        index = 0
        steps = sorted(self.steps.values(), key = lambda x: x.sort_order)
        for step in steps:
            # step_sql = step.sql
            step_sql = step.get_sql(alias)
            # Normalize literal field references to the '{fld}' placeholder.
            step_sql = step_sql.replace(self.field_name, "{fld}")
            if (index > 0):
                if '{fld}' in step_sql:
                    sql = step_sql.replace("{fld}", sql)
                else:
                    sql = step_sql.replace("{step" + str(index) + "}", sql)
            else:
                sql = step_sql
                sql = sql.replace("{fld}", self.field_name)
            index += 1
        return sql
    def __repr__(self):
        return self.get_sql('')
# def create_function_at_db(self, dwh: 'Dwh') -> None:
# #todo afmaken
# params = {} #type: Dict[str, str]
# sql = """CREATE OR REPLACE FUNCTION {schema}.{name}({params})
# RETURNS {return_type} AS
# $BODY$
# {body}
# $BODY$
# LANGUAGE {lang} VOLATILE;""".format(**params)
# dwh.execute(sql)
class FieldTransformStep(FieldTransformation):
    """A single SQL fragment within a FieldTransformation, with a sort order."""
    def __init__(self, sortorder: int = 0, name: str = '', sql: str = '') -> None:
        FieldTransformation.__init__(self, name)
        self.sql = sql
        self.sort_order = sortorder
        # self.parse_sql(sql)
    def parse_sql(self, sql: str) -> None:
        """Split 'func(a, b, ...)' into a function name and FuncParam list.

        NOTE(review): FuncParam is commented out at module level, so calling
        this would raise NameError; it appears to be dead/legacy code (the
        call in __init__ is commented out too).
        """
        func_name = ''
        func_params = []
        pos_start = sql.find('(')
        pos_end = sql.rfind(')')
        func_name = sql[:pos_start]
        func_params_sql = sql[pos_start + 1:pos_end]
        func_param_names = func_params_sql.split(',')
        for func_param_name in func_param_names:
            if not func_param_name: continue
            func_param = FuncParam(func_param_name.strip())
            func_params.append(func_param)
        self.func_name = func_name
        self.func_params = func_params
    def get_sql(self, alias: str='') -> str:
        # A step's SQL is returned verbatim; the alias-aware rendering below
        # is retained as commented-out legacy code.
        return self.sql
        # func_params_sql = ''
        # for func_param in self.func_params:
        #     if func_param.is_db_field and alias:
        #         func_params_sql += '{}.{}, '.format(alias, func_param)
        #     else:
        #         func_params_sql += '{}, '.format(func_param)
        # func_params_sql = func_params_sql[:-2]
        # sql = "{}({})".format(self.func_name, func_params_sql)
        # return sql
# class FuncParam():
# def __init__(self, name: str = '') -> None:
# self.name = name #type: str
# self.is_db_field = "'" not in name #type: bool
# if self.is_digit(name):
# self.is_db_field = False
#
#
# def __str__(self) -> str:
# return self.name
#
# def is_digit(self, s: str) -> bool:
# try:
# f = float(s)
# return True
# except:
# return False
class Lookup(FieldTransformation):
    """Transformation that maps codes through a ``<name>_ref_mappings``
    reference table in the sor schema."""

    def __init__(self, name, dict={}, sor=None):
        # Signature kept for backward compatibility; ``dict`` shadows the
        # builtin and has a mutable default, but it is only read here.
        super().__init__(name=name)
        # The original discarded these, so get_ddl/get_etl crashed at runtime.
        self.dict = dict  # code -> mapped code
        self.sor = sor    # sor schema name
        self.new_step("(select ref_code_doel from {}.{} where ref_code_doel = '{}')".format(sor, name, '{fld}'))

    def get_ddl(self):
        """Return the CREATE TABLE statement for the mapping table.

        Fixes the original, which passed one argument to a two-placeholder
        format string (IndexError) and never returned the SQL.
        """
        sql = """
        CREATE TABLE {}.{}_ref_mappings
        (ref_code_bron text,
        ref_code_doel text)
        """.format(self.sor, self.name)
        return sql

    def get_etl(self):
        """Return ETL SQL that inserts any missing mapping rows.

        Fixes the original, which formatted with missing keys ({sor}, {name},
        {ref_type} -> KeyError), schema-qualified a TEMP table (invalid in
        PostgreSQL), filtered on a nonexistent ``naam`` column, dropped a
        table it never created, and returned nothing.
        """
        values = ''
        for code, descr in self.dict.items():
            values += "('{}', '{}'),\r\n".format(code, descr)
        values = values[:-3]
        params = {}
        params['values'] = values
        params['sor'] = self.sor
        params['name'] = self.name
        sql = """
        CREATE TEMP TABLE {name}_ref_mappings_temp
        (ref_code_bron text,
        ref_code_doel text);

        INSERT INTO {name}_ref_mappings_temp (ref_code_bron, ref_code_doel)
        VALUES {values};

        INSERT INTO {sor}.{name}_ref_mappings (ref_code_bron, ref_code_doel)
        SELECT ref_code_bron, ref_code_doel
        FROM {name}_ref_mappings_temp
        WHERE NOT EXISTS (SELECT 1 FROM {sor}.{name}_ref_mappings maps
                          WHERE maps.ref_code_bron = {name}_ref_mappings_temp.ref_code_bron);

        DROP TABLE {name}_ref_mappings_temp;
        """.format(**params)
        return sql
|
Do you need to see a doctor? There are many everyday situations that could lead to your needing to see a doctor.
see a doctor at your local community service centre (CLSC).
get the list of medical clinics in your territory.
see a doctor at your CLSC’s youth clinic.
Are you looking for a family doctor?
Are you registered with your CLSC’s in-home support services?
|
# -*- coding: utf-8 -*-
import inspect
from django.db import models
from django.contrib.contenttypes import generic
from django.utils.translation import ugettext_lazy as _
from fuzzy_modeling.utils import get_class_by_python_path, get_choices_from_python_path_listing
from fuzzy_modeling.models.norms import NormModel
from fuzzy_modeling.models.utils import PyFuzzyMixin
from fuzzy_modeling.models.parameters import ParameterModel
class DefuzzifyModel(models.Model, PyFuzzyMixin):
    """
    A Fuzzy defuzzify base model.

    Stores which pyfuzzy defuzzification class to use (as a dotted python
    path) plus its INF/ACC norms and any extra constructor parameters, and
    converts between this Django model and the pyfuzzy object.
    """
    class Meta:
        app_label = 'fuzzy_modeling'
    # Choices are discovered dynamically from the fuzzy.defuzzify package;
    # the 'Base' class is excluded because it is abstract.
    DEFUZZIFY_CHOICES = get_choices_from_python_path_listing(
        'fuzzy.defuzzify',
        ignores=['Base', ]
    )
    # (
    #     ('fuzzy.defuzzify.COG.COG', _("COG")),
    #     ('fuzzy.defuzzify.Dict.Dict', _("Dict")),
    #     ('fuzzy.defuzzify.COGS.COGS', _("COGS")),
    #     ('fuzzy.defuzzify.LM.LM', _("LM")),
    #     ('fuzzy.defuzzify.MaxLeft.MaxLeft', _("MaxLeft")),
    #     ('fuzzy.defuzzify.MaxRight.MaxRight', _("MaxRight")),
    #     ('fuzzy.defuzzify.RM.RM', _("RM")),
    # )
    # Dotted python path of the pyfuzzy defuzzify class to instantiate.
    defuzzify = models.CharField(
        _("Defuzzify"),
        choices=DEFUZZIFY_CHOICES,
        max_length=250,
        blank=False, null=False,
        default=DEFUZZIFY_CHOICES[0][0]
    )
    # Optional INF/ACC norms passed to the defuzzify constructor.
    inf = models.ForeignKey(NormModel, related_name="defuzzify_inf_set", blank=True, null=True)
    acc = models.ForeignKey(NormModel, related_name="defuzzify_acc_set", blank=True, null=True)
    # Extra constructor parameters, stored generically as name/value pairs.
    parameters = generic.GenericRelation(ParameterModel)
    def get_pyfuzzy(self):
        """
        Return the Pyfuzzy class of this model
        """
        DefuzzifyClass = get_class_by_python_path(self.defuzzify)
        inf = self.inf.get_pyfuzzy() if self.inf else None
        acc = self.acc.get_pyfuzzy() if self.acc else None
        # parameters =
        # INF/ACC are passed explicitly; any other stored parameter is
        # forwarded as an additional keyword argument.
        parameters_dict = {
            'INF': inf,
            'ACC': acc
        }
        for p in self.parameters.all():
            if p.name != 'INF' and p.name != 'ACC':
                parameters_dict[p.name] = p.get_value()
        defuzzify = DefuzzifyClass(**parameters_dict)
        return defuzzify
    @classmethod
    def from_pyfuzzy(cls, pyfuzzy):
        """
        Return the model representation of an instance of the pyfuzzy attr
        """
        defuzz_model = cls()
        # Reconstruct the dotted path convention 'fuzzy.defuzzify.X.X'.
        defuzzify = 'fuzzy.defuzzify.%s.%s' % (
            pyfuzzy.__class__.__name__,
            pyfuzzy.__class__.__name__
        )
        defuzz_model.defuzzify = defuzzify
        # INF
        inf_model = None
        if pyfuzzy.INF:
            # Resolve the related NormModel class through the FK descriptor.
            inf_model = cls.inf.field.related.parent_model.from_pyfuzzy(pyfuzzy.INF)
        defuzz_model.inf = inf_model
        # ACC
        acc_model = None
        if pyfuzzy.ACC:
            acc_model = cls.acc.field.related.parent_model.from_pyfuzzy(pyfuzzy.ACC)
        defuzz_model.acc = acc_model
        # First save so the generic relation below has a PK to attach to.
        defuzz_model.save()
        # parameters
        # Persist every non-None constructor argument of the pyfuzzy object
        # (introspected from __init__) except self.
        for arg in inspect.getargspec(pyfuzzy.__init__).args:
            if arg != 'self':
                arg_value = getattr(pyfuzzy, arg)
                if arg_value is not None:
                    arg_type = ParameterModel.get_type_from_python_type(arg_value)
                    defuzz_model.parameters.create(
                        name=arg,
                        value=arg_value,
                        value_type=arg_type
                    )
        defuzz_model.save()
        return defuzz_model
    def __unicode__(self):
        return self.get_defuzzify_display()
|
A shirt that works for a straight-hipped body, regardless of gender. Or, worn oversized, it's just the kind of button-up that Chika and Cat turn to season after season, year after year. Organic cotton.
|
# Copyright (c) 2015 Carlos Valiente
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Utilities for working with Vagrant environments."""
import os
import logging
import subprocess
__all__ = [
"destroy",
"up",
]
class up(object):
    """Context manager that brings up a Vagrant environment on start.
    """

    log = logging.getLogger("vagrant")

    def __init__(self, dname=None):
        """Constructor.

        Parameters:

            dname
                Path to the directory containing the Vagrantfile. Defaults
                to the current working directory if not given.
        """
        self._dname = dname if dname is not None else os.getcwd()
        self._vagrantfile = os.path.join(self._dname, "Vagrantfile")
        if not os.access(self._vagrantfile, os.F_OK):
            raise Exception("%s: Not found" % (self._vagrantfile,))
        self._hosts = None

    def __enter__(self):
        # Only bring up machines that are not already running.
        for host, state in self._status():
            if state != "running":
                self._vagrant("up", host)
        return self

    def __exit__(self, *exc_info):
        # Machines are intentionally left running; call destroy() to tear
        # the environment down.
        pass

    @property
    def hosts(self):
        """Tuple of Vagrant nodes in this Vagrant environment.
        """
        if self._hosts is None:
            found = []
            for row in self._vagrant("status --machine-readable"):
                fields = row.split(",")
                if fields[2] == "state":
                    found.append(fields[1])
            self._hosts = tuple(found)
        return self._hosts

    def provision(self):
        """Provisions all nodes in this Vagrant environment.
        """
        return self._vagrant("provision")

    def ssh(self, node, cmd):
        """Executes the given command in the given hosts.

        Raises an error if the return code of ``vagrant ssh`` is non-zero.
        Returns a list containing the output of ``vagrant ssh`` (both
        stdout and stderr).
        """
        return self._vagrant('ssh -c "%s"' % (cmd,), node)

    def _status(self):
        # Parse `vagrant status --machine-readable` into (host, state)
        # pairs, seeding the host cache on first use.
        pairs = []
        for row in self._vagrant("status --machine-readable"):
            fields = row.split(",")
            if fields[2] == "state":
                pairs.append((fields[1], fields[3]))
        if self._hosts is None:
            self._hosts = tuple(host for (host, _) in pairs)
        return pairs

    def _vagrant(self, *args):
        # Run a vagrant subcommand in the environment directory and return
        # its combined stdout/stderr as a list of lines.
        cmdline = " ".join(["vagrant"] + list(args))
        self.log.debug("Executing: %s", cmdline)
        proc = subprocess.Popen(cmdline,
                                shell=True,
                                cwd=self._dname,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        out, _ = proc.communicate()
        if proc.returncode:
            raise Exception(out)
        return out.strip().split("\n")
LOG = logging.getLogger("vagrant")


def destroy(dname=None):
    """Destroys the Vagrant environment.

    Arguments:

        dname
            Path to the directory containing the Vagrantfile. Defaults to
            the current working directory if not given.
    """
    target_dir = dname if dname is not None else os.getcwd()
    vagrantfile = os.path.join(target_dir, "Vagrantfile")
    if not os.access(vagrantfile, os.F_OK):
        raise Exception("%s: Not found" % (vagrantfile,))
    cmdline = "vagrant destroy --force"
    LOG.debug("Executing: %s", cmdline)
    # Combined stdout/stderr is captured so a failure can be reported with
    # the full command output.
    proc = subprocess.Popen(cmdline,
                            shell=True,
                            cwd=target_dir,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    out, _ = proc.communicate()
    if proc.returncode:
        raise Exception(out)
    LOG.debug(out)
|
Neil Simon's play The Good Doctor will be performed at 8 p.m. Friday, Feb. 9 and Saturday, Feb. 10, and at 2 p.m. Sunday, Feb. 11, in the Gannett Theater, 305 College St. Tickets are $6 and $3 for students and seniors.
Robinson Players to stage Neil Simon's "Chapter Two"
The Robinson Players, a Bates College theater group, will stage four performances of Pulitzer Prize-winner Neil Simon's comic drama "Chapter Two" Thursday, Oct. 15, at 8 p.m.; Friday, Oct. 16, at 8 p.m.; Saturday, Oct. 17, at 2 p.m.; and Sunday, Oct. 18, at 2 p.m. Directed by Jonathan Adler, a Bates junior from Newton, Mass., all performances of "Chapter Two" will be in Gannett Theater. The public is invited to attend, and general admission is $5.
|
from django.contrib.postgres.fields import HStoreField
from django.db import models
class Venue(models.Model):
    """A physical venue where events take place."""
    name = models.CharField(max_length=100)
    description = models.TextField(blank=True)
    address = models.TextField(blank=True)
    # Address string used for map display/geocoding (separate from the
    # postal address above).
    map_address = models.CharField(blank=True, max_length=255)
    # Key/value store for seat-map rendering dimensions -- presumably
    # width/height in pixels; TODO confirm against the frontend.
    seat_map_dimensions = HStoreField(null=False, default=dict)
    def __str__(self):
        return self.name
class SeatingGroup(models.Model):
    """A named group of seats (e.g. a section) within a venue."""
    venue = models.ForeignKey(Venue, on_delete=models.CASCADE)
    name = models.CharField(max_length=100)
    def __str__(self):
        return self.name
    def active_pricing_model(self, timestamp=None):
        """Return the pricing model for this group active at *timestamp*,
        or None if there is none.

        NOTE(review): relies on the related manager providing active();
        semantics of a None timestamp are defined there -- confirm.
        """
        return self.pricingmodel_set.active(timestamp).filter(seating_group_id=self.id).first()
class Seat(models.Model):
    """A single seat at fixed (x, y) coordinates within a seating group."""
    group = models.ForeignKey(SeatingGroup, on_delete=models.CASCADE)
    # help_text is Swedish for: 'For example "Row 17, Seat 5011"'.
    name = models.CharField(max_length=40, help_text='Till exempel "Rad 17, Stol 5011"')
    x_pos = models.IntegerField()
    y_pos = models.IntegerField()
    def __str__(self):
        return self.name
    def price_for_type(self, ticket_type, timestamp=None):
        """Price of this seat for *ticket_type* under the pricing model
        active at *timestamp*.

        NOTE(review): raises AttributeError when no pricing model is active
        (active_pricing_model() can return None) -- confirm intended.
        """
        return self.group.active_pricing_model(timestamp).price_for(ticket_type)
|
Entertainment in Knighton includes children's entertainers, top-quality comedians, a wide range of tribute acts, and superb bands from throughout the UK. Costello Entertainments is a well-established, experienced and customer-focused entertainment agency supplying entertainers in Knighton. For weddings, birthday parties, christenings or corporate events, we provide a comprehensive party entertainment hire service for all types of entertainment in Knighton.
|
#!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
def linearPowerLimitedRocket_dist( t, F, ve=10000, m0=1.0 ):
    """Analytic distance travelled at time t for a constant-thrust rocket.

    t  -- time [s] (scalar or numpy array)
    F  -- thrust [N]
    ve -- exhaust velocity [m/s]
    m0 -- initial mass [kg]

    Only valid while F*t > m0*ve (the log argument must stay positive).
    """
    mass_momentum = m0*ve
    log_term = np.log( F*t - mass_momentum )
    powered_part = F*ve*( t/F + mass_momentum*log_term/F**2 )
    return powered_part - t*ve*log_term
def getAccFT( F=None, ve=10000, P=1e+9, m0=1.0, fmass=0.1 ):
    """Return (burn time, thrust) for a power-limited rocket.

    F     -- thrust [N]; derived from P and ve when None (P = F*ve)
    ve    -- exhaust velocity [m/s]
    P     -- jet power [W]
    m0    -- initial mass [kg]
    fmass -- remaining (dry) mass fraction at burn-out

    Python 2 module: uses print statements for debug output.
    """
    if F is None:
        F = P/ve ;print "F[N]" , F
    # Mass flow is F/ve, so burning (1-fmass) of m0 takes this long.
    tend = (1-fmass)*ve*m0/F ;print "tend[s]", tend
    return tend, F
def linearPowerLimitedRocket( t, ve=10000, P=1e+9, m0=1.0 ):
    """Return (distance, velocity, acceleration) for a rocket whose constant
    thrust is derived from jet power P.

    t  -- time [s] (scalar or numpy array)
    ve -- exhaust velocity [m/s]
    P  -- jet power [W]
    m0 -- initial mass [kg]
    """
    tend, F = getAccFT( ve=ve, P=P, m0=m0 )
    # Acceleration: thrust over the current (shrinking) mass m0 - F*t/ve.
    accel = F/( m0 -F*t/ve )
    # Rocket-equation velocity, offset so that velocity(0) == 0.
    v_offset = ve*np.log( m0*ve )
    vel = -ve*np.log( np.abs( m0*ve - F*t ) ) + v_offset
    # Integrated distance.
    dist = ve*t + vel * ( t - m0*ve/F )
    return dist,vel,accel
# --- Demo: plot a constant-power rocket burn --------------------------------
P = 10e+9  # jet power [W]
ve = 10e+3  # exhaust velocity [m/s]
fmass = 0.1  # remaining (dry) mass fraction at burn-out
m0 = 1.0  # initial mass [kg]
tend, F = getAccFT( ve=ve, P=P, m0=m0, fmass=fmass )
# Sample the whole burn; tend is marked with a dashed line in each panel.
ts = np.linspace(0,tend,1000)
s,v,a = linearPowerLimitedRocket( ts, ve=ve, P=P, m0=m0 )
# Three stacked panels: acceleration, velocity, distance vs. time.
plt.figure( figsize=(5,9) )
plt.subplot(3,1,1); plt.plot( ts, a ); plt.ylabel('a'); plt.xlabel('t[s]'); plt.grid()
plt.axvline( tend, ls="--")
plt.subplot(3,1,2); plt.plot( ts, v ); plt.ylabel('v [m/s]'); plt.xlabel('t[s]') ; plt.grid()
plt.axvline( tend, ls="--")
plt.subplot(3,1,3); plt.plot( ts, s ); plt.ylabel('s [m] '); plt.xlabel('t[s]') ; plt.grid()
plt.show()
|
View the list of Coral Springs open houses in northeast Calgary, updated as of Thursday, April 18th, 12:21am, with new weekend open-house events and schedules. Search 16 active Coral Springs real estate listings, open houses and REALTORS®, with Coral Springs real estate statistics, news, maps and homes in northeast Calgary. Currently active properties in this subdivision average $486,828.94 in price, with a high of $879,000.00. Property types in Coral Springs, Calgary may include attached homes, affordable detached homes and luxury homes, plus condos, townhomes, rowhouses, for-sale-by-owner properties and lake homes, including any bank-owned foreclosures.
Coral Springs Open Houses market statistics: As of Thursday, April 18th 12:21am Search new Coral Springs Open Houses including surround areas of Northwest Calgary, North Calgary , North East , Inner City Calgary , East Calgary plus new Calgary Open Houses.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import xbmc
import re
import resources.lib.utils as utils
import xbmcaddon
import HTMLParser
import xbmcvfs
# Kodi add-on identity and handle to its settings.
addonID = 'plugin.video.ardmediathek_de'
addon = xbmcaddon.Addon(id=addonID)
# Scratch file the converted SRT subtitles are written to (Python 2:
# translatePath returns a byte string, hence .decode('utf-8')).
subFile = xbmc.translatePath(xbmcaddon.Addon().getAddonInfo('profile')+'/sub.srt').decode('utf-8')
baseUrl = "http://www.ardmediathek.de"
# User setting: keep the broadcaster's subtitle colours in the SRT output.
coloredSubtitles = addon.getSetting("coloredSubtitles") == "true"
def setSubtitle(uri,offset=0):
    """Fetch the subtitle document at *uri* (relative to baseUrl), convert
    it to SRT and activate it on the running player.

    offset is currently only printed, never applied.  (Python 2 module:
    uses print statements.)
    """
    if offset != 0:
        print offset
    print baseUrl+uri
    # New-style TTML subtitles live under /subtitle; anything else uses the
    # older document format.
    if uri.startswith('/subtitle'):
        _newSubtitle(baseUrl+uri)
    else:
        _oldSubtitle(baseUrl+uri)
def _newSubtitle(url):
    """Download a new-style TTML subtitle document, convert it to SRT and
    attach it to the player.  Colours from the TTML styling block are
    mapped to <font color> tags when enabled in the settings."""
    #if os.path.exists(subFile):
    #    os.remove(subFile)
    if xbmcvfs.exists(subFile):
        xbmcvfs.delete(subFile)
    try:
        content = utils.getUrl(url)
    except:
        content = ""
    if content:
        # Map TTML style ids to colours, then walk every <tt:p> cue.
        dict = _stylesSetup(re.compile('<tt:styling>(.+?)</tt:styling>', re.DOTALL).findall(content)[0])
        div = re.compile('<tt:div.+?>(.+?)</tt:div>', re.DOTALL).findall(content)[0]
        p = re.compile('<tt:p(.+?)</tt:p>', re.DOTALL).findall(div)
        i = 1
        buffer = ''
        for part in p:
            if '<tt:span' in part:
                # Timestamps start at hour 10 in the source; shift to hour 0
                # and turn TTML line breaks into newlines.
                part = part.replace('begin="1','begin="0').replace('end="1','end="0').replace('\n','').replace('<tt:br/>','\n')
                begin = re.compile('begin="(.+?)"').findall(part)[0]
                # SRT uses comma decimal separators; drop the last digit.
                begin = begin.replace(".",",")[:-1]
                end = re.compile('end="(.+?)"').findall(part)[0]
                end = end.replace(".",",")[:-1]
                # Strip the <tt:p ...> attribute remainder from the cue text.
                s = part.split('>')[0]
                part = part.replace(s+'>','')
                if 'style=' in s:
                    style = re.compile('style="(.+?)"').findall(s)[0]
                    if dict[style]:
                        part = '<font color="'+dict[style]+'">'+part+'</font>'
                # Convert remaining inline spans to font tags (or drop them).
                match = re.compile('<(.+?)>').findall(part)
                for entry in match:
                    if entry.startswith('tt:span'):
                        if 'style' in entry:
                            style = re.compile('style="(.+?)"').findall(entry)[0]
                            part = part.replace('<'+entry+'>','<font color="'+dict[style]+'">')
                        else:
                            part = part.replace('<'+entry+'>','')
                    elif entry.startswith('tt:/span'):
                        part = part.replace('</tt:span>','</font>')
                    else:
                        part = part.replace('<'+entry+'>','')
                # Emit one SRT cue: index, time range, text, blank line.
                buffer += str(i) + '\n'
                buffer += begin+" --> "+end+"\n"
                buffer += part + '\n\n'
                i+=1
        f = xbmcvfs.File(subFile, 'w')
        f.write(buffer)
        f.close()
        # Give Kodi a moment before attaching the subtitle file.
        xbmc.sleep(1000)
        xbmc.Player().setSubtitles(subFile)
def _oldSubtitle(url):
    """Download an old-style subtitle document, convert it to SRT and
    attach it to the player."""
    if os.path.exists(subFile):
        os.remove(subFile)
    try:
        content = utils.getUrl(url)
    except:
        content = ""
    if content:
        # Map style ids to colours, then extract every timed <p> line.
        # Timestamps start at hour 1 in the source; the leading "1" is
        # consumed by the regex and replaced with "0" below.
        dict = _stylesSetup(re.compile('<styling>(.+?)</styling>', re.DOTALL).findall(content)[0])
        matchLine = re.compile('<p id=".+?" begin="1(.+?)" end="1(.+?)".+?style="(.+?)">(.+?)</p>', re.DOTALL).findall(content)
        #fh = open(subFile, 'a')
        f = xbmcvfs.File(subFile, 'w')
        count = 1
        for begin, end, style, line in matchLine:
            # SRT comma decimal separator; drop the last digit.
            begin = "0"+begin.replace(".",",")[:-1]
            end = "0"+end.replace(".",",")[:-1]
            text = ''
            line = line.replace('\n','').strip()
            line = line.replace("<br />","\n")
            if dict[style]:
                line = '<font color="'+dict[style]+'">'+line+'</font>'
            # Convert inline tts:color spans to font tags.
            s = line.split('<')
            for entry in s:
                if entry.startswith('span'):
                    if 'tts:color' in entry.split('>')[0]:
                        color = re.compile('tts:color="(.+?)"', re.DOTALL).findall(entry.split('>')[0])[0]
                        line = line.replace('<'+entry.split('>')[0]+'>','<font color="'+color+'">')
            line = line.replace('</span>','</font>')
            # Collapse runs of whitespace and trim before line breaks.
            while '  ' in line:
                line = line.replace('  ',' ')
            line = line.replace(' \n','\n').replace(' </font>\n','</font>\n')
            #fh.write(str(count)+"\n"+begin+" --> "+end+"\n"+_cleanTitle(line)+"\n\n")
            f.write(str(count)+"\n"+begin+" --> "+end+"\n"+_cleanTitle(line)+"\n\n")
            count+=1
        f.close()
        # Give Kodi a moment before attaching the subtitle file.
        xbmc.sleep(1000)
        xbmc.Player().setSubtitles(subFile)
"""
def _oldSubtitle(url):
if os.path.exists(subFile):
os.remove(subFile)
try:
content = utils.getUrl(url)
except:
content = ""
if content:
dict = _stylesSetup(re.compile('<styling>(.+?)</styling>', re.DOTALL).findall(content)[0])
matchLine = re.compile('<p id=".+?" begin="1(.+?)" end="1(.+?)".+?style="(.+?)">(.+?)</p>', re.DOTALL).findall(content)
fh = open(subFile, 'a')
count = 1
for begin, end, style, line in matchLine:
begin = "0"+begin.replace(".",",")[:-1]
end = "0"+end.replace(".",",")[:-1]
match = re.compile('<span(.+?)>', re.DOTALL).findall(line)
for span in match:
line = line.replace("<span"+span+">","")
line = line.replace("<br />","\n").replace("</span>","").strip()
if dict[style]:
line = '<font color="'+dict[style]+'">'+line+'</font>'
fh.write(str(count)+"\n"+begin+" --> "+end+"\n"+_cleanTitle(line)+"\n\n")
count+=1
fh.close()
xbmc.sleep(1000)
xbmc.Player().setSubtitles(subFile)
"""
def _stylesSetup(styles):
dict = {}
styles = styles.replace('tt:','').replace('xml:','')
match_styles = re.compile('<style(.+?)>', re.DOTALL).findall(styles)
for style in match_styles:
id = re.compile('id="(.+?)"', re.DOTALL).findall(style)[0]
if 'color=' in style and coloredSubtitles:
color = re.compile('color="(.+?)"', re.DOTALL).findall(style)[0]
else:
color = False
dict[id] = color
return dict
def _cleanTitle(title,html=True):
    """Decode HTML entities in *title* and return a UTF-8 byte string
    (html=True) or a manually-decoded unicode string (html=False).

    NOTE(review): the replace chain below is order-dependent (named
    entities must be decoded before their '&'-containing forms would be
    altered) -- do not reorder.
    """
    if html:
        # Python 2 HTMLParser handles all named/numeric entities at once.
        title = HTMLParser.HTMLParser().unescape(title)
        return title.encode("utf-8")
    else:
        # Manual fallback: decode the common named, then numeric, entities.
        title = title.replace("<", "<").replace(">", ">").replace("&", "&").replace(""", "\"").replace("'", "'").replace(""", "\"").replace("ß", "ß").replace("–", "-")
        title = title.replace("Ä", "Ä").replace("Ü", "Ü").replace("Ö", "Ö").replace("ä", "ä").replace("ü", "ü").replace("ö", "ö").replace("é", "é").replace("è", "è")
        title = title.replace("Ä","Ä").replace("ä","ä").replace("Ö","Ö").replace("ö","ö").replace("Ü","Ü").replace("ü","ü").replace("ß","ß")
        title = title.replace("'","'").strip()
        return title
|
KUALA LUMPUR, Tan Sri Muhyiddin Yassin hopes that the Johor state administration will run smoothly and focus will be on efforts to develop the state, following the transition of leadership.
The Parti Pribumi Bersatu Malaysia (Bersatu) president also thanked the Sultan of Johor Sultan Ibrahim Sultan Iskandar for giving his consent on the appointment of Bukit Kepong assemblyman, Dr Sahruddin Jamal as the new Menteri Besar of Johor, as proposed by the Pakatan Harapan (PH) leadership.
“I am grateful that with Allah’s blessings the Pakatan Harapan leaders at the state and central levels reached a consensus to allow a smooth transition of leadership,” he said in a statement today.
Dr Sahruddin was sworn in as the 17th Menteri Besar of Johor at Istana Bukit Serene in Johor Bahru this morning. He replaces Datuk Osman Sapian.
In JOHOR BAHRU, Johor Bersatu media director Mohd Solihan Badri in congratulating the appointment of Dr Sahruddin thanked Bersatu central leadership as well as PH component parties’ leaders for reaching a consensus on the Johor menteri besar post.
“Johor Bersatu has high expectation and is confident of Dr Sahruddin’s ability to lead Johor to become a developed state and a Pakatan stronghold,” he said in a statement here today.
According to the Tenang state assemblyman, Bersatu leaders at the division level should give their support to Dr Sahruddin in performing his duties and strengthening the party.
Meanwhile, Education Minister Dr Maszlee Malik in his Facebook posting today congratulated Dr Sahruddin on his appointment.
Maszlee also hoped that Johor would remain developed and prosperous under the leadership of the new menteri besar.
Johor UMNO Youth chief Mohd Hairi Mad Shah in expressing his wishes on the occasion hoped Dr Sahruddin would do a better job in improving the wellbeing of the people.
At the same time he reminded that UMNO would act as an agent of check and balance in ensuring that the people’s voices are heard.
Meanwhile, Dr Sahruddin’s mother Isniah Kasmongin, 72, when met at her home in Kampung Batu 28, Lenga said, she never expected her eighth child to be appointed as menteri besar.
Isniah said as a mother she was proud of her son’s achievement, as her late husband Jamal Kasran had always wanted to see his children succeed as leaders.
“He (Dr Sahruddin) called to inform me on his appointment yesterday. We were so overjoyed that some of his siblings started crying.
“I am happy and grateful. I hope he will administer the state efficiently and with integrity,” she added.
Johor DAP chairman Liew Chin Tong in expressing support on Dr Sahruddin’s appointment said the party would give the new menteri besar their full cooperation to ensure progress and well-being of the people are taken care of.
On the reshuffle of the state executive council Liew said it was up to the Menteri Besar to decide.
When asked whether DAP had submitted a list of names, he said: “I have not been informed as the Menteri Besar has yet to determine what he wants. We will just wait and see,” he said.
Meanwhile Johor PKR deputy chairman Jimmy Puah Wee Tse said the party would cooperate fully with Dr Sahruddin in promoting economic growth for the sake of the people.
“As we sat together in the state executive council for 11 months, I can see that he (Dr Sahruddin) is competent and takes his responsibilities seriously. I have confidence in his ability to lead the Johor government,” he said.
|
# -*- coding: utf-8 -*-
class LazyJIT(object):
    """Lazily-compiled wrapper around a function.

    Defers importing numba and applying the requested decorator ('jit' or
    'autojit') until the first call; if numba is unavailable the original
    function is used unchanged.  (Python 2 module: uses the
    ``except ImportError, e`` syntax.)
    """
    # Compiled (or plain fallback) callable; created on first __call__.
    this = None
    def __init__(self, decorator, f, *args, **kwargs):
        self.f = f
        self.args = args
        self.kwargs = kwargs
        self.decorator = decorator
    def __call__(self, *args, **kwargs):
        # Compile once, on first use only.
        if self.this is None:
            try:
                mod = __import__('numba', fromlist=[self.decorator])
                d = getattr(mod, self.decorator)
                self.this = d(*self.args, **self.kwargs)(self.f)
            except ImportError, e:
                # numba not installed: fall back to the undecorated function.
                self.this = self.f
        return getattr(self.this, '__call__')(*args, **kwargs)
def jit(signature, **kwargs):
    """Lazy stand-in for numba.jit: returns a decorator wrapping the target
    function in a LazyJIT so numba is imported only on first call.

    (Python 2 module: accepts str or unicode signatures.)
    """
    if not isinstance(signature, (str, unicode)):
        raise ValueError('First argument should be signature')
    def decorate(f):
        return LazyJIT('jit', f, signature, **kwargs)
    return decorate
def autojit(*args, **kwargs):
    """Lazy stand-in for numba.autojit.

    Supports both bare usage (``@autojit``) and parametrised usage
    (``@autojit(...)``).
    """
    used_bare = len(args) == 1 and not kwargs and callable(args[0])
    if used_bare:
        return LazyJIT('autojit', args[0])
    def decorate(f):
        return LazyJIT('autojit', f, *args, **kwargs)
    return decorate
|
60 procedures not the 90 average.
retainer is left indefinitely in the mouth?
Did you miss this code?
(typical fee $150) on the seat date.
43% of the crowns & crown retainers.
Varnish applications, regardless of caries risk.
your only choices are D1206 & D1208.
type of fluoride for all patients!
Evaluation for new patients at all?
& has a higher UCR allowance.
as diabetes, smoking or heart disease.
counts while your pallative (D9110) counts are low?
lower while pallative (D9110) counts should be higher.
should be close to zero.
a full crown & any root remains.
• Must be a ceramic crown only!
the facial and/or lingual surfaces.
reported when 2nd molars have erupted?
• If so, lots of money is left on the table!
an erupted tooth & use (D7140) for all extractions?
removed or the tooth was sectioned.
annually when two are generally payable?
patients once a year, then once a year is OK.
surfaces (D2335) – MIFL or DIFL?
D2355 it’s a tip off.
dentists preparation of the tooth.
(associated with osseous surgery and natural teeth)?
fee allowed, if controlled by a PPO plan.
• This generally results in a lower write-off!
are really doing an implant supported denture?
• This generally results in lower write-off.
which is a better benefit.
(D6056 or D6067) is involved.
generally done by most offices.
time with the proper narrative.
• Tooth whitening is a per arch code!
This is leaving money on the table!
• May pay some of the time!
• This can ONLY be used for thumb sucking!
rate is on a steady rise due to HPV.
• This is done once per year.
Purchase the new CDT-2015 Book today!
Will There Be Tears In Heaven?
|
"""
Contains functions with extract triples from external resources.
"""
from __future__ import with_statement
import functools
import urlparse
import lxml
import pytz
import rdflib
from django.conf import settings
from humfrey.utils.namespaces import NS
__all__ = ['extractors']
class NoLinkFoundError(Exception):
    """Raised when the fetched source document contains no link back to the
    pingback target."""
    pass
class InvalidPingback(Exception):
    """Raised when a pingback cannot be processed.

    *reason* is a short machine-readable code, e.g. 'invalid-html' or
    'unexpected-media-type'."""
    def __init__(self, reason):
        self.reason = reason
def _extract_from_rdf(graph, response, filename, source, target, format):
    # Stub: RDF responses are accepted but currently contribute no extra
    # triples.  NOTE(review): presumably this should parse `response` into
    # `graph` with rdflib using the given serialisation `format` -- confirm.
    pass
def _extract_from_html(graph, response, filename, source, target):
    """Verify the HTML document at *response* links to *target* and add its
    <title> to *graph*.

    Raises InvalidPingback('invalid-html') on unparseable HTML and
    NoLinkFoundError when no anchor resolves to *target*.  *filename* is
    unused.
    """
    try:
        html = lxml.etree.parse(response, parser=lxml.etree.HTMLParser())
    except:
        raise InvalidPingback('invalid-html')
    url = response['content-location']
    # Look for any anchor that resolves (relative to the document URL) to
    # the pingback target; a bare-host URL is normalised to path '/'.
    for anchor in html.xpath(".//a"):
        href = urlparse.urlparse(urlparse.urljoin(url, anchor.get('href')))
        if not href[2]:
            href = href[:2] + ('/',) + href[3:]
        href = urlparse.urlunparse(href)
        if href == target:
            break
    else:
        raise NoLinkFoundError
    # Record the document title, when present and non-empty.
    title = html.xpath('.//head/title')
    if title and title[0].text:
        graph.add((rdflib.URIRef(url), NS.dcterms['title'], rdflib.Literal(title[0].text)))
# Dispatch table: media type of the fetched source document -> extractor
# callable.  RDF variants share the (stub) RDF extractor with the matching
# rdflib format name; HTML variants use the anchor-scanning extractor.
extractors = {'application/rdf+xml': functools.partial(_extract_from_rdf, format='xml'),
              'text/n3': functools.partial(_extract_from_rdf, format='n3'),
              'text/turtle': functools.partial(_extract_from_rdf, format='n3'),
              'text/plain': functools.partial(_extract_from_rdf, format='nt'),
              'application/xhtml+xml': _extract_from_html,
              'text/html': _extract_from_html,
             }
def extract(pingback, response):
    """Build a graph of triples describing *pingback* from the fetched
    *response*.

    Returns an rdflib ConjunctiveGraph.  Raises InvalidPingback for
    unsupported media types or invalid/linkless content.
    """
    content_type = response.get('content-type', '').split(';')[0].lower()

    graph = rdflib.ConjunctiveGraph()
    graph_name = pingback.graph_name
    # Localise naive datetimes into the site's configured timezone.
    date = lambda x: rdflib.Literal(pytz.timezone(settings.TIME_ZONE).localize(x))

    url = response['content-location']
    uri = rdflib.URIRef(url)

    # Base provenance triples for the pingback itself.
    graph += (
        (uri, NS.sioc.links_to, rdflib.URIRef(pingback.target)),
        (graph_name, NS.dcterms.created, date(pingback.created)),
        (graph_name, NS.dcterms.modified, date(pingback.updated)),
        (graph_name, NS.dcterms.source, uri),
        (graph_name, NS.void.inDataset, settings.PINGBACK_DATASET),
        (graph_name, NS.dcterms['title'], rdflib.Literal(u'Pingback from %s to %s' % (unicode(pingback.source), unicode(pingback.target)))),
    )

    try:
        extractor = extractors[content_type]
    except KeyError:
        raise InvalidPingback('unexpected-media-type')

    try:
        # BUGFIX: extractors take (graph, response, filename, source,
        # target); the original call omitted the filename positional and
        # always raised TypeError.  The filename slot is unused by the
        # current extractors, so the response URL is passed through.
        extractor(graph, response, url, pingback.source, pingback.target)
    except NoLinkFoundError:
        raise InvalidPingback('no-link-found')

    return graph
|
ICLG.com > Firms > Snell & Wilmer L.L.P.
Founded in 1938, Snell & Wilmer is a full-service law firm with more than 425 attorneys practising in 12 locations throughout the United States and in Mexico. The firm represents clients ranging from large, publicly traded corporations to small businesses, individuals and entrepreneurs.
Our environmental and natural resources attorneys advise clients on a wide variety of environmental permitting and compliance issues. We assist with negotiation of environmental liability and oversight of due diligence for commercial transactions. Our litigation team represents clients in federal, state and local environmental enforcement actions. Due to our location in the Southwestern United States, we frequently address public land issues and permits which involve the National Environmental Policy Act, the California Environmental Quality Act and associated environmental impact statements. Our team can also advise clients regarding contaminated property and brownfield development including drafting and negotiating prospective purchaser agreements, voluntary clean-up agreements, institutional controls, deed and land use restrictions.
For more information, visit www.swlaw.com.
|
"""
Tests for assetstore using any of the modulestores for metadata. May extend to testing the storage options
too.
"""
import unittest
from datetime import datetime, timedelta
import pytest
import ddt
import pytz
import six
from django.test import TestCase
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import CourseLocator
from six.moves import range, zip
from openedx.core.lib.tests import attr
from xmodule.assetstore import AssetMetadata
from xmodule.modulestore import IncorrectlySortedList, ModuleStoreEnum, SortedAssetList
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.utils import (
MIXED_MODULESTORE_BOTH_SETUP,
MODULESTORE_SETUPS,
MixedModulestoreBuilder,
XmlModulestoreBuilder
)
class AssetStoreTestData(object):
    """
    Shared data for constructing test assets.
    """
    now = datetime.now(pytz.utc)
    user_id = 144
    # Under Python 2 also exercise `long` user ids; on Python 3 int covers both.
    if six.PY2:
        user_id_long = long(user_id)  # lint-amnesty, pylint: disable=undefined-variable
    else:
        user_id_long = int(user_id)
    user_email = "me@example.com"
    # Field names matching, position-for-position, each tuple in
    # all_asset_data below.
    asset_fields = (
        AssetMetadata.ASSET_BASENAME_ATTR, 'internal_name', 'pathname', 'locked',
        'edited_by', 'edited_by_email', 'edited_on', 'created_by', 'created_by_email', 'created_on',
        'curr_version', 'prev_version'
    )
    # Each entry is one asset; edited_on offsets keep the rows in a known
    # chronological order for sort-related tests.
    all_asset_data = (
        ('pic1.jpg', 'EKMND332DDBK', 'pix/archive', False,
         user_id_long, user_email, now + timedelta(seconds=10 * 1), user_id_long, user_email, now, '14', '13'),
        ('shout.ogg', 'KFMDONSKF39K', 'sounds', True,
         user_id, user_email, now + timedelta(seconds=10 * 2), user_id, user_email, now, '1', None),
        ('code.tgz', 'ZZB2333YBDMW', 'exercises/14', False,
         user_id * 2, user_email, now + timedelta(seconds=10 * 3), user_id * 2, user_email, now, 'AB', 'AA'),
        ('dog.png', 'PUPY4242X', 'pictures/animals', True,
         user_id_long * 3, user_email, now + timedelta(seconds=10 * 4), user_id_long * 3, user_email, now, '5', '4'),
        ('not_here.txt', 'JJJCCC747', '/dev/null', False,
         user_id * 4, user_email, now + timedelta(seconds=10 * 5), user_id * 4, user_email, now, '50', '49'),
        ('asset.txt', 'JJJCCC747858', '/dev/null', False,
         user_id * 4, user_email, now + timedelta(seconds=10 * 6), user_id * 4, user_email, now, '50', '49'),
        ('roman_history.pdf', 'JASDUNSADK', 'texts/italy', True,
         user_id * 7, user_email, now + timedelta(seconds=10 * 7), user_id * 7, user_email, now, '1.1', '1.01'),
        ('weather_patterns.bmp', '928SJXX2EB', 'science', False,
         user_id * 8, user_email, now + timedelta(seconds=10 * 8), user_id * 8, user_email, now, '52', '51'),
        ('demo.swf', 'DFDFGGGG14', 'demos/easy', False,
         user_id * 9, user_email, now + timedelta(seconds=10 * 9), user_id * 9, user_email, now, '5', '4'),
    )
class TestSortedAssetList(unittest.TestCase):
    """
    Tests the SortedAssetList class.
    """
    def setUp(self):
        super(TestSortedAssetList, self).setUp()  # lint-amnesty, pylint: disable=super-with-arguments
        asset_list = [dict(list(zip(AssetStoreTestData.asset_fields, asset))) for asset in AssetStoreTestData.all_asset_data]  # lint-amnesty, pylint: disable=line-too-long
        # One list sorted by the default key (filename) and one by edit
        # time; the latter is deliberately mis-sorted for find() purposes.
        self.sorted_asset_list_by_filename = SortedAssetList(iterable=asset_list)
        self.sorted_asset_list_by_last_edit = SortedAssetList(iterable=asset_list, key=lambda x: x['edited_on'])
        self.course_key = CourseLocator('org', 'course', 'run')
    def test_exception_on_bad_search_sort(self):
        """find() must refuse to binary-search a list not sorted by filename."""
        asset_key = self.course_key.make_asset_key('asset', 'pic1.jpg')
        with pytest.raises(IncorrectlySortedList):
            __ = self.sorted_asset_list_by_last_edit.find(asset_key)
    def test_find(self):
        """find() returns the index of the asset in filename order."""
        asset_key = self.course_key.make_asset_key('asset', 'asset.txt')
        assert self.sorted_asset_list_by_filename.find(asset_key) == 0
        asset_key_last = self.course_key.make_asset_key('asset', 'weather_patterns.bmp')
        assert self.sorted_asset_list_by_filename.find(asset_key_last) == (len(AssetStoreTestData.all_asset_data) - 1)
@attr('mongo')
@ddt.ddt
class TestMongoAssetMetadataStorage(TestCase):
"""
Tests for storing/querying course asset metadata.
"""
XML_MODULESTORE_MAP = {
'XML_MODULESTORE_BUILDER': XmlModulestoreBuilder(),
'MIXED_MODULESTORE_BUILDER': MixedModulestoreBuilder([('xml', XmlModulestoreBuilder())])
}
    def setUp(self):
        super(TestMongoAssetMetadataStorage, self).setUp()  # lint-amnesty, pylint: disable=super-with-arguments
        # Register custom equality helpers so assertEqual compares datetimes
        # ignoring sub-millisecond precision and AssetMetadata field-by-field.
        self.addTypeEqualityFunc(datetime, self._compare_datetimes)
        self.addTypeEqualityFunc(AssetMetadata, self._compare_metadata)
        # Asset (type, name) fixtures grouped by asset type.
        self.differents = (('different', 'burn.jpg'),)
        self.vrmls = (
            ('vrml', 'olympus_mons.vrml'),
            ('vrml', 'ponte_vecchio.vrml'),
        )
        self.regular_assets = (('asset', 'zippy.png'),)
        self.alls = self.differents + self.vrmls + self.regular_assets
def _compare_metadata(self, mdata1, mdata2, msg=None):
"""
So we can use the below date comparison
"""
if type(mdata1) != type(mdata2): # lint-amnesty, pylint: disable=unidiomatic-typecheck
self.fail(self._formatMessage(msg, u"{} is not same type as {}".format(mdata1, mdata2)))
for attr in mdata1.ATTRS_ALLOWED_TO_UPDATE: # lint-amnesty, pylint: disable=redefined-outer-name
assert getattr(mdata1, attr) == getattr(mdata2, attr), msg
def _compare_datetimes(self, datetime1, datetime2, msg=None):
"""
Don't compare microseconds as mongo doesn't encode below milliseconds
"""
if not timedelta(seconds=-1) < datetime1 - datetime2 < timedelta(seconds=1):
self.fail(self._formatMessage(msg, u"{} != {}".format(datetime1, datetime2)))
def _make_asset_metadata(self, asset_loc):
"""
Make a single test asset metadata.
"""
now = datetime.now(pytz.utc)
return AssetMetadata(
asset_loc, internal_name='EKMND332DDBK',
pathname='pictures/historical', contenttype='image/jpeg',
locked=False, fields={'md5': '77631ca4f0e08419b70726a447333ab6'},
edited_by=ModuleStoreEnum.UserID.test, edited_on=now,
created_by=ModuleStoreEnum.UserID.test, created_on=now,
curr_version='v1.0', prev_version='v0.95'
)
def _make_asset_thumbnail_metadata(self, asset_md):
"""
Add thumbnail to the asset_md
"""
asset_md.thumbnail = 'ABC39XJUDN2'
return asset_md
def setup_assets(self, course1_key, course2_key, store=None):
"""
Setup assets. Save in store if given
"""
for i, asset in enumerate(AssetStoreTestData.all_asset_data):
asset_dict = dict(list(zip(AssetStoreTestData.asset_fields[1:], asset[1:])))
if i in (0, 1) and course1_key:
asset_key = course1_key.make_asset_key('asset', asset[0])
asset_md = AssetMetadata(asset_key, **asset_dict)
if store is not None:
store.save_asset_metadata(asset_md, asset[4])
elif course2_key:
asset_key = course2_key.make_asset_key('asset', asset[0])
asset_md = AssetMetadata(asset_key, **asset_dict)
# Don't save assets 5 and 6.
if store is not None and i not in (4, 5):
store.save_asset_metadata(asset_md, asset[4])
@ddt.data(*MODULESTORE_SETUPS)
def test_save_one_and_confirm(self, storebuilder):
"""
Save the metadata in each store and retrieve it singularly, as all assets, and after deleting all.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
asset_filename = 'burnside.jpg'
new_asset_loc = course.id.make_asset_key('asset', asset_filename)
# Save the asset's metadata.
new_asset_md = self._make_asset_metadata(new_asset_loc)
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
# Find the asset's metadata and confirm it's the same.
found_asset_md = store.find_asset_metadata(new_asset_loc)
assert found_asset_md is not None
assert new_asset_md == found_asset_md
assert len(store.get_all_asset_metadata(course.id, 'asset')) == 1
@ddt.data(*MODULESTORE_SETUPS)
def test_delete(self, storebuilder):
"""
Delete non-existent and existent metadata
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('asset', 'burnside.jpg')
# Attempt to delete an asset that doesn't exist.
assert store.delete_asset_metadata(new_asset_loc, ModuleStoreEnum.UserID.test) == 0
assert len(store.get_all_asset_metadata(course.id, 'asset')) == 0
new_asset_md = self._make_asset_metadata(new_asset_loc)
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
assert store.delete_asset_metadata(new_asset_loc, ModuleStoreEnum.UserID.test) == 1
assert len(store.get_all_asset_metadata(course.id, 'asset')) == 0
@ddt.data(*MODULESTORE_SETUPS)
def test_find_non_existing_assets(self, storebuilder):
"""
Find a non-existent asset in an existing course.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('asset', 'burnside.jpg')
# Find existing asset metadata.
asset_md = store.find_asset_metadata(new_asset_loc)
assert asset_md is None
@ddt.data(*MODULESTORE_SETUPS)
def test_get_all_non_existing_assets(self, storebuilder):
"""
Get all assets in an existing course when no assets exist.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
# Find existing asset metadata.
asset_md = store.get_all_asset_metadata(course.id, 'asset')
assert asset_md == []
@ddt.data(*MODULESTORE_SETUPS)
def test_find_assets_in_non_existent_course(self, storebuilder):
"""
Find asset metadata from a non-existent course.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
fake_course_id = CourseKey.from_string("{}nothere/{}nothere/{}nothere".format(
course.id.org, course.id.course, course.id.run
))
new_asset_loc = fake_course_id.make_asset_key('asset', 'burnside.jpg')
# Find asset metadata from non-existent course.
with pytest.raises(ItemNotFoundError):
store.find_asset_metadata(new_asset_loc)
with pytest.raises(ItemNotFoundError):
store.get_all_asset_metadata(fake_course_id, 'asset')
@ddt.data(*MODULESTORE_SETUPS)
def test_add_same_asset_twice(self, storebuilder):
"""
Add an asset's metadata, then add it again.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('asset', 'burnside.jpg')
new_asset_md = self._make_asset_metadata(new_asset_loc)
# Add asset metadata.
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
assert len(store.get_all_asset_metadata(course.id, 'asset')) == 1
# Add *the same* asset metadata.
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
# Still one here?
assert len(store.get_all_asset_metadata(course.id, 'asset')) == 1
@ddt.data(*MODULESTORE_SETUPS)
def test_different_asset_types(self, storebuilder):
"""
Test saving assets with other asset types.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('vrml', 'pyramid.vrml')
new_asset_md = self._make_asset_metadata(new_asset_loc)
# Add asset metadata.
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
assert len(store.get_all_asset_metadata(course.id, 'vrml')) == 1
assert len(store.get_all_asset_metadata(course.id, 'asset')) == 0
@ddt.data(*MODULESTORE_SETUPS)
def test_asset_types_with_other_field_names(self, storebuilder):
"""
Test saving assets using an asset type of 'course_id'.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('course_id', 'just_to_see_if_it_still_works.jpg')
new_asset_md = self._make_asset_metadata(new_asset_loc)
# Add asset metadata.
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
assert len(store.get_all_asset_metadata(course.id, 'course_id')) == 1
assert len(store.get_all_asset_metadata(course.id, 'asset')) == 0
all_assets = store.get_all_asset_metadata(course.id, 'course_id')
assert all_assets[0].asset_id.path == new_asset_loc.path
@ddt.data(*MODULESTORE_SETUPS)
def test_lock_unlock_assets(self, storebuilder):
"""
Save multiple metadata in each store and retrieve it singularly, as all assets, and after deleting all.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('asset', 'burnside.jpg')
new_asset_md = self._make_asset_metadata(new_asset_loc)
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
locked_state = new_asset_md.locked
# Flip the course asset's locked status.
store.set_asset_metadata_attr(new_asset_loc, "locked", not locked_state, ModuleStoreEnum.UserID.test)
# Find the same course and check its locked status.
updated_asset_md = store.find_asset_metadata(new_asset_loc)
assert updated_asset_md is not None
assert updated_asset_md.locked == (not locked_state)
# Now flip it back.
store.set_asset_metadata_attr(new_asset_loc, "locked", locked_state, ModuleStoreEnum.UserID.test)
reupdated_asset_md = store.find_asset_metadata(new_asset_loc)
assert reupdated_asset_md is not None
assert reupdated_asset_md.locked == locked_state
ALLOWED_ATTRS = (
('pathname', '/new/path'),
('internal_name', 'new_filename.txt'),
('locked', True),
('contenttype', 'image/png'),
('thumbnail', 'new_filename_thumb.jpg'),
('fields', {'md5': '5346682d948cc3f683635b6918f9b3d0'}),
('curr_version', 'v1.01'),
('prev_version', 'v1.0'),
('edited_by', 'Mork'),
('edited_on', datetime(1969, 1, 1, tzinfo=pytz.utc)),
)
DISALLOWED_ATTRS = (
('asset_id', 'IAmBogus'),
('created_by', 'Smith'),
('created_on', datetime.now(pytz.utc)),
)
UNKNOWN_ATTRS = (
('lunch_order', 'burger_and_fries'),
('villain', 'Khan')
)
@ddt.data(*MODULESTORE_SETUPS)
def test_set_all_attrs(self, storebuilder):
"""
Save setting each attr one at a time
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('asset', 'burnside.jpg')
new_asset_md = self._make_asset_metadata(new_asset_loc)
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
for attribute, value in self.ALLOWED_ATTRS:
# Set the course asset's attribute.
store.set_asset_metadata_attr(new_asset_loc, attribute, value, ModuleStoreEnum.UserID.test)
# Find the same course asset and check its changed attribute.
updated_asset_md = store.find_asset_metadata(new_asset_loc)
assert updated_asset_md is not None
assert getattr(updated_asset_md, attribute, None) is not None
assert getattr(updated_asset_md, attribute, None) == value
@ddt.data(*MODULESTORE_SETUPS)
def test_set_disallowed_attrs(self, storebuilder):
"""
setting disallowed attrs should fail
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('asset', 'burnside.jpg')
new_asset_md = self._make_asset_metadata(new_asset_loc)
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
for attribute, value in self.DISALLOWED_ATTRS:
original_attr_val = getattr(new_asset_md, attribute)
# Set the course asset's attribute.
store.set_asset_metadata_attr(new_asset_loc, attribute, value, ModuleStoreEnum.UserID.test)
# Find the same course and check its changed attribute.
updated_asset_md = store.find_asset_metadata(new_asset_loc)
assert updated_asset_md is not None
assert getattr(updated_asset_md, attribute, None) is not None
# Make sure that the attribute is unchanged from its original value.
assert getattr(updated_asset_md, attribute, None) == original_attr_val
@ddt.data(*MODULESTORE_SETUPS)
def test_set_unknown_attrs(self, storebuilder):
"""
setting unknown attrs should fail
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
new_asset_loc = course.id.make_asset_key('asset', 'burnside.jpg')
new_asset_md = self._make_asset_metadata(new_asset_loc)
store.save_asset_metadata(new_asset_md, ModuleStoreEnum.UserID.test)
for attribute, value in self.UNKNOWN_ATTRS:
# Set the course asset's attribute.
store.set_asset_metadata_attr(new_asset_loc, attribute, value, ModuleStoreEnum.UserID.test)
# Find the same course and check its changed attribute.
updated_asset_md = store.find_asset_metadata(new_asset_loc)
assert updated_asset_md is not None
# Make sure the unknown field was *not* added.
with pytest.raises(AttributeError):
assert getattr(updated_asset_md, attribute) == value
@ddt.data(*MODULESTORE_SETUPS)
def test_save_one_different_asset(self, storebuilder):
"""
saving and deleting things which are not 'asset'
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
asset_key = course.id.make_asset_key('different', 'burn.jpg')
new_asset_thumbnail = self._make_asset_thumbnail_metadata(
self._make_asset_metadata(asset_key)
)
store.save_asset_metadata(new_asset_thumbnail, ModuleStoreEnum.UserID.test)
assert len(store.get_all_asset_metadata(course.id, 'different')) == 1
assert store.delete_asset_metadata(asset_key, ModuleStoreEnum.UserID.test) == 1
assert len(store.get_all_asset_metadata(course.id, 'different')) == 0
@ddt.data(*MODULESTORE_SETUPS)
def test_find_different(self, storebuilder):
"""
finding things which are of type other than 'asset'
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
asset_key = course.id.make_asset_key('different', 'burn.jpg')
new_asset_thumbnail = self._make_asset_thumbnail_metadata(
self._make_asset_metadata(asset_key)
)
store.save_asset_metadata(new_asset_thumbnail, ModuleStoreEnum.UserID.test)
assert store.find_asset_metadata(asset_key) is not None
unknown_asset_key = course.id.make_asset_key('different', 'nosuchfile.jpg')
assert store.find_asset_metadata(unknown_asset_key) is None
def _check_asset_values(self, assets, orig):
"""
Check asset type/path values.
"""
for idx, asset in enumerate(orig):
assert assets[idx].asset_id.asset_type == asset[0]
assert assets[idx].asset_id.path == asset[1]
@ddt.data(*MODULESTORE_SETUPS)
def test_get_multiple_types(self, storebuilder):
"""
getting all things which are of type other than 'asset'
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
# Save 'em.
for asset_type, filename in self.alls:
asset_key = course.id.make_asset_key(asset_type, filename)
new_asset = self._make_asset_thumbnail_metadata(
self._make_asset_metadata(asset_key)
)
store.save_asset_metadata(new_asset, ModuleStoreEnum.UserID.test)
# Check 'em.
for asset_type, asset_list in (
('different', self.differents),
('vrml', self.vrmls),
('asset', self.regular_assets),
):
assets = store.get_all_asset_metadata(course.id, asset_type)
assert len(assets) == len(asset_list)
self._check_asset_values(assets, asset_list)
assert len(store.get_all_asset_metadata(course.id, 'not_here')) == 0
assert len(store.get_all_asset_metadata(course.id, None)) == 4
assets = store.get_all_asset_metadata(
course.id, None, start=0, maxresults=-1,
sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(assets) == len(self.alls)
self._check_asset_values(assets, self.alls)
@ddt.data(*MODULESTORE_SETUPS)
def test_save_metadata_list(self, storebuilder):
"""
Save a list of asset metadata all at once.
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
# Make a list of AssetMetadata objects.
md_list = []
for asset_type, filename in self.alls:
asset_key = course.id.make_asset_key(asset_type, filename)
md_list.append(self._make_asset_thumbnail_metadata(
self._make_asset_metadata(asset_key)
))
# Save 'em.
store.save_asset_metadata_list(md_list, ModuleStoreEnum.UserID.test)
# Check 'em.
for asset_type, asset_list in (
('different', self.differents),
('vrml', self.vrmls),
('asset', self.regular_assets),
):
assets = store.get_all_asset_metadata(course.id, asset_type)
assert len(assets) == len(asset_list)
self._check_asset_values(assets, asset_list)
assert len(store.get_all_asset_metadata(course.id, 'not_here')) == 0
assert len(store.get_all_asset_metadata(course.id, None)) == 4
assets = store.get_all_asset_metadata(
course.id, None, start=0, maxresults=-1,
sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(assets) == len(self.alls)
self._check_asset_values(assets, self.alls)
@ddt.data(*MODULESTORE_SETUPS)
def test_save_metadata_list_with_mismatched_asset(self, storebuilder):
"""
Save a list of asset metadata all at once - but with one asset's metadata from a different course.
"""
with storebuilder.build() as (__, store):
course1 = CourseFactory.create(modulestore=store)
course2 = CourseFactory.create(modulestore=store)
# Make a list of AssetMetadata objects.
md_list = []
for asset_type, filename in self.alls:
if asset_type == 'asset':
asset_key = course2.id.make_asset_key(asset_type, filename)
else:
asset_key = course1.id.make_asset_key(asset_type, filename)
md_list.append(self._make_asset_thumbnail_metadata(
self._make_asset_metadata(asset_key)
))
# Save 'em.
store.save_asset_metadata_list(md_list, ModuleStoreEnum.UserID.test)
# Check 'em.
for asset_type, asset_list in (
('different', self.differents),
('vrml', self.vrmls),
):
assets = store.get_all_asset_metadata(course1.id, asset_type)
assert len(assets) == len(asset_list)
self._check_asset_values(assets, asset_list)
assert len(store.get_all_asset_metadata(course1.id, 'asset')) == 0
assert len(store.get_all_asset_metadata(course1.id, None)) == 3
assets = store.get_all_asset_metadata(
course1.id, None, start=0, maxresults=-1,
sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(assets) == len((self.differents + self.vrmls))
self._check_asset_values(assets, self.differents + self.vrmls)
@ddt.data(*MODULESTORE_SETUPS)
def test_delete_all_different_type(self, storebuilder):
"""
deleting all assets of a given but not 'asset' type
"""
with storebuilder.build() as (__, store):
course = CourseFactory.create(modulestore=store)
asset_key = course.id.make_asset_key('different', 'burn_thumb.jpg')
new_asset_thumbnail = self._make_asset_thumbnail_metadata(
self._make_asset_metadata(asset_key)
)
store.save_asset_metadata(new_asset_thumbnail, ModuleStoreEnum.UserID.test)
assert len(store.get_all_asset_metadata(course.id, 'different')) == 1
@ddt.data(*MODULESTORE_SETUPS)
def test_get_all_assets_with_paging(self, storebuilder):
"""
Save multiple metadata in each store and retrieve it singularly, as all assets, and after deleting all.
"""
with storebuilder.build() as (__, store):
course1 = CourseFactory.create(modulestore=store)
course2 = CourseFactory.create(modulestore=store)
self.setup_assets(course1.id, course2.id, store)
expected_sorts_by_2 = (
(
('displayname', ModuleStoreEnum.SortOrder.ascending),
('code.tgz', 'demo.swf', 'dog.png', 'roman_history.pdf', 'weather_patterns.bmp'),
(2, 2, 1)
),
(
('displayname', ModuleStoreEnum.SortOrder.descending),
('weather_patterns.bmp', 'roman_history.pdf', 'dog.png', 'demo.swf', 'code.tgz'),
(2, 2, 1)
),
(
('uploadDate', ModuleStoreEnum.SortOrder.ascending),
('code.tgz', 'dog.png', 'roman_history.pdf', 'weather_patterns.bmp', 'demo.swf'),
(2, 2, 1)
),
(
('uploadDate', ModuleStoreEnum.SortOrder.descending),
('demo.swf', 'weather_patterns.bmp', 'roman_history.pdf', 'dog.png', 'code.tgz'),
(2, 2, 1)
),
)
# First, with paging across all sorts.
for sort_test in expected_sorts_by_2:
for i in range(3):
asset_page = store.get_all_asset_metadata(
course2.id, 'asset', start=2 * i, maxresults=2, sort=sort_test[0]
)
num_expected_results = sort_test[2][i]
expected_filename = sort_test[1][2 * i]
assert len(asset_page) == num_expected_results
assert asset_page[0].asset_id.path == expected_filename
if num_expected_results == 2:
expected_filename = sort_test[1][(2 * i) + 1]
assert asset_page[1].asset_id.path == expected_filename
# Now fetch everything.
asset_page = store.get_all_asset_metadata(
course2.id, 'asset', start=0, sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(asset_page) == 5
assert asset_page[0].asset_id.path == 'code.tgz'
assert asset_page[1].asset_id.path == 'demo.swf'
assert asset_page[2].asset_id.path == 'dog.png'
assert asset_page[3].asset_id.path == 'roman_history.pdf'
assert asset_page[4].asset_id.path == 'weather_patterns.bmp'
# Some odd conditions.
asset_page = store.get_all_asset_metadata(
course2.id, 'asset', start=100, sort=('uploadDate', ModuleStoreEnum.SortOrder.ascending)
)
assert len(asset_page) == 0
asset_page = store.get_all_asset_metadata(
course2.id, 'asset', start=3, maxresults=0,
sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(asset_page) == 0
asset_page = store.get_all_asset_metadata(
course2.id, 'asset', start=3, maxresults=-12345,
sort=('displayname', ModuleStoreEnum.SortOrder.descending)
)
assert len(asset_page) == 2
@ddt.data('XML_MODULESTORE_BUILDER', 'MIXED_MODULESTORE_BUILDER')
def test_xml_not_yet_implemented(self, storebuilderName):
"""
Test coverage which shows that for now xml read operations are not implemented
"""
storebuilder = self.XML_MODULESTORE_MAP[storebuilderName]
with storebuilder.build(contentstore=None) as (__, store):
course_key = store.make_course_key("org", "course", "run")
asset_key = course_key.make_asset_key('asset', 'foo.jpg')
assert store.find_asset_metadata(asset_key) is None
assert store.get_all_asset_metadata(course_key, 'asset') == []
@ddt.data(*MODULESTORE_SETUPS)
def test_copy_all_assets_same_modulestore(self, storebuilder):
"""
Create a course with assets, copy them all to another course in the same modulestore, and check on it.
"""
with storebuilder.build() as (__, store):
course1 = CourseFactory.create(modulestore=store)
course2 = CourseFactory.create(modulestore=store)
self.setup_assets(course1.id, None, store)
assert len(store.get_all_asset_metadata(course1.id, 'asset')) == 2
assert len(store.get_all_asset_metadata(course2.id, 'asset')) == 0
store.copy_all_asset_metadata(course1.id, course2.id, ModuleStoreEnum.UserID.test * 101)
assert len(store.get_all_asset_metadata(course1.id, 'asset')) == 2
all_assets = store.get_all_asset_metadata(
course2.id, 'asset', sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(all_assets) == 2
assert all_assets[0].asset_id.path == 'pic1.jpg'
assert all_assets[1].asset_id.path == 'shout.ogg'
@ddt.data(*MODULESTORE_SETUPS)
def test_copy_all_assets_from_course_with_no_assets(self, storebuilder):
"""
Create a course with *no* assets, and try copy them all to another course in the same modulestore.
"""
with storebuilder.build() as (__, store):
course1 = CourseFactory.create(modulestore=store)
course2 = CourseFactory.create(modulestore=store)
store.copy_all_asset_metadata(course1.id, course2.id, ModuleStoreEnum.UserID.test * 101)
assert len(store.get_all_asset_metadata(course1.id, 'asset')) == 0
assert len(store.get_all_asset_metadata(course2.id, 'asset')) == 0
all_assets = store.get_all_asset_metadata(
course2.id, 'asset', sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(all_assets) == 0
@ddt.data(
('mongo', 'split'),
('split', 'mongo'),
)
@ddt.unpack
def test_copy_all_assets_cross_modulestore(self, from_store, to_store):
"""
Create a course with assets, copy them all to another course in a different modulestore, and check on it.
"""
mixed_builder = MIXED_MODULESTORE_BOTH_SETUP
with mixed_builder.build() as (__, mixed_store):
with mixed_store.default_store(from_store):
course1 = CourseFactory.create(modulestore=mixed_store)
with mixed_store.default_store(to_store):
course2 = CourseFactory.create(modulestore=mixed_store)
self.setup_assets(course1.id, None, mixed_store)
assert len(mixed_store.get_all_asset_metadata(course1.id, 'asset')) == 2
assert len(mixed_store.get_all_asset_metadata(course2.id, 'asset')) == 0
mixed_store.copy_all_asset_metadata(course1.id, course2.id, ModuleStoreEnum.UserID.test * 102)
all_assets = mixed_store.get_all_asset_metadata(
course2.id, 'asset', sort=('displayname', ModuleStoreEnum.SortOrder.ascending)
)
assert len(all_assets) == 2
assert all_assets[0].asset_id.path == 'pic1.jpg'
assert all_assets[1].asset_id.path == 'shout.ogg'
|
Ah, I missed the group.
When is the next batch or group where I can participate?
When was it started and completed?
Will there be another class? If so, when? I'm interested.
Please leave link to course.
|
#! /usr/bin/env python
from openturns import *
from math import *
TESTPREAMBLE()
RandomGenerator().SetSeed(0)
try :
# Problem parameters
dimension = 3
a = 7.0
b = 0.1
# Reference analytical values
meanTh = a/2
covTh = (b**2 * pi**8) / 18.0 + (b * pi**4) / 5.0 + (a**2) / 8.0 + 1.0 / 2.0
sob_1 = [(b * pi**4 / 5.0 + b**2 * pi**8 / 50.0 + 1.0/2.0) / covTh, (a**2 / 8.0) / covTh, 0.0]
sob_2 = [0.0, (b**2 * pi**8 / 18.0 - b**2 * pi**8 / 50.0) / covTh, 0.0]
sob_3 = [0.0]
sob_T1 = [sob_1[0] + sob_2[0] + sob_2[1] + sob_3[0], sob_1[1] + sob_2[0] + sob_2[2] + sob_3[0], sob_1[2] + sob_2[1] + sob_2[2] + sob_3[0]]
sob_T2 = [sob_2[0] + sob_2[1] + sob_3[0], sob_2[0] + sob_2[2] + sob_3[0], sob_2[1] + sob_2[2] + sob_3[0]]
sob_T3 = [sob_3[0]]
# Create the Ishigami function
inputVariables = Description(dimension)
inputVariables[0] = "xi1"
inputVariables[1] = "xi2"
inputVariables[2] = "xi3"
outputVariables = Description(1)
outputVariables[0] = "y"
formula = Description(1)
formula[0] = "sin(xi1) + (" + str(a) + ") * (sin(xi2)) ^ 2 + (" + str(b) + ") * xi3^4 * sin(xi1)"
model = NumericalMathFunction(inputVariables, outputVariables, formula)
# Create the input distribution
marginals = DistributionCollection(dimension)
marginals[0] = Uniform(-pi, pi)
marginals[1] = Uniform(-pi, pi)
marginals[2] = Uniform(-pi, pi)
distribution = ComposedDistribution(marginals)
# Create the orthogonal basis
polynomialCollection = PolynomialFamilyCollection(dimension)
polynomialCollection[0] = OrthogonalUniVariatePolynomialFamily(LegendreFactory())
polynomialCollection[1] = OrthogonalUniVariatePolynomialFamily(LegendreFactory())
polynomialCollection[2] = OrthogonalUniVariatePolynomialFamily(LegendreFactory())
enumerateFunction = EnumerateFunction(dimension)
productBasis = OrthogonalBasis(OrthogonalProductPolynomialFactory(polynomialCollection, enumerateFunction))
# Create the adaptive strategy
# We can choose amongst several strategies
# First, the most efficient (but more complex!) strategy
listAdaptiveStrategy = list()
degree = 6
indexMax = enumerateFunction.getStrataCumulatedCardinal(degree)
basisDimension = enumerateFunction.getStrataCumulatedCardinal(divmod(degree, 2)[0])
threshold = 1.0e-6
listAdaptiveStrategy.append(CleaningStrategy(productBasis, indexMax, basisDimension, threshold, False))
# Second, the most used (and most basic!) strategy
listAdaptiveStrategy.append(FixedStrategy(productBasis, enumerateFunction.getStrataCumulatedCardinal(degree)))
# Third, a slight enhancement with respect to the basic strategy
listAdaptiveStrategy.append(SequentialStrategy(productBasis, enumerateFunction.getStrataCumulatedCardinal(divmod(degree, 2)[0]), False))
for adaptiveStrategyIndex in range(len(listAdaptiveStrategy)):
adaptiveStrategy = listAdaptiveStrategy[adaptiveStrategyIndex]
# Create the projection strategy
samplingSize = 250
listProjectionStrategy = list()
# We have only the LeastSquaresStrategy up to now (0.13.0) but we can choose several sampling schemes
# Monte Carlo sampling
listProjectionStrategy.append(LeastSquaresStrategy(MonteCarloExperiment(samplingSize)))
# LHS sampling
listProjectionStrategy.append(LeastSquaresStrategy(LHSExperiment(samplingSize)))
# Low Discrepancy sequence
listProjectionStrategy.append(LeastSquaresStrategy(LowDiscrepancyExperiment(LowDiscrepancySequence(SobolSequence()),samplingSize)))
for projectionStrategyIndex in range(len(listProjectionStrategy)):
projectionStrategy = listProjectionStrategy[projectionStrategyIndex]
# Create the polynomial chaos algorithm
maximumResidual = 1.0e-10
algo = FunctionalChaosAlgorithm(model, distribution, adaptiveStrategy, projectionStrategy)
algo.setMaximumResidual(maximumResidual)
RandomGenerator.SetSeed(0)
algo.run()
# Examine the results
result = FunctionalChaosResult(algo.getResult())
print "###################################"
print AdaptiveStrategy(adaptiveStrategy)
print ProjectionStrategy(projectionStrategy)
#print "coefficients=", result.getCoefficients()
residuals = result.getResiduals()
print "residuals=", residuals
relativeErrors = result.getRelativeErrors()
print "relativeErrors=", relativeErrors
# Post-process the results
vector = FunctionalChaosRandomVector(result)
mean = vector.getMean()[0]
print "mean=%.8f" % mean, "absolute error=%.10f" % fabs(mean - meanTh)
variance = vector.getCovariance()[0, 0]
print "variance=%.8f" % variance, "absolute error=%.10f" % fabs(variance - covTh)
for i in range(dimension):
value = vector.getSobolIndex(i)
print "Sobol index", i, "= %.8f" % value, "absolute error=%.10f" % fabs(value - sob_1[i])
indices = Indices(2)
k = 0
for i in range(dimension):
indices[0] = i
for j in range(i+1, dimension):
indices[1] = j
value = vector.getSobolIndex(indices)
print "Sobol index", indices, "=%.8f" % value, "absolute error=%.10f" % fabs(value - sob_2[k])
k = k+1
indices = Indices(3)
indices[0] = 0
indices[1] = 1
indices[2] = 2
value = vector.getSobolIndex(indices)
print "Sobol index", indices, "=%.8f" % value, "absolute error=%.10f" % fabs(value - sob_3[0])
for i in range(dimension):
value = vector.getSobolTotalIndex(i)
print "Sobol total index", i, "=%.8f" % value, "absolute error=%.10f" % fabs(value - sob_T1[i])
indices = Indices(2)
k = 0
for i in range(dimension):
indices[0] = i
for j in range(i+1, dimension):
indices[1] = j
value = vector.getSobolIndex(indices)
print "Sobol total index", indices, "=%.8f" % value, "absolute error=%.10f" % fabs(value - sob_2[k])
k = k+1
indices = Indices(3)
indices[0] = 0
indices[1] = 1
indices[2] = 2
value = vector.getSobolTotalIndex(indices)
print "Sobol total index ", indices, "=%.8f" % value, "absolute error=%.10f" % fabs(value - sob_3[0])
except :
import sys
print "t_FunctionalChaos_ishigami.py", sys.exc_type, sys.exc_value
|
The San Diego Miata Club (SDMC) was founded in 1996 by eleven enthusiastic Miata owners. That enthusiasm has spread to hundreds of members over two decades. Our club packs the Hamburger Factory in Poway every month for general membership meetings, but the real fun lies in the numerous driving events, car shows, and parades that the club enjoys.
You have not experienced San Diego County like you will with SDMC - top down, wind in your hair, and a smile on your face!! In addition to the driving fun and socializing, club members can also enjoy discounts from numerous vendors (including participating Mazda dealerships), Twists & Turns (the club's monthly newsletter), and special events sponsored by Mazda and other vendors. So, what are you waiting for? Do you want to experience the magic before you join? Come by and join us during one of our meetings or events. You won't be disappointed!
Copyright © 2012 San Diego Miata Club. All rights reserved.
|
# Outspline - A highly modular and extensible outliner.
# Copyright (C) 2011 Dario Giovannetti <dev@dariogiovannetti.net>
#
# This file is part of Outspline.
#
# Outspline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Outspline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outspline. If not, see <http://www.gnu.org/licenses/>.
# SQL statements for the "Copy" table. Each row holds an item's id, its
# parent id, its previous-sibling id and its text, so a subtree's structure
# can be reconstructed from the table (presumably backing copy/paste —
# confirm against the callers).

copy_create = ("CREATE TABLE Copy (C_id INTEGER, "
               "C_parent INTEGER, "
               "C_previous INTEGER, "
               "C_text TEXT)")

# Cheap existence probe: returns a row only if the table is non-empty.
copy_select_check = 'SELECT C_id FROM Copy LIMIT 1'

# The single child of parent ? whose previous sibling is ?.
copy_select_parent = ('SELECT C_id, C_text FROM Copy WHERE C_parent=? '
                      'AND C_previous=? LIMIT 1')

# Root items of the stored subtrees: rows whose parent is not itself stored.
copy_select_parent_roots = ('SELECT C_id, C_text FROM Copy '
                            'WHERE C_parent NOT IN (SELECT C_id FROM Copy)')

copy_insert = ('INSERT INTO Copy (C_id, C_parent, C_previous, C_text) '
               'VALUES (?, ?, ?, ?)')

# Empty the whole table.
copy_delete = 'DELETE FROM Copy'
|
EARL is a vocabulary for describing test results; its terms are defined across a set of specifications and technical notes. The primary motivation for developing this vocabulary is to facilitate the exchange of test results between Web accessibility evaluation tools in a vendor-neutral and platform-independent format. It also provides reusable terms for generic quality assurance and validation purposes.
|
import re
import ckan.lib.base as base
import ckan.controllers.group as group
import ckan.model as model
import ckan.logic as logic
from ckan.common import c, _
# Short module-level aliases for frequently used CKAN helpers/exceptions.
abort = base.abort
NotAuthorized = logic.NotAuthorized
NotFound = logic.NotFound
class EventGroupController(group.GroupController):
    """Group controller specialised for 'event'-type groups.

    Routes generic group actions to their event-specific equivalents and
    renders event-specific templates.
    """

    group_types = ['event']

    def _action(self, action_name):
        ''' select the correct group/org action '''
        # Map the generic group actions onto the event-specific logic actions.
        if action_name == 'group_create':
            action_name = 'event_create'
        elif action_name == 'group_list':
            action_name = 'event_list'
        return super(EventGroupController, self)._action(action_name)

    def _render_template(self, template_name, group_type):
        ''' render the correct group/org template '''
        # BUGFIX: removed leftover debug print to stderr.
        return super(EventGroupController, self)._render_template(template_name, group_type)

    # TODO: overridden as no hook for changing template in base controller.
    def members(self, id):
        ''' List the members of group `id` using the event-specific template. '''
        group_type = self._ensure_controller_matches_group_type(id)
        context = {'model': model, 'session': model.Session,
                   'user': c.user}
        try:
            c.members = self._action('member_list')(
                context, {'id': id, 'object_type': 'user'}
            )
            data_dict = {'id': id}
            data_dict['include_datasets'] = False
            c.group_dict = self._action('group_show')(context, data_dict)
        except NotAuthorized:
            # BUGFIX: the message previously said "Unauthorized to delete
            # group %s" (copied from a delete handler) and interpolated an
            # empty string instead of the group id.
            abort(401, _('Unauthorized to view members of group %s') % id)
        except NotFound:
            abort(404, _('Group not found'))
        return self._render_template('mapactionevent/members.html', group_type)
|
data is rising so does the requirement of server performance.
to improve performance of the big data processing.
even Spark is also our main development target.
performance and how much it gains for data analysts.
|
#!/usr/bin/env python
import shelve
from saml2.ident import code, decode
from saml2 import time_util, SAMLError
import logging
logger = logging.getLogger(__name__)
# The assumption is that any subject may consist of data
# gathered from several different sources, all with their own
# timeout time.
class ToOld(SAMLError):
    """Raised when cached session information has passed its expiry time."""
    pass
class CacheError(SAMLError):
    """Generic error raised for cache-level failures."""
    pass
class Cache(object):
    """Per-subject session store, optionally persisted via ``shelve``.

    Entries are keyed first on the encoded NameID of the subject and,
    below that, on the entity (IdP/AA) that supplied the information.
    Every entry carries its own not-on-or-after timestamp.
    """

    def __init__(self, filename=None):
        """Use a disk-backed shelve when *filename* is given, otherwise a
        plain in-memory dict."""
        if filename:
            self._db = shelve.open(filename, writeback=True)
            self._sync = True
        else:
            self._db = {}
            self._sync = False

    def _flush(self):
        # Persist pending writes for shelve-backed stores.  The in-memory
        # dict backend has no sync(), hence the AttributeError guard.
        if self._sync:
            try:
                self._db.sync()
            except AttributeError:
                pass

    def delete(self, name_id):
        """Drop everything stored for a subject.

        :param name_id: The subject identifier, a NameID instance
        """
        del self._db[code(name_id)]
        self._flush()

    def get_identity(self, name_id, entities=None,
                     check_not_on_or_after=True):
        """Collect all still-valid identity information for a subject.

        :param name_id: The subject identifier, a NameID instance
        :param entities: The identifiers of the entities whose assertions
            are interesting.  If empty/None, all entities are interesting.
        :return: A 2-tuple of the merged identity information (a
            dictionary of attributes and values) and the list of entities
            whose information was missing or has timed out.
        """
        if not entities:
            try:
                entities = self._db[code(name_id)].keys()
            except KeyError:
                return {}, []

        merged = {}
        stale = []
        for entity_id in entities:
            try:
                info = self.get(name_id, entity_id, check_not_on_or_after)
            except ToOld:
                info = None
            if not info:
                stale.append(entity_id)
                continue
            # Union attribute values across entities, deduplicating.
            for attr, values in info["ava"].items():
                if attr in merged:
                    merged[attr] = list(set(merged[attr]).union(values))
                else:
                    merged[attr] = values
        return merged, stale

    def get(self, name_id, entity_id, check_not_on_or_after=True):
        """Return the session information one entity supplied for a subject.

        :param name_id: The subject identifier, a NameID instance
        :param entity_id: The identifier of the entity_id
        :param check_not_on_or_after: When True (the default), raise
            ToOld if the stored information has expired.
        :return: The session information, or None when empty
        """
        timestamp, info = self._db[code(name_id)][entity_id]
        if check_not_on_or_after and time_util.after(timestamp):
            raise ToOld("past %s" % timestamp)
        return info or None

    def set(self, name_id, entity_id, info, not_on_or_after=0):
        """Store session information for a (subject, entity) pair.

        Assumes the name_id is unique within the context of the Service
        Provider.

        :param name_id: The subject identifier, a NameID instance
        :param entity_id: The identifier of the entity_id/receiver of an
            assertion
        :param info: The session info, the assertion is part of this
        :param not_on_or_after: A time after which the assertion is not
            valid.
        """
        subject_key = code(name_id)
        if subject_key not in self._db:
            self._db[subject_key] = {}
        self._db[subject_key][entity_id] = (not_on_or_after, info)
        self._flush()

    def reset(self, name_id, entity_id):
        """Scrap the assertions received from an IdP or AA for a subject.

        :param name_id: The subject identifier, a NameID instance
        :param entity_id: The identifier of the entity_id of the assertion
        """
        self.set(name_id, entity_id, {}, 0)

    def entities(self, name_id):
        """All entities that supplied assertions for a subject, whether
        the assertions are still valid or not.

        :param name_id: The subject identifier, a NameID instance
        :return: A possibly empty list of entity identifiers
        """
        return self._db[code(name_id)].keys()

    def receivers(self, name_id):
        """Alias for :meth:`entities`; reads better in the IdP scenario."""
        return self.entities(name_id)

    def active(self, name_id, entity_id):
        """Whether a non-empty, unexpired assertion from *entity_id* exists.

        :param name_id: The ID of the subject
        :param entity_id: The entity ID of the entity_id of the assertion
        :return: True if still valid, otherwise False
        """
        try:
            timestamp, info = self._db[code(name_id)][entity_id]
        except KeyError:
            return False
        if info:
            return time_util.not_on_or_after(timestamp)
        return False

    def subjects(self):
        """Identifiers of every subject present in the cache.

        :return: list of subject identifiers
        """
        return [decode(subject) for subject in self._db.keys()]
|
August 12, 2011 — Van Gogh is Bipolar!
August 6, 2011 — I stumbled upon this blog as I was searching for photo inspiration for the store ambiance of Embellish (and possibly my room as well?).
|
import re
def main():
    """Find the first absolute URL in a sample string and report matches.

    Prints the match object, the matched URL and the full findall result
    (as the original script did), and returns the findall result so
    callers and tests can inspect it.
    """
    # Alternation of every IANA TLD known at the time of writing,
    # longest alternatives first so they win over shorter prefixes.
    TLD_GROUP = (
        r'(XN--CLCHC0EA0B2G2A9GCD|XN--HGBK6AJ7F53BBA|XN--HLCJ6AYA9ESC7A|'
        'XN--11B5BS3A9AJ6G|XN--MGBERP4A5D4AR|XN--XKC2DL3A5EE0H|'
        'XN--80AKHBYKNJ4F|XN--XKC2AL3HYE2A|XN--LGBBAT1AD8J|XN--MGBC0A9AZCG|'
        'XN--9T4B11YI5A|XN--MGBAAM7A8H|XN--MGBAYH7GPA|XN--MGBBH1A71E|'
        'XN--FPCRJ9C3D|XN--FZC2C9E2C|XN--YFRO4I67O|XN--YGBI2AMMX|'
        'XN--3E0B707E|XN--JXALPDLP|XN--KGBECHTV|XN--OGBPF8FL|XN--0ZWM56D|'
        'XN--45BRJ9C|XN--80AO21A|XN--DEBA0AD|XN--G6W251D|XN--GECRJ9C|'
        'XN--H2BRJ9C|XN--J6W193G|XN--KPRW13D|XN--KPRY57D|XN--PGBS0DH|'
        'XN--S9BRJ9C|XN--90A3AC|XN--FIQS8S|XN--FIQZ9S|XN--O3CW4H|'
        'XN--WGBH1C|XN--WGBL6A|XN--ZCKZAH|XN--P1AI|MUSEUM|TRAVEL|AERO|ARPA|'
        'ASIA|COOP|INFO|JOBS|MOBI|NAME|BIZ|CAT|COM|EDU|GOV|INT|MIL|NET|ORG|'
        'PRO|TEL|XXX|AC|AD|AE|AF|AG|AI|AL|AM|AN|AO|AQ|AR|AS|AT|AU|AW|AX|AZ|'
        'BA|BB|BD|BE|BF|BG|BH|BI|BJ|BM|BN|BO|BR|BS|BT|BV|BW|BY|BZ|CA|CC|CD|'
        'CF|CG|CH|CI|CK|CL|CM|CN|CO|CR|CU|CV|CW|CX|CY|CZ|DE|DJ|DK|DM|DO|DZ|'
        'EC|EE|EG|ER|ES|ET|EU|FI|FJ|FK|FM|FO|FR|GA|GB|GD|GE|GF|GG|GH|GI|GL|'
        'GM|GN|GP|GQ|GR|GS|GT|GU|GW|GY|HK|HM|HN|HR|HT|HU|ID|IE|IL|IM|IN|IO|'
        'IQ|IR|IS|IT|JE|JM|JO|JP|KE|KG|KH|KI|KM|KN|KP|KR|KW|KY|KZ|LA|LB|LC|'
        'LI|LK|LR|LS|LT|LU|LV|LY|MA|MC|MD|ME|MG|MH|MK|ML|MM|MN|MO|MP|MQ|MR|'
        'MS|MT|MU|MV|MW|MX|MY|MZ|NA|NC|NE|NF|NG|NI|NL|NO|NP|NR|NU|NZ|OM|PA|'
        'PE|PF|PG|PH|PK|PL|PM|PN|PR|PS|PT|PW|PY|QA|RE|RO|RS|RU|RW|SA|SB|SC|'
        'SD|SE|SG|SH|SI|SJ|SK|SL|SM|SN|SO|SR|ST|SU|SV|SX|SY|SZ|TC|TD|TF|TG|'
        'TH|TJ|TK|TL|TM|TN|TO|TP|TR|TT|TV|TW|TZ|UA|UG|UK|US|UY|UZ|VA|VC|VE|'
        'VG|VI|VN|VU|WF|WS|YE|YT|ZA|ZM|ZW)')
    # scheme://host.tld -- scheme is letters only, host allows [-A-Z0-9.],
    # and \b stops the match right after the TLD.
    url_pattern = r'([A-Z]+\:\/\/[A-Z0-9\-\.]+\.' + TLD_GROUP + r')\b'
    text = 'blah blah 00ftp://www.domain.com/foo/bar blah blah'
    url_re = re.compile(url_pattern, re.IGNORECASE)
    match = url_re.search(text)
    # print() syntax works on both Python 2 and 3; the original mixed a
    # Python-2-only `print match` statement with print() calls.
    print(match)
    if match:
        print(match.group())
    matches = url_re.findall(text)
    print(matches)
    return matches

main()
|
In this technological world that is making strides, it is a truism to say that the CDs became elements of an antique shop.
However, although we no longer hear music with CDs, many people have the house full of them because they fear throwing them. If this is your case, read on because in this article we’ll show you incredible ideas to recycle those old CDs so you do not have to get rid of them.
When you read these ideas to recycle old CDs you give then, you will no longer think that you have too many unused CDs, but you’ll regret not having more to make all these ideas of recycling.
There are still a few months until Christmas, but it is best to start putting together the decorations in time, so December does not take us by surprise. This idea of recycling CDs is really incredible.
Close the plastic ball and put a pretty ribbon to tie it to the tree and it is ready!
If you have an old shirt that you want to add a little style and youth, try this spectacular technique and reuse your old CDs.
With this idea you can remodel old pots with a touch of modernity. In addition, when the sun shines on the old CDs, it creates an incredible play of light.
Clean the pot with a dry cloth. If you want, before you attach the CDs you can paint it with a color that coordinates with the CDs, like blue or white.
You do not need to throw away the old CDs, as everything is recyclable. Try these crafts to reuse them.
|
from vcdtestutil import VCDTestCase, VCDTestLoader, mSec
class TestCase(VCDTestCase):
    """Checks the 16-bit timer1 in normal mode against a recorded VCD trace."""

    # Maps the processor type to the IRQ vector carrying TOV1 (timer1
    # overflow) on that device.
    p2irq = {
        "atmega128": "IRQ.VECTOR14",
        "at90s4433": "IRQ.VECTOR5",
        "at90s8515": "IRQ.VECTOR6",
        "atmega48": "IRQ.VECTOR13",
    }

    def setUp(self):
        self.getVCD()
        self.setClock(4000000)
        self.processor = self.getProcessorType()
        self.tov1 = self.p2irq[self.processor]

    def test_00(self):
        """simulation time [0..40ms]"""
        self.assertVCD()
        self.assertEqual(self.vcd.starttime, 0)
        self.assertEqual(self.vcd.endtime, 40 * mSec)

    def test_01(self):
        """init counter"""
        self.assertVCD()
        p = self.getVariable("TIMER1.TCNTH")
        self.assertEqual(p.firstedge.intValue, 0)
        p = self.getVariable("TIMER1.TCNTL")
        self.assertEqual(p.firstedge.intValue, 0)

    def test_02(self):
        """counter period = 0.25us"""
        self.assertVCD()
        c = self.getVariable("TIMER1.Counter")
        c1 = c.firstedge
        tp = self.tClock
        self.assertEqual(c1.intValue, 1)
        # Two consecutive counter edges must be exactly one clock apart.
        c2 = c.getNextEdge(c1)
        self.assertEqual(c2.intValue, 2)
        self.assertEqual(c2.internalTime - c1.internalTime, tp)

    def test_03(self):
        """counter mode: count 0xffff, then 0"""
        self.assertVCD()
        c = self.getVariable("TIMER1.Counter")
        c1 = c.firstedge
        tp = self.tClock
        t0 = c1.internalTime - tp
        # A full wrap takes 65536 counter periods.
        dtc = tp * 65536
        c2 = c.getNextEdge(t0 + dtc)
        self.assertEqual(c2.intValue, 0)

    def test_04(self):
        """check occurrence of TOV1 interrupt"""
        self.assertVCD()
        ctr = self.getVariable("TIMER1.Counter")
        tp = self.tClock
        t0 = ctr.firstedge.internalTime - tp
        dtc = tp * 65536
        # Allow up to 6 clocks of interrupt latency.
        idelay = 6 * self.tClock
        irq = self.getVariable(self.tov1)
        # first overflow
        t = t0 + dtc
        ce = ctr.getNextEdge(t)
        self.assertEqual(ce.internalTime, t)
        self.assertEqual(ce.intValue, 0)
        # check, when interrupt occurs
        ie = irq.getNextEdge(t)
        self.assertEqual(ie.intValue, 1)
        self.assertTrue(ie.internalTime <= (t + idelay),
                        "TOV1 occurred too late")
        # seek next TOV1
        ie = irq.getNextEdge(irq.getNextEdge(ie))
        self.assertTrue(ie.internalTime <= (t + dtc + idelay),
                        "second TOV1 occurred too late")
if __name__ == '__main__':
    from unittest import TestLoader, TextTestRunner
    # Bind the test case to the recorded VCD trace and run it verbosely.
    tests = VCDTestLoader("timer_16bit_normal_atmega128.vcd").loadTestsFromTestCase(TestCase)
    TextTestRunner(verbosity = 2).run(tests)
# EOF
|
Overnight RV parking is not allowed at Las Vegas casinos without special permission.
Back in the 1940s when Las Vegas was first making its gambling debut, small motels and trailer parks were the area's primary accommodations. Over more than 70 years, casinos have grown in size and stature, and luxury rooms have become the name of the game. You'll still find a couple of RV parks at casinos along the Boulder Highway, as well as some you can walk or drive to within a few minutes of the Strip.
Sam's Town Hotel and Gambling Hall brings western flair to the casino scene. Beside its gambling floor, Sam's Town has family-friendly attractions that include an 18-screen movie theater, bowling and live entertainment. The adjacent RV park provides amenities that make it convenient to stay for days -- or months -- on end. All sites have full hookups that include cable TV and telephone, and pull-through spots are available. Take a dip in one of the pools or hot tubs; wash laundry on site; enjoy a hot shower. Pet runs, barbecue grills, grassy lawns and mature foliage create a park-like environment. Sam's Town lies along the southeastern side of the city on the Boulder Highway.
Just a mile northwest of Sam's Town, Arizona Charlie's Boulder has a full-service casino complete with slots, video poker, gaming tables and a bingo parlor. Stay just outside the casino at the establishment's RV park, which has 200 spaces accommodating rigs up to 70 feet long. When you're not gambling, take a dip in the heated pool or hot tub, and enjoy the park's deluxe clubhouse. You'll find amenities there that include a fitness center, big-screen TV and pool table, in addition to showers and laundry. The property is dog-friendly and has its own dog run.
Three casinos lie within walking distance of Las Vegas RV Resort on the Boulder Highway. Sam's Town Casino is a half-mile south, with East Side Cannery 0.4 mile farther. Arizona Charlie's Boulder is the same distance north. The establishment has picnic tables at each site, extra-long sites up to 100 feet long and full hookups. The menu of resort amenities includes a swimming pool, fitness center, Wi-Fi, a clubhouse and spa. Just across the street, Road Runner RV Park has similar amenities. Both RV parks allow pets, but Road Runner RV Park limits dogs to 30 pounds or less.
While you won't find any RV parks on the Strip itself, you'll find some within a couple of miles. While you might expect to find Riviera RV Park at the casino of the same name, it lies 4 miles east. The park has basic sites, and amenities such as showers, flush toilets, pools and pull-through spots. Oasis Las Vegas RV Resort lies 3.5 miles south of Sunset Road, the southernmost point of the Strip. The park has two swimming pools, including a beach-like family pool. You'll find an 18-hole putting course, on-site restaurant, ballroom and fitness center. The resort provides full hookups, hot showers and laundry facilities.
|
import datetime
from django.conf import settings
from django.db import models
from django.utils import six, timezone
from django.utils.functional import cached_property
from django.utils.timesince import timesince, timeuntil
from django.utils.translation import ugettext as _
import pytz
from boardinghouse.base import SharedSchemaMixin
from boardinghouse.exceptions import Forbidden
from boardinghouse.schema import activate_schema, deactivate_schema
class ExpiringObjectsQuerySet(models.query.QuerySet):
    """QuerySet helpers for rows that carry an ``expires_at`` timestamp."""

    def expired(self):
        "Expired demos"
        now = timezone.now().replace(tzinfo=pytz.utc)
        return self.filter(expires_at__lt=now)

    def active(self):
        "Non-expired demos"
        now = timezone.now().replace(tzinfo=pytz.utc)
        return self.filter(expires_at__gte=now)
@six.python_2_unicode_compatible
class DemoSchema(SharedSchemaMixin, models.Model):
    """A User's demo setup.

    Each user may have at most one DemoSchema object, which will have an
    expiry date.

    We retain a reference to the template from which it was cloned, so we
    can easily reset it.
    """
    # Owning user; also the primary key, which enforces the
    # one-demo-per-user rule at the database level.
    user = models.OneToOneField(settings.AUTH_USER_MODEL,
                                on_delete=models.CASCADE,
                                primary_key=True,
                                related_name='demo_schema')
    # When the demo stops being usable; defaulted by save() if unset.
    expires_at = models.DateTimeField()
    # Source template; limited to templates flagged for demo use
    # (i.e. those with a related ValidDemoTemplate row).
    from_template = models.ForeignKey('template.SchemaTemplate',
                                      on_delete=models.CASCADE,
                                      related_name='demo_schemata',
                                      limit_choices_to=~models.Q(use_for_demo=None))
    objects = ExpiringObjectsQuerySet.as_manager()
    class Meta:
        verbose_name = 'user demo'
        verbose_name_plural = 'user demos'
    def __str__(self):
        if self.expired:
            return u'Expired demo for {0} (expired {1} ago)'.format(self.user, timesince(self.expires_at))
        return u'Demo for {0}: expires at {1} ({2} from now)'.format(
            self.user, self.expires_at, timeuntil(self.expires_at))
    @cached_property
    def schema(self):
        # Schema name is derived from the user's primary key, so it is
        # stable for the lifetime of the demo.
        return '{0}{1}'.format(settings.BOARDINGHOUSE_DEMO_PREFIX, self.user_id)
    @property
    def expired(self):
        # True once the expiry timestamp lies in the past (compared in UTC).
        return self.expires_at < timezone.now().replace(tzinfo=pytz.utc)
    @property
    def name(self):
        return _('Demo schema ({template_name})').format(template_name=self.from_template.name)
    @property
    def _clone(self):
        # Schema to clone from when (re)building this demo.
        return self.from_template.schema
    def save(self, *args, **kwargs):
        # Default the expiry on first save.
        # NOTE(review): uses datetime.utcnow() here but timezone.now() in
        # ExpiringObjectsQuerySet — presumably equivalent under USE_TZ;
        # confirm before unifying.
        if not self.expires_at:
            self.expires_at = datetime.datetime.utcnow().replace(tzinfo=pytz.utc) + settings.BOARDINGHOUSE_DEMO_PERIOD
        return super(DemoSchema, self).save(*args, **kwargs)
    def activate(self):
        # Refuse to activate an expired demo.
        if self.expired:
            raise DemoSchemaExpired()
        activate_schema(self.schema)
    def deactivate(self):
        deactivate_schema()
class DemoSchemaExpired(Forbidden):
    """Raised when attempting to activate a demo schema past its expiry."""
    pass
class ValidDemoTemplateManager(models.Manager):
    """Manager that only exposes templates whose schema is still active."""

    def get_queryset(self):
        queryset = super(ValidDemoTemplateManager, self).get_queryset()
        return queryset.filter(template_schema__is_active=True)
class ValidDemoTemplate(SharedSchemaMixin, models.Model):
    """Marks a SchemaTemplate as usable as the source for demo schemata."""
    # One-to-one link doubling as primary key: a template is valid for
    # demos when a row exists, and not valid otherwise.
    template_schema = models.OneToOneField('template.SchemaTemplate',
                                           primary_key=True,
                                           on_delete=models.CASCADE,
                                           related_name='use_for_demo')
    objects = ValidDemoTemplateManager()
    def __str__(self):
        return '{0} is valid as a demo source'.format(self.template_schema)
|
If you live in Wyoming, you’ll use HealthCare.gov website, to apply for coverage, compare plans, and enroll. Specific plans and prices will be available on October 1, 2013, when Marketplace open enrollment begins. Coverage can start as soon as January 1, 2014.
Beginning Oct 1, the Health Insurance Marketplace will make it easy for Wyomingites to compare qualified health plans, get answers to questions, find out if they are eligible for lower costs for private insurance or health programs like Medicaid and the Children’s Health Insurance Program (CHIP), and enroll in health coverage.
By the Numbers: Uninsured Wyomingites who are eligible for coverage through the Marketplace.
68,435 (83%) of Wyoming’s uninsured and eligible population may qualify for either tax credits to purchase coverage in the Marketplace or for Medicaid if Wyoming takes advantage of the new opportunity to expand Medicaid coverage under the Affordable Care Act.
This entry was posted in State Affordable Care Act and tagged Casper Affordable Care Act, Casper Obama Care, Cheyenne Affordable Care Act, Cheyenne Obama Care, Wyoming Affordable Care Act, Wyoming Obama Care on September 29, 2013 by Affordable Care Act.
|
from itertools import dropwhile, takewhile
import re
from django.template import loader, Context
class FormattingLayer(object):
shorthand = 'formatting'
def __init__(self, layer_data):
self.layer_data = layer_data
self.table_tpl = loader.get_template('regulations/layers/table.html')
self.note_tpl = loader.get_template('regulations/layers/note.html')
self.code_tpl = loader.get_template('regulations/layers/code.html')
self.subscript_tpl = loader.get_template(
'regulations/layers/subscript.html')
def render_table(self, table):
max_width = 0
for header_row in table['header']:
width = sum(cell['colspan'] for cell in header_row)
max_width = max(max_width, width)
# Just in case a row is longer than the header
row_max = max(len(row) for row in table['rows'])
max_width = max(max_width, row_max)
# Now pad rows if needed
for row in table['rows']:
row.extend([''] * (max_width - len(row)))
context = Context(table)
# Remove new lines so that they don't get escaped on display
return self.table_tpl.render(context).replace('\n', '')
def render_note(self, fence_data):
lines = fence_data.get('lines', [])
lines = [l for l in lines
if l.replace('Note:', '').replace('Notes:', '').strip()]
context = Context({'lines': lines})
return self.note_tpl.render(context).replace('\n', '')
def render_code(self, fence_data):
"""Generic code rendering. Not language specific"""
lines = fence_data.get('lines', [])
context = Context({'lines': lines})
return self.code_tpl.render(context)
def apply_layer(self, text_index):
"""Convert all plaintext tables into html tables"""
layer_pairs = []
if text_index in self.layer_data:
for data in self.layer_data[text_index]:
if 'table_data' in data:
layer_pairs.append((data['text'],
self.render_table(data['table_data']),
data['locations']))
if data.get('fence_data', {}).get('type') == 'note':
layer_pairs.append((data['text'],
self.render_note(data['fence_data']),
data['locations']))
elif 'fence_data' in data:
layer_pairs.append((data['text'],
self.render_code(data['fence_data']),
data['locations']))
if 'subscript_data' in data:
layer_pairs.append((
data['text'],
self.subscript_tpl.render(Context(
data['subscript_data'])).replace('\n', ''),
data['locations']))
return layer_pairs
|
Citizens Safety Academy is pleased to host the one and only Dr. Sherman House of the Civilian Defender. Dr. House will be teaching an extended (eight-hour) version of his flagship course on hemorrhage control. What will you do if someone suffers a severe injury and first responders haven’t arrived yet? This course gives you the tools to manage precisely that scenario.
This eight-hour course is designed to prepare the civilian defender to deal with the inevitable eventuality of stopping life-threatening bleeding. With the ever-increasing frequency of worldwide terror events, active killers, workplace violence incidents, and motor vehicle collisions, knowing the proper techniques, tactics, and procedures to keep yourself and your family, friends, parishioners, students and fellow countrymen alive until a higher level of medical care can reach you is essential! This course will prepare you for unexpected (but not unplanned-for) emergencies.
In the first half of this course, Sherman will cover the basic skills, techniques, and equipment needed for hemorrhage control. The second half will involve practical exercises to help attendees apply those skills under pressure. For more information on what to expect in this course, please see Dr. House’s blog, where he provides a detailed write-up (click here to view).
Dr. House is the originator of the CIVILIAN DEFENDER training concept. He has personally saved lives using CPR, Rescue Breathing, AED, TCCC, ACLS, PHTLS and Heimlich Maneuver techniques. He has also used a handgun to defend himself from armed attackers. He has studied extensively in the defensive arts, both armed and unarmed, as well as self-defense law, emergency medicine, criminal psychology, tactical driving and other survival skills from Tom and Lynn Givens, Paul Gomez, James Yeager, Andy Stanford, John Farnam, Greg Sullivan, Rob Edwards, Yancey Harrington, John Chapman, Aaron Little, Larry Lindenman, Rob Pincus, Caleb Causey, Dr. Keith Brown, Dr. William Aprill, Chuck Haggard, Massad Ayoob, John Hearne, Claude Werner, Skip Gochenauer, Steve Moses, Darryl Bolke, Larry Vickers, Marty Hayes and Andrew Branca.
I receive no compensation from you purchasing the Civilian Defender IFAK. I’ve merely partnered with a Veteran-owned business to supply the life-safety equipment for my courses, as there has been a rash of phony, fugazi, “Airsoft,” grade counterfeit medical products on Amazon. While you can certainly buy the components of the kit “a la carte,” the cost of the kit is quite reasonable and convenient.
Full price for first-time students of Sherman House’s eight-hour Hemorrhage Arrest Control course is $150. For Civilian Defender alumni who have already taken Dr. House’s four-hour HAC course, the price is $135. To register, please visit the Civilian Defender Eventbrite page.
|
#word_count.py
import string
import map_reduce
def mapper(input_key, input_value):
    """Map one document to a list of (word, 1) pairs.

    The document text is lower-cased and stripped of punctuation before
    being split on whitespace, so every word occurrence yields one
    (word, 1) pair, e.g.
    [('the', 1), ('quick', 1), ('brown', 1), ('fox', 1), ...]
    """
    return [(word, 1) for word in
            remove_punctuation(input_value.lower()).split()]


def remove_punctuation(s):
    """Return *s* with all ASCII punctuation characters removed."""
    # The original used s.translate(string.maketrans("", ""),
    # string.punctuation), which only works on Python 2 (string.maketrans
    # is gone and str.translate takes one argument on Python 3).  This
    # join is equivalent and runs on both versions.
    return ''.join(ch for ch in s if ch not in string.punctuation)
def reducer(intermediate_key, intermediate_value_list):
    """Reduce a word and its list of counts to a (word, total) pair."""
    total = sum(intermediate_value_list)
    return (intermediate_key, total)
"""
After Reducer, we have this
{'and': [1], 'fox': [1], 'over': [1], 'one': [1, 1], 'as': [1],
'go': [1], 'its': [1], 'lamb': [1, 1], 'giant': [1],
'for': [1, 1], 'jumped': [1], 'had': [1], 'snow': [1],
'to': [1], 'leap': [1], 'white': [1], 'was': [1, 1],
'mary': [1, 1], 'brown': [1], 'lazy': [1], 'sure': [1],
'that': [1], 'little': [1], 'small': [1], 'step': [1],
'everywhere': [1], 'mankind': [1], 'went': [1], 'man': [1],
'a': [1, 1], 'fleece': [1], 'grey': [1], 'dogs': [1],
'quick': [1], 'the': [1, 1, 1], 'thats': [1]}
"""
# Read every input document into a dict keyed by filename, then run the
# MapReduce word count over the whole collection.
filenames = ["text\\a.txt", "text\\b.txt", "text\\c.txt"]
i = {}
for filename in filenames:
    # `with` guarantees the file handle is closed even if read() fails;
    # the original opened and closed manually.
    with open(filename) as f:
        i[filename] = f.read()
# Parenthesized form prints identically on Python 2 and works on Python 3.
print(map_reduce.map_reduce(i, mapper, reducer))
"""
The map_reduce module imported by this program implements MapReduce in pretty much the simplest possible way, using some useful functions from the itertools library:
"""
# map_reduce.py
"""Defines a single function, map_reduce, which takes an input
dictionary i and applies the user-defined function mapper to each
(input_key,input_value) pair, producing a list of intermediate
keys and intermediate values. Repeated intermediate keys then
have their values grouped into a list, and the user-defined
function reducer is applied to the intermediate key and list of
intermediate values. The results are returned as a list."""
import itertools
def map_reduce(i, mapper, reducer):
    """Run a single-machine MapReduce over the input dictionary *i*.

    ``mapper`` is applied to every (input_key, input_value) pair and must
    return a list of (intermediate_key, intermediate_value) pairs.  All
    values sharing an intermediate key are gathered into a list, and
    ``reducer`` is applied to each (key, value-list) pair.  The reducer
    results are returned as a list.
    """
    intermediate = []
    # Map phase: flatten the mapper output of every input pair into one
    # list of (intermediate_key, value) pairs.  .items() (rather than the
    # Python-2-only .iteritems()) keeps this portable.
    for (key, value) in i.items():
        intermediate.extend(mapper(key, value))
    # Shuffle phase: itertools.groupby only groups *adjacent* equal keys,
    # so the pairs must be sorted first; then collect each group's values.
    groups = {}
    for key, group in itertools.groupby(sorted(intermediate), lambda x: x[0]):
        groups[key] = [value for _, value in group]
    # Reduce phase: one reducer call per intermediate key.
    return [reducer(key, groups[key]) for key in groups]
"""
from itertools import groupby
def groupby_even_odd(items):
f = lambda x: 'even' if x % 2 == 0 else 'odd'
gb = groupby(items, f)
for k, items in gb:
print '%s: %s' % (k, ','.join(map(str, items)))
>>> groupby_even_odd([1, 3, 4, 5, 6, 8, 9, 11])
odd: 1,3
even: 4
odd: 5
even: 6,8
odd: 9,11
Which is not what we want. To improve, simply do the following:
def groupby_even_odd(items):
f = lambda x: 'even' if x % 2 == 0 else 'odd'
gb = groupby(sorted(items, key=f), f)
for k, items in gb:
print '%s: %s' % (k, ','.join(map(str, items)))
>>> groupby_even_odd([1, 3, 4, 5, 6, 8, 9, 11])
even: 4,6,8
odd: 1,3,5,9,11
"""
def map_reduce(i, mapper, reducer):
    """Alternative formulation of map_reduce (shadows the one above).

    Same contract: map every input pair, group the intermediate values by
    key, and apply the reducer to each (key, value-list) pair.
    """
    intermediate = []
    for key, value in i.items():  # .items() works on Python 2 and 3
        intermediate.extend(mapper(key, value))
    groups = {}
    for key, group in itertools.groupby(sorted(intermediate), key=lambda x: x[0]):
        # setdefault creates the value list on first sight of the key.
        for _inter_key, value in group:
            groups.setdefault(key, []).append(value)
    # The original line, `return [reducer(k, v for k, v in
    # groups.iteritems())]`, is a SyntaxError (a bare generator expression
    # cannot follow another argument); the intended list comprehension is:
    return [reducer(k, v) for k, v in groups.items()]
|
Looking for a Hardwood Floor and Carpet Installer in Marieville?
Smart Reno Express provides you with an easy way to find Hardwood Floor and Carpet Installers in Marieville without having to search business directories or the Web. Just post your hardwood floor and carpet project and start receiving quotes from local contractors.
|
# Copyright 2015 Michael Frank <msfrank@syntaxjockey.com>
#
# This file is part of cifparser. cifparser is BSD-licensed software;
# for copyright information see the LICENSE file.
from cifparser.converters import *
def or_default(default, fn, *args, **kwargs):
try:
return fn(*args, **kwargs)
except KeyError:
return default
class Namespace(object):
"""
"""
def __init__(self, values):
"""
:param values:
:type values: cifparser.valuetree.ValueTree
"""
self.values = values
def get_container(self, path):
return self.values.get_container(path)
def get_container_or_default(self, path, default=None):
return or_default(default, self.get_container, path)
def contains_container(self, path):
"""
"""
return self.values.contains_container(path)
def get_raw(self, path, name):
return self.values.get_field(path, name)
def get_raw_or_default(self, path, name, default=None):
return or_default(default, self.get_raw, path, name)
def get_raw_list(self, path, name):
return self.values.get_field_list(path, name)
def get_raw_list_or_default(self, path, name, default=None):
return or_default(default, self.get_raw_list, path, name)
def get_str(self, path, name):
return str_to_stripped(self.get_raw(path, name))
def get_str_or_default(self, path, name, default=None):
return or_default(default, self.get_str, path, name)
def get_str_list(self, path, name):
return map(lambda x: str_to_stripped(x), self.values.get_field_list(path, name))
def get_str_list_or_default(self, path, name, default=None):
return or_default(default, self.get_str_list, path, name)
def get_flattened(self, path, name):
return str_to_flattened(self.get_raw(path, name))
def get_flattened_or_default(self, path, name, default=None):
return or_default(default, self.get_str, path, name)
def get_flattened_list(self, path, name):
return map(lambda x: str_to_flattened(x), self.values.get_field_list(path, name))
def get_flattened_list_or_default(self, path, name, default=None):
return or_default(default, self.get_flattened_list, path, name)
def get_int(self, path, name):
return str_to_int(self.get_flattened(path, name))
def get_int_or_default(self, path, name, default=None):
return or_default(default, self.get_int, path, name)
def get_int_list(self, path, name):
return map(lambda x: str_to_int(x), self.get_flattened_list(path, name))
def get_int_list_or_default(self, path, name, default=None):
return or_default(default, self.get_int_list, path, name)
def get_bool(self, path, name):
return str_to_bool(self.get_flattened(path, name))
def get_bool_or_default(self, path, name, default=None):
return or_default(default, self.get_bool, path, name)
def get_bool_list(self, path, name):
return map(lambda x: str_to_bool(x), self.get_flattened_list(path, name))
def get_bool_list_or_default(self, path, name, default=None):
return or_default(default, self.get_bool_list, path, name)
def get_float(self, path, name):
return str_to_float(self.get_flattened(path, name))
def get_float_or_default(self, path, name, default=None):
return or_default(default, self.get_float, path, name)
def get_float_list(self, path, name):
return map(lambda x: str_to_float(x), self.get_flattened_list(path, name))
def get_float_list_or_default(self, path, name, default=None):
return or_default(default, self.get_float_list, path, name)
def get_timedelta(self, path, name):
return str_to_timedelta(self.get_flattened(path, name))
def get_timedelta_or_default(self, path, name, default=None):
return or_default(default, self.get_timedelta, path, name)
def get_timedelta_list(self, path, name):
return map(lambda x: str_to_timedelta(x), self.get_flattened_list(path, name))
def get_timedelta_list_or_default(self, path, name, default=None):
return or_default(default, self.get_timedelta_list, path, name)
def get_size(self, path, name):
return str_to_size(self.get_flattened(path, name))
def get_size_or_default(self, path, name, default=None):
return or_default(default, self.get_size, path, name)
def get_size_list(self, path, name):
return map(lambda x: str_to_size(x), self.get_flattened_list(path, name))
    def get_size_list_or_default(self, path, name, default=None):
        """Resolve via get_size_list, falling back to *default* through the or_default helper."""
        return or_default(default, self.get_size_list, path, name)
    def get_percentage(self, path, name):
        """Parse the flattened value at (path, name) as a percentage via str_to_percentage."""
        return str_to_percentage(self.get_flattened(path, name))
    def get_percentage_or_default(self, path, name, default=None):
        """Resolve via get_percentage, falling back to *default* through the or_default helper."""
        return or_default(default, self.get_percentage, path, name)
def get_percentage_list(self, path, name):
return map(lambda x: str_to_percentage(x), self.get_flattened_list(path, name))
    def get_percentage_list_or_default(self, path, name, default=None):
        """Resolve via get_percentage_list, falling back to *default* through the or_default helper."""
        return or_default(default, self.get_percentage_list, path, name)
    def get_throughput(self, path, name):
        """Parse the flattened value at (path, name) as a throughput via str_to_throughput."""
        return str_to_throughput(self.get_flattened(path, name))
    def get_throughput_or_default(self, path, name, default=None):
        """Resolve via get_throughput, falling back to *default* through the or_default helper."""
        return or_default(default, self.get_throughput, path, name)
def get_throughput_list(self, path, name):
return map(lambda x: str_to_throughput(x), self.get_flattened_list(path, name))
    def get_throughput_list_or_default(self, path, name, default=None):
        """Resolve via get_throughput_list, falling back to *default* through the or_default helper."""
        return or_default(default, self.get_throughput_list, path, name)
    def contains_field(self, path, name):
        """
        Returns True if a single field with the specified name exists under
        path, otherwise False.

        :param path: The path to look under.
        :param name: The name.
        :type name: str
        :returns: True or False.
        :rtype: bool
        """
        return self.values.contains_field(path, name)
    def contains_field_list(self, path, name):
        """
        Returns True if a field list with the specified name exists under
        path, otherwise False.

        :param path: The path to look under.
        :param name: The name.
        :type name: str
        :returns: True or False.
        :rtype: bool
        """
        return self.values.contains_field_list(path, name)
    def contains(self, path, name):
        """True if (path, name) exists as either a single field or a field list."""
        return self.contains_field(path, name) or self.contains_field_list(path, name)
|
You have captured the very essence of this sweet dog. The colors and softness let its personality shine through.
Love the brushwork, love the dog and love the painting.
|
# # coding=utf-8
# import matplotlib.pyplot as plt
# import numpy as np
# import pytest
#
# from ..algorithms.field_interpolation import PeriodicInterpolateField
# from ..classes import Species, PeriodicGrid
#
# @pytest.mark.parametrize("power", range(6))
# def test_poly(power, plotting=False):
# NG = 16
# NG_plot = 500
# L = 1
#
# x, dx = np.linspace(0, L, NG, retstep=True, endpoint=False)
#
# N = 128
# x_particles = np.linspace(0, L, N, endpoint=False)
#
# def electric_field_function(x):
# return x ** power
#
# electric_field = electric_field_function(x)
#
# interpolated = PeriodicInterpolateField(x_particles, electric_field, x, dx)
# analytical = electric_field_function(x_particles)
#
# region_before_last_point = x_particles < x.max()
#
# def plot():
# x_plot = np.linspace(0, L, NG_plot, endpoint=False)
# electric_field_plot = electric_field_function(x_plot)
# plt.plot(x_plot, electric_field_plot, lw=5)
# plt.plot(x[region_before_last_point], electric_field[region_before_last_point])
# plt.plot(x_particles, interpolated, "go-")
# plt.vlines(x, electric_field.min(), electric_field.max())
# plt.show()
# return "poly test failed for power = {}".format(power)
#
# if plotting:
# plot()
#
# assert np.allclose(analytical[region_before_last_point], interpolated[region_before_last_point], atol=1e-2, rtol=1e-2), plot()
#
#
# @pytest.mark.parametrize("field", [lambda x: np.sin(2 * np.pi * x), lambda x: np.cos(2 * np.pi * x)])
# def test_periodic(field, plotting=False):
# NG = 16
# NG_plot = 500
# L = 1
#
# x, dx = np.linspace(0, L, NG, retstep=True, endpoint=False)
#
# N = 128
# x_particles = np.linspace(0, L, N, endpoint=False)
#
# electric_field = field(x)
# interpolated = PeriodicInterpolateField(x_particles, electric_field, x, dx)
# analytical = field(x_particles)
#
# def plot():
# x_plot = np.linspace(0, L, NG_plot, endpoint=False)
# electric_field_plot = field(x_plot)
# plt.plot(x_plot, electric_field_plot, lw=5)
# plt.plot(x, electric_field)
# plt.plot(x_particles, interpolated, "go-")
# plt.vlines(x, electric_field.min(), electric_field.max())
# plt.show()
# return "periodic test failure"
#
# if plotting:
# plot()
#
# assert np.allclose(interpolated, analytical, atol=1e-2, rtol=1e-2), plot()
#
#
# @pytest.mark.parametrize("power", range(2, 6))
# def test_single_particle(power, plotting=False):
# """tests interpolation of field to particles:
# at cell boundary
# at hall cell
# at 3/4 cell
# at end of simulation region (PBC)
# """
# NG = 16
# L = 1
# g = PeriodicGrid(1, L=L, NG=NG)
# s = Species(1, 1, 4, g)
#
# def electric_field_function(x):
# return x ** power
#
# electric_field = electric_field_function(g.x)
#
# interpolated = PeriodicInterpolateField(s.x, electric_field, g.x, g.dx)
# analytical = electric_field_function(s.x)
# # analytical[-1] = (electric_field[0] + electric_field[-1]) / 2
#
# def plot():
# plt.plot(s.x, interpolated, "go-")
# plt.vlines(g.x, electric_field.min(), electric_field.max())
# plt.show()
# return "poly test failed for power = {}".format(power)
#
# if plotting:
# plot()
#
# assert np.allclose(analytical, interpolated), plot()
#
#
# if __name__ == "__main__":
# test_single_particle()
|
At this stage of the process we typically construct and test several models. First up, we create a mould design. Then we start assembling the various components.
We try out test versions, evaluate them, make adjustments and repeat this process as long as it is necessary in order to get the product up to your and our expectations. Generally, relatively few try-outs are necessary thanks to our design and simulation capabilities as well as our many years of experience. And of course we aim to get it right the first time!
It's not just the plastic prototypes we subject to extensive testing. The printed circuit boards we design for consumer electronics or medical equipment, for instance, are subjected to several series of tests before we move on to the production process.
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A CloudSQL Instance Resource.
See: https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/
"""
import json
from google.cloud.forseti.common.gcp_type import resource
class CloudSQLInstanceLifecycleState(resource.LifecycleState):
    """Represents the cloud_sql_instance's LifecycleState.

    Adds nothing of its own; the states (including UNSPECIFIED, used as the
    default in CloudSQLInstance.__init__) come from resource.LifecycleState.
    """
    pass
class CloudSQLInstance(resource.Resource):
    """CloudSQL Instance resource."""

    RESOURCE_NAME_FMT = 'instances/%s'

    def __init__(
            self,
            instance_id,
            full_name=None,
            data=None,
            name=None,
            display_name=None,
            parent=None,
            locations=None,
            lifecycle_state=CloudSQLInstanceLifecycleState.UNSPECIFIED):
        """Initialize a CloudSQL instance resource.

        Args:
            instance_id (str): The cloud sql instance id.
            full_name (str): The full resource name and ancestry.
            data (str): Resource representation of the cloud sql instance.
            name (str): The cloud_sql_instance's unique GCP name, with the
                format "cloud_sql_instances/{id}".
            display_name (str): The cloud sql instance's display name.
            parent (Resource): The parent Resource.
            locations (List[str]): Locations this cloud sql instance resides
                in. If set, there should be exactly one element in the list.
            lifecycle_state (LifecycleState): The lifecycle state of the
                cloud_sql_instance.
        """
        super(CloudSQLInstance, self).__init__(
            resource_id=instance_id,
            resource_type=resource.ResourceType.CLOUD_SQL_INSTANCE,
            name=name,
            display_name=display_name,
            parent=parent,
            locations=locations,
            lifecycle_state=lifecycle_state)
        # Attributes not handled by the base Resource class.
        self.full_name = full_name
        self.data = data

    @classmethod
    def from_json(cls, parent, json_string):
        """Build a CloudSQLInstance from a JSON API response.

        Args:
            parent (Resource): resource this cloud_sql_instance belongs to.
            json_string (str): JSON string of a cloud_sql_instance GCP API
                response.

        Returns:
            CloudSQLInstance: cloud sql instance resource.
        """
        parsed = json.loads(json_string)
        sql_instance_id = parsed['name']
        ancestry = '{}cloudsqlinstance/{}/'.format(
            parent.full_name, sql_instance_id)
        return cls(
            parent=parent,
            instance_id=sql_instance_id,
            full_name=ancestry,
            display_name=sql_instance_id,
            locations=[parsed['region']],
            data=json_string,
        )
|
Meet Edgar; an important common link.
The top two American ATV brands share both a common founder and founding state. Edgar Hetteen formed Arctic Cat in 1960 in Thief River Falls, Minnesota and prior to that he was co-founder of Polaris Industries in Roseau, Minnesota around 1954.
Edgar Hetteen, a farmer before heading off to WWII, was the son of first generation Swedish immigrants. Sadly, he passed away back in 2011 at the age of 90.
Named after the North Star, Polaris was in fact the northern most company in the United States at the time of its founding.
|
from setuptools import find_packages
from setuptools import setup

# Packaging metadata for the sandglass.time Pyramid application.
setup(
    name='sandglass.time',
    version='0.1.0',
    packages=find_packages(),
    # 'sandglass' is a namespace package shared with sibling distributions.
    namespace_packages=['sandglass'],
    zip_safe=False,
    include_package_data=True,
    # Versions are pinned; bump deliberately and test.
    install_requires=[
        'gunicorn',
        'pyramid==1.5',
        'pyramid_tm==0.7',
        'pyramid_mailer==0.13',
        'sqlalchemy==0.9.1',
        'alembic==0.6.3',
        'zope.sqlalchemy',
        'zope.component==4.1.0',
        # Enum support for python < 3.4
        'flufl.enum',
        # Forms/data handling
        'colander',
        # Translations extraction support
        'Babel',
        'lingua',
        # Documentation support
        'Sphinx',
        # Timezone support
        'pytz',
        # Command line support
        'cement',
        'PasteScript',
    ],
    entry_points={
        # WSGI entry point used by PasteDeploy/gunicorn.
        'paste.app_factory': [
            'main = sandglass.time.main:make_wsgi_app',
        ],
        # CLI entry point.
        'console_scripts': [
            'sandglass = sandglass.time.command:main',
        ],
    },
    paster_plugins=['pyramid'],
    # Babel message extraction configuration (tests and locales are skipped).
    message_extractors={
        'sandglass/time': [
            ('**.py', 'lingua_python', None),
            ('tests/**', 'ignore', None),
            ('locales/**', 'ignore', None),
        ],
    },
)
|
Nematrian provides actuarial consultancy services to a range of client types, see Introduction – Consultancy. In due course we intend to make more generally available via the Nematrian website some of the algorithms that we ourselves use for such purposes.
Some of the material available in the Nematrian Publication Library is also particularly designed to be relevant to actuaries.
In the meantime, if you need help with an actuarial problem then please speak to your usual contact at Nematrian.
Please note that no web functions currently publicly available through the Nematrian website are classified by Nematrian as being explicitly ‘actuarial’ in nature. For example, they do not necessarily adhere to relevant model standards imposed on actuarial work by the UK’s Board for Actuarial Standards. If you are seeking access to models that do explicitly adhere to these standards then please speak to your usual contact at Nematrian. Usage of the Nematrian website is subject to the terms of the Nematrian License Agreement.
|
import struct
def fontName(path):
    """
    Return the font family name (name table ID 1) of a TrueType/OpenType
    font file at *path*, or None if the file has no name table or no
    family-name record.

    Only enough of the SFNT structure is parsed to reach the 'name' table;
    no validation of checksums or other tables is performed.
    """
    tags = {}
    ntoffset, offset, records = None, None, None
    with open(path, 'rb') as f:
        data = f.read()
    # Offset table: uint16 numTables at byte 4; 16-byte table records
    # start at byte 12, each with its 4-byte tag first and the table's
    # file offset (uint32) at record offset 8.
    tables = struct.unpack_from('>H', data, 4)[0]
    for i in range(tables):
        tag = data[i*16 + 12:i*16 + 16]
        if tag == b"name":
            ntoffset = struct.unpack_from('>I', data, i*16 + 20)[0]
            # Name table header: record count at +2, stringOffset at +4.
            offset = struct.unpack_from('>H', data, ntoffset + 4)[0]
            records = struct.unpack_from('>H', data, ntoffset + 2)[0]
            break
    if ntoffset is None:
        # BUGFIX: previously returned the (empty) tags dict here, giving the
        # function an inconsistent return type; return None like the other
        # "not found" path.
        return None
    storage = ntoffset + offset
    for i in range(records):
        # 12-byte name records start at ntoffset + 6; within each record the
        # nameID is at +6, the string length at +8 and the string offset
        # (relative to the storage area) at +10.
        name_id = struct.unpack_from('>H', data, ntoffset + i*12 + 12)[0]
        length = struct.unpack_from('>H', data, ntoffset + i*12 + 14)[0]
        str_offset = struct.unpack_from('>H', data, ntoffset + i*12 + 16)[0]
        value = data[storage + str_offset:storage + str_offset + length]
        # Drop NUL bytes so UTF-16BE-encoded ASCII names read cleanly.
        value = ''.join([chr(x) for x in value if x != 0])
        # Later records with the same nameID overwrite earlier ones.
        tags[name_id] = value
    return tags[1] if 1 in tags else None
|
Video of my very first print using LinuxCNC running on a BeagleBone with the BeBoPr cape to control my MendeMax RepRap 3D printer. The BeagleBone is running a Xenomai real-time kernel, but using the PRU (Programmable Realtime Unit, a 200 MHz deterministic microcontroller for off-loading time-critical tasks) to do step/dir and pwm generation. The PRU code is fairly advanced, and supports a configurable task list with full control over the number and types of tasks (ie: make as few/many step/dir and/or pwm outputs as you want). The PRU is currently running a 10 uS task loop, but can run faster or slower depending on how many tasks you want to run. The 10 uS is easily handling 4 step/dir tasks plus a pwm task with 3 outputs and is only busy about 1.5 uS (so 85% idle).
Look in the src/hal/drivers/hal_pru_generic directory. The *.p files are for the PRU, and the *.c files are the Linux/LinuxCNC side driver for it.
I like your printer a lot. Have you made major changes since this post? Do you use slic3r or other slicing software or do you do this all in linuxcnc?
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api, exceptions, _
class PreventiveOperationtype(models.Model):
    """Template type for machinery preventive maintenance operations.

    An operation template can be triggered by machine cycles, by elapsed
    time, or both; the margin fields shift the corresponding alarms
    (negative margins fire the alarm early).
    """
    _name = 'preventive.operation.type'
    _description = 'Machinery preventive operation template type'

    name = fields.Char('Name')
    ref = fields.Char('Operation Reference')
    cycles = fields.Integer('Cycles')
    basedoncy = fields.Boolean('Based on Cycles')
    basedontime = fields.Boolean('Based on Time')
    margin_cy1 = fields.Integer(
        'Cycles Margin 1', help="A negative number means that the alarm will "
        "be activated before the condition is met")
    margin_cy2 = fields.Integer('Cycles Margin 2')
    frequency = fields.Integer('Frequency', help="Estimated time for the next"
                               " operation.")
    interval_unit = fields.Selection([('day', 'Days'), ('week', 'Weeks'),
                                      ('mon', 'Months'), ('year', 'Years')],
                                     'Interval Unit')
    margin_fre1 = fields.Integer(
        'Frequency Margin 1', help="A negative number means that the alarm "
        "will be activated before the compliance date")
    interval_unit1 = fields.Selection([('day', 'Days'), ('week', 'Weeks'),
                                       ('mon', 'Months'), ('year', 'Years')],
                                      'Interval Unit')
    margin_fre2 = fields.Integer(
        'Frequency Margin 2', help="A negative number means that the alarm "
        "will be activated before the compliance date")
    interval_unit2 = fields.Selection([('day', 'Days'), ('week', 'Weeks'),
                                       ('mon', 'Months'), ('year', 'Years')],
                                      'Interval Unit')
    description = fields.Text('Description')
    hours_qty = fields.Float('Quantity Hours', required=False,
                             help="Expected time for the execution of the "
                                  "operation. hh:mm")

    @api.constrains('basedoncy', 'cycles')
    def _check_basedoncy(self):
        """Cycle-based operations need a strictly positive cycle count."""
        for record in self:
            if record.basedoncy and record.cycles <= 0:
                raise exceptions.ValidationError(
                    _("Operations based on cycles must have a positive cycle "
                      "frequency"))

    @api.constrains('basedontime', 'frequency', 'interval_unit')
    def _check_basedontime(self):
        """Time-based operations need a positive frequency and a unit."""
        for record in self:
            if record.basedontime and (
                    record.frequency <= 0 or not record.interval_unit):
                # BUGFIX: raise ValidationError as the sibling constraints do
                # (exceptions.Warning is not the constraint error type), and
                # drop the doubled space the two concatenated literals
                # produced ("time  frequency").
                raise exceptions.ValidationError(
                    _("Operations based on time must have a positive time "
                      "frequency"))

    @api.one
    @api.onchange('interval_unit')
    def onchange_interval_unit(self):
        """Propagate the base interval unit to both margin interval units."""
        if self.interval_unit:
            self.interval_unit1 = self.interval_unit
            self.interval_unit2 = self.interval_unit

    @api.constrains('margin_cy1', 'margin_cy2')
    def check_cycle_margins(self):
        """The first cycle margin must not come after the second one."""
        for record in self:
            if record.margin_cy1 and record.margin_cy2 and (
                    record.margin_cy1 > record.margin_cy2):
                raise exceptions.ValidationError(
                    _('First margin must be before second'))

    @api.constrains('margin_fre1', 'interval_unit1', 'margin_fre2',
                    'interval_unit2')
    def _check_time_margins(self):
        """Projected from today, time margin 1 must fall before margin 2."""
        for record in self:
            if record.interval_unit1 and record.interval_unit2:
                machine_operations = self.env['preventive.machine.operation']
                date = fields.Date.today()
                margin1 = machine_operations.get_interval_date(
                    date, record.margin_fre1, record.interval_unit1)
                margin2 = machine_operations.get_interval_date(
                    date, record.margin_fre2, record.interval_unit2)
                if margin1 > margin2:
                    raise exceptions.ValidationError(
                        _("First margin must be before second"))
class PreventiveOperationMaterial(models.Model):
    """A material (product) line consumed by a preventive operation."""
    _name = "preventive.operation.material"
    _description = "New material line."

    # Parent operation this material line belongs to.
    op_machi_mat = fields.Many2one('preventive.operation.matmach', 'Operation')
    product_id = fields.Many2one('product.product', 'Product', required=True)
    # NOTE(review): default is the string '1' rather than the float 1.0 —
    # presumably coerced by the ORM; confirm.
    product_uom_qty = fields.Float('Quantity', default='1')
    product_uom = fields.Many2one('product.uom', 'Unit of Measure',
                                  required=True)

    @api.one
    @api.onchange('product_id')
    def onchange_product(self):
        """Default the unit of measure from the selected product."""
        if self.product_id:
            self.product_uom = self.product_id.uom_id.id
class PreventiveOperationMatmach(models.Model):
    """Link between an operation type, its materials and a master operation."""
    # operation_machine_materials
    _name = 'preventive.operation.matmach'
    _description = 'Operation - Material - Machine Relation'

    name = fields.Char('Name')
    optype_id = fields.Many2one('preventive.operation.type', 'Operation')
    opmaster = fields.Many2one('preventive.master', 'Master Operation')
    material = fields.One2many('preventive.operation.material', 'op_machi_mat',
                               'Material')
    # Read-only mirrors of the linked operation type's trigger settings.
    basedoncy = fields.Boolean(related='optype_id.basedoncy')
    basedontime = fields.Boolean(related='optype_id.basedontime')
    cycles = fields.Integer(related='optype_id.cycles')
    frequency = fields.Integer(related='optype_id.frequency')
    interval_unit = fields.Selection(related='optype_id.interval_unit')
    hours_qty = fields.Float(related='optype_id.hours_qty')
    description = fields.Text('Description')

    @api.one
    @api.onchange('optype_id')
    def onchange_optype_id(self):
        """Copy the operation type's description as the default description."""
        if self.optype_id:
            self.description = self.optype_id.description
|
Martin Luther may not have been taking an SU for MOT but, had he been, he'd have twiddled the blinds accordingly.
Inner Hope paid off for our friend (and five of his friends) on Monday, as Dave Guscott awarded us yet another year of motoring.
"Still going strong", he said. We've become his regulars.
Actually, a bit more than hope went into 270 KTA's MOT. A two-day inspection and service, full grease and filter change kept me busy last week, alongside coats of paint on the new front wheels (hopefully to be fitted this weekend).
And so to Kingsbridge Running Day on September 17, where 270 KTA will be giving free rides in the capable hands of Driver Farley while I nurse yet another delicate SU back to the road (no - not the brother quite yet!).
Our friend will be tackling the notorious route to East Portlemouth, plus a new foray to Bigbury-on-Sea and another to... Inner Hope.
|
from fractions import Fraction
from copy import deepcopy
z_val = {}
def gcd(a, b):
    """Greatest common divisor, iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a
def lcm(a, b):
    """Least common multiple of two positive integers.

    Uses floor division so the result stays an int on Python 3; a * b is
    always exactly divisible by gcd(a, b), so no precision is lost.
    """
    return a * b // gcd(a, b)
class PolyTerm(object):
    """One term of a polynomial in the variables a_i.

    The term is coef * product of a_sub ** power for each [sub, power]
    pair stored in self.a.
    """

    def __init__(self, frac=Fraction(0, 1)):
        self.coef = frac
        self.a = []  # [subscript, power] pairs, e.g. [[1, 2], [2, 1]]

    def mul_coef(self, frac):
        """Scale the coefficient by *frac*."""
        self.coef = self.coef * frac

    def inner_sort(self):
        """Canonicalize: order the variable pairs by subscript."""
        self.a = sorted(self.a)

    def out(self):
        """Debug print of the term."""
        print("coef: %s, term: %s" % (self.coef, self.a))
class Poly(object):
def __init__(self):
self.poly = []
def mul_coef(self, coef):
for term in self.poly:
term.mul_coef(coef)
def mul_ai(self, sub):
new_poly = deepcopy(self.poly)
for term in new_poly:
find = False
for a in term.a:
if a[0] == sub:
find = True
a[1] += 1
break
if not find:
term.a.append([sub, 1])
term.inner_sort()
self.poly = new_poly
def add_poly(self, polyb):
ret_poly = Poly()
all_terms = []
ret_terms = []
for terma in self.poly:
all_terms.append(terma)
for termb in polyb.poly:
all_terms.append(termb)
ll = len(all_terms)
for i in range(ll):
for j in range(i+1, ll):
sta = set([ (s, p) for s, p in all_terms[i].a ] )
stb = set([ (s, p) for s, p in all_terms[j].a ] )
if sta == stb:
all_terms[i].coef = all_terms[i].coef + all_terms[j].coef
all_terms[j].coef = 0
for term in all_terms:
if term.coef != 0:
ret_terms.append(term)
ret_poly.poly = deepcopy(ret_terms)
return ret_poly
def get_poly(self):
ret = deepcopy(self.poly)
return ret
def out(self):
for term in self.poly:
term.out()
print("poly end")
def get_z_val(n):
    """
    Cycle index Z(S_n) of the symmetric group, memoized in the module-level
    z_val cache.

    https://en.wikipedia.org/wiki/Cycle_index
    """
    global z_val
    if n in z_val:
        return deepcopy(z_val[n])
    if n == 0:
        # Z(S_0) is the constant polynomial 1.
        base = Poly()
        base.poly = [PolyTerm(Fraction(1.0))]
        z_val[n] = deepcopy(base)
        return z_val[n]
    # Recurrence: Z(S_n) = (1/n) * sum_{i=1..n} a_i * Z(S_{n-i})
    acc = Poly()
    for cycle_len in range(1, n + 1):
        sub_poly = deepcopy(get_z_val(n - cycle_len))
        sub_poly.mul_ai(cycle_len)
        acc = acc.add_poly(sub_poly)
    acc.mul_coef(Fraction(1, n))
    z_val[n] = deepcopy(acc)
    return z_val[n]
def func(n, m):
    """
    Product polynomial of the cycle indices Z(S_n) and Z(S_m).

    For every pair of terms, coefficients multiply, and each variable pair
    (a_sa^pa, a_sb^pb) contributes a_l^(sa*sb*pa*pb/l) with l = lcm(sa, sb).
    """
    poly_n = get_z_val(n)
    poly_m = get_z_val(m)
    ret_poly = Poly()
    for terma in poly_n.poly:
        for termb in poly_m.poly:
            new_term = PolyTerm()
            new_term.coef = terma.coef * termb.coef
            for sa, pa in terma.a:
                for sb, pb in termb.a:
                    ll = lcm(sa, sb)
                    # Floor division keeps the power an int on Python 3;
                    # sa * sb is always divisible by lcm(sa, sb).
                    new_term.a.append([ll, sa * sb * pa * pb // ll])
            ret_poly.poly.append(new_term)
    return ret_poly
def subs(term, v):
    """Evaluate *term* with every variable a_i substituted by the value *v*."""
    result = term.coef
    for _, power in term.a:
        result *= v ** power
    return result
def answer(w, h, s):
    """
    Count distinct w x h grids over s symbols, up to row and column
    permutations (Burnside/Polya substitution into the product cycle index),
    returned as a decimal string.
    """
    grid_poly = func(w, h)
    total = sum(subs(term, s) for term in grid_poly.poly)
    return str(total)
def table():
    """Print C-style 'ans[i][j] = ...;' lines for all grids with i*j <= 25."""
    for width in range(1, 11):
        for height in range(1, 11):
            if width * height > 25:
                continue
            print("ans[%s][%s] = %s;" % (width, height,
                                         answer(width, height, 2)))
def main():
    """Write one answer per line for all grids with i*j <= 25 to file "out"."""
    with open("out", "w") as f:
        for width in range(1, 11):
            for height in range(1, 11):
                if width * height > 25:
                    continue
                f.write("%s\n" % (answer(width, height, 2)))
if __name__ == "__main__":
table()
# main()
|
At Nyland School, we believe Red Nose Day is much more than just an opportunity for us to arrange a number of fun and exciting fundraising activities. The event gives us the platform to teach our pupils about the importance of charity, being generous, and supporting and helping others. These lessons will help to develop good habits that last long into their adult life.
|
import networkx
import logging
from ..errors import AngrCFGError
l = logging.getLogger(name="angr.cfg_base")
class CFGBase(object):
"""
The base class for control flow graphs.
"""
def __init__(self, project, context_sensitivity_level):
self._project = project
# Initialization
self._graph = None
self._nodes = None
self._edge_map = None
self._loop_back_edges = None
self._overlapped_loop_headers = None
self._thumb_addrs = set()
if context_sensitivity_level < 0:
raise Exception("Unsupported context sensitivity level %d" % context_sensitivity_level)
self._context_sensitivity_level=context_sensitivity_level
def __contains__(self, cfg_node):
return cfg_node in self._graph
@property
def context_sensitivity_level(self):
return self._context_sensitivity_level
def _initialize_cfg(self):
"""
Re-create the DiGraph
"""
self._graph = networkx.DiGraph()
# pylint: disable=no-self-use
def copy(self):
raise Exception("Not implemented.")
def _construct(self):
raise Exception("Not implemented")
def output(self):
raise Exception("Not implemented")
# TODO: Mark as deprecated
def get_bbl_dict(self):
return self._nodes
def get_predecessors(self, cfgnode, excluding_fakeret=True):
"""
Get predecessors of a node on the control flow graph.
:param CFGNode cfgnode: The node
:param bool excluding_fakeret: True if you want to exclude all predecessors that is connected to the node with
a fakeret edge.
:return: A list of predecessors
:rtype: list
"""
if not excluding_fakeret:
if cfgnode in self._graph:
return self._graph.predecessors(cfgnode)
else:
return []
else:
predecessors = []
for pred, _, data in self._graph.in_edges_iter([cfgnode], data=True):
jumpkind = data['jumpkind']
if jumpkind != 'Ijk_FakeRet':
predecessors.append(pred)
return predecessors
def get_successors(self, basic_block, excluding_fakeret=True):
if not excluding_fakeret:
if basic_block in self._graph:
return self._graph.successors(basic_block)
else:
return []
else:
successors = []
for _, suc, data in self._graph.out_edges_iter([basic_block], data=True):
jumpkind = data['jumpkind']
if jumpkind != 'Ijk_FakeRet':
successors.append(suc)
return successors
def get_successors_and_jumpkind(self, basic_block, excluding_fakeret=True):
successors = []
for _, suc, data in self._graph.out_edges_iter([basic_block], data=True):
if not excluding_fakeret or data['jumpkind'] != 'Ijk_FakeRet':
successors.append((suc, data['jumpkind']))
return successors
def get_all_predecessors(self, cfgnode):
"""
Get all predecessors of a specific node on the control flow graph.
:param CFGNode cfgnode: The CFGNode object
:return: A list of predecessors in the CFG
:rtype: list
"""
return networkx.dfs_predecessors(self._graph, cfgnode)
def get_all_successors(self, basic_block):
return networkx.dfs_successors(self._graph, basic_block)
def get_node(self, addr_tuple):
"""
Get a single node from node key.
:param addr_tuple: The node key
:return:
"""
if addr_tuple in self._nodes.keys():
return self._nodes[addr_tuple]
else:
return None
def nodes(self):
return self._graph.nodes()
def get_any_node(self, addr, is_syscall=None, anyaddr=False):
"""
Get an artitrary CFGNode (without considering their contexts) from our graph.
:param addr: Address of the beginning of the basic block. Set anyaddr to True to support arbitrary address.
:param is_syscall: Whether you want to get the syscall node or any other node. This is due to the fact that
syscall SimProcedures have the same address as the targer it returns to.
None means get either, True means get a syscall node, False means get something that isn't
a syscall node.
:param anyaddr: If anyaddr is True, then addr doesn't have to be the beginning address of a basic block.
`anyaddr=True` makes more sense after the CFG is normalized.
:return: A CFGNode if there is any that satisfies given conditions, or None otherwise
"""
# TODO: Loop though self._nodes instead of self.graph.nodes()
# TODO: Of course, I should first fix the issue that .normalize() doesn't update self._nodes
for n in self.graph.nodes_iter():
cond = n.looping_times == 0
if anyaddr and n.size is not None:
cond = cond and (addr >= n.addr and addr < n.addr + n.size)
else:
cond = cond and (addr == n.addr)
if cond:
if is_syscall is None:
return n
if n.is_syscall == is_syscall:
return n
return None
def _get_irsb(self, cfg_node):
if cfg_node is None:
return None
if cfg_node.input_state is None:
raise AngrCFGError(
'You should save the input state when generating the CFG if you want to retrieve the SimIRSB later.')
# Recreate the SimIRSB
return self._project.factory.sim_run(cfg_node.input_state)
def irsb_from_node(self, cfg_node):
"""
Create SimRun from a CFGNode object.
"""
return self._get_irsb(cfg_node)
def get_any_irsb(self, addr):
"""
Returns a SimRun of a certain address. If there are many SimRuns with the same address in CFG,
return an arbitrary one.
You should never assume this method returns a specific one.
"""
cfg_node = self.get_any_node(addr)
return self._get_irsb(cfg_node)
def get_all_nodes(self, addr, is_syscall=None):
"""
Get all CFGNodes whose address is the specified one,
:param addr: Address of the node
:param is_syscall: True returns the syscall node, False returns the normal CFGNode, None returns both
:return: all CFGNodes
"""
results = [ ]
for cfg_node in self._graph.nodes_iter():
if cfg_node.addr == addr:
if is_syscall and cfg_node.is_syscall:
results.append(cfg_node)
elif is_syscall == False and not cfg_node.is_syscall:
results.append(cfg_node)
else:
results.append(cfg_node)
return results
def get_all_irsbs(self, addr):
"""
Returns all SimRuns of a certain address, without considering contexts.
"""
nodes = self.get_all_nodes(addr)
results = [ ]
for n in nodes:
results.append(self._get_irsb(n))
return results
def get_loop_back_edges(self):
return self._loop_back_edges
def get_irsb_addr_set(self):
irsb_addr_set = set()
for tpl, _ in self._nodes:
irsb_addr_set.add(tpl[-1]) # IRSB address
return irsb_addr_set
def get_branching_nodes(self):
"""
Returns all nodes that has an out degree >= 2
"""
nodes = set()
for n in self._graph.nodes():
if self._graph.out_degree(n) >= 2:
nodes.add(n)
return nodes
def get_exit_stmt_idx(self, src_block, dst_block):
"""
Get the corresponding exit statement ID for control flow to reach destination block from source block. The exit
statement ID was put on the edge when creating the CFG.
Note that there must be a direct edge between the two blocks, otherwise an exception will be raised.
:return: The exit statement ID
"""
if not self.graph.has_edge(src_block, dst_block):
raise AngrCFGError('Edge (%s, %s) does not exist in CFG' % (src_block, dst_block))
return self.graph[src_block][dst_block]['exit_stmt_idx']
@property
def graph(self):
return self._graph
def remove_edge(self, simrun_from, simrun_to):
edge = (simrun_from, simrun_to)
if edge in self._graph:
self._graph.remove_edge(edge)
def is_thumb_addr(self, addr):
    # True when the given address was recorded in the THUMB-mode address set.
    return addr in self._thumb_addrs
|
DOHA, Qatar (AP) — The economic slowdown gripping countries across the Persian Gulf can be seen in layoffs, slowed construction projects and government cutbacks. For the millions of foreign workers drawn by brighter job prospects, it can have a far darker side if they find themselves deep in debt.
Gulf countries like Qatar largely don’t have bankruptcy laws, leaving laid-off workers on the hook for huge outstanding sums while often banned from traveling outside of the country. That leaves many unemployed begging friends and family for help while frantically selling off all their belongings. Others have killed themselves out of desperation.
The Middle East has weathered several boom-and-bust cycles over the last decades, both buoyed and beaten by the global price of crude oil, as well as the recent recession. In 2009, the financial meltdown in Dubai saw dusty luxury cars parked and abandoned at its international airport and across the city as foreigners fled their debts.
This recent financial collapse began with oil prices falling from over $100 a barrel in the summer of 2014 to bottom out this January to under $30, a 12-year low. In the time since, oil has clawed back to $50 on supply disruptions and lowered reserves, but the damage already had been done in the Mideast.
Among those hard hit was Qatar, a small oil-and-gas-rich country on the Arabian Peninsula where construction accelerated with the announcement it would host the 2022 FIFA World Cup. As oil and gas prices sank, so too did Qatar’s coffers, leading to layoffs across both private and public companies.
The state-run Qatar Petroleum fired at least 1,500 foreign workers in recent restructuring, said Mohammed bin Saleh al-Sada, Qatar’s energy and industry minister.
Maersk Oil said in October it would cut as much as 12 percent of its staff in Qatar. Vodafone’s Qatar subsidiary announced on May 17 it would cut about 10 percent of its workforce, while mobile phone competitor Ooredoo also made layoffs this year. Al-Jazeera, the peninsula nation’s satellite news broadcaster, also shut down its American channel in April.
Foster, 50, a former senior operation manager for the state-linked Hamad Medical Corp.’s ambulance services, began work in March 2014 on a three-year contract, hoping to stay for at least six years to make enough to buy a house in the United States. However, he said he didn’t receive his first paycheck until three months into his job, which forced him to get a loan of 300,000 Qatari riyals ($82,000) to cover his living expenses, debts and child support payments in the U.S.
In January, Foster said his boss called him into his office and laid him off, along with other staffers. Four days later, Qatar National Bank closed his account, putting all he had toward his remaining loan, he said.
“There was no notification. It was just a text that said: ‘You’re now overdrawn,'” Foster said.
Under Qatari law, foreign workers must apply for an exit permit through their employer to leave the country. When Foster couldn’t leave for a cruise he planned before with his wife, he realized he was trapped.
Foster said he put his wife, Pepper, on a flight out, then sold all of his belongings, sleeping at night on the floor of his company-provided villa and hiding his remaining cash in the freezer, fearful he could be arrested as a debtor. He dodged phone calls and knocks at the door while trying to pull together the cash needed to pay off his debt.
“I had to give them my retirement and my dad’s retirement to leave,” he said.
Hamad Medical Corp., Qatar’s main health care provider, and Qatari officials did not respond to requests for comment.
But Foster said he knew others in far worse shape, including one colleague who even purchased a rope at one point to hang himself. Others have taken their own lives.
A British coroner investigating the suspected suicide of an engineer from Gloucestershire found hanging in his Doha home in February 2015 ruled this March that “financial worries” may have played a part. The case remains open as Qatari authorities provided only “limited information,” according to the inquest report obtained by the AP.
Suicides also affect those coming to Gulf countries for work as laborers, taxi drivers and other low-paying jobs. They often pay recruiters back home in Asia or Africa huge sums that take several years to pay off.
India, one of the main countries supplying low-paid workers to the Gulf, saw at least 541 of its citizens kill themselves in the United Arab Emirates in the last three years, according to government statistics offered to parliament in December. At least 337 Indians died in suspected suicides in Saudi Arabia during the same period, while other Gulf countries saw annual suicide numbers in the double digits.
In Qatar, 21 Indians alone killed themselves in 2015. The deaths continue into this year.
Associated Press writers Chonchui Ngashangva in New Delhi and Adam Schreck contributed to this report.
|
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from datetime import date, datetime
from session import Session
# Module-level engine handle; set by create_db_engine() before any table work.
db = None
# Declarative base class that all ORM models in this module inherit from.
Base = declarative_base()
def enum(**enums):
    """Build a simple enum-like class whose attributes are the given keyword args."""
    members = dict(enums)
    return type('Enum', (), members)
def create_db_engine(server):
    # Create the SQLite engine for the server's configured database file and
    # store it in the module-level `db` handle for create_tables() to use.
    global db
    db = create_engine('sqlite:///' + server.dbFile)
def create_tables():
    # Create all tables declared on Base (if missing) and bind the session
    # factory to the engine. Requires create_db_engine() to have run first.
    Base.metadata.create_all(db)
    Session.configure(bind=db)
class EventLog(Base):
    """ORM model: one authentication event (login attempt) per row."""
    __tablename__ = 'eventLog'
    # Composite primary key: timestamp of the attempt plus the username.
    date = Column('date', DateTime, primary_key=True)
    username = Column('username', String, primary_key=True)
    # Source IP address of the attempt.
    ipAddress = Column('ipAddress', String)
    # Whether the authentication attempt succeeded.
    success = Column('success', Boolean)
    # Name/identifier of the server the attempt targeted.
    server = Column('server', String)
    def __init__(self, date, username, ipAddress, success, server):
        self.date = date
        self.username = username
        self.ipAddress = ipAddress
        self.success = success
        self.server = server
class User(Base):
    """ORM model: per-user aggregate state (score and scare tracking)."""
    __tablename__ = 'users'
    username = Column('username', String, primary_key=True)
    date = Column('date', DateTime)
    score = Column('score', Integer)
    scareCount = Column('scareCount', Integer)
    lastScareDate = Column('lastScareDate', DateTime)
    def __init__(self, username, date, score):
        self.username=username
        self.date=date
        self.score=score
        # New users start with a zero scare count.
        self.scareCount=0
        # NOTE(review): the `date` parameter shadows the `date` class imported
        # at module level, so `date.today()` here resolves today() on the
        # argument's class rather than on datetime.date itself — confirm
        # whether today's date or the passed-in date was intended.
        self.lastScareDate = date.today()
class Days(Base):
    """ORM model: per-user daily profile snapshot (pickled) plus a total count."""
    __tablename__ = 'days'
    # Composite primary key: snapshot date plus username.
    date = Column('date', DateTime, primary_key=True)
    username = Column('username', String, primary_key=True)
    # Pickled profile object; its structure is not visible from this module.
    profile = Column('profile', PickleType)
    totalCount = Column('totalCount', Integer)
    def __init__(self, date, username, profile, totalCount):
        self.date=date
        self.username=username
        self.profile = profile
        self.totalCount = totalCount
class Hours(Base):
    """ORM model: per-user hourly profile snapshot (pickled) plus a total count."""
    __tablename__ = 'hours'
    # Composite primary key: snapshot timestamp plus username.
    date = Column('date', DateTime, primary_key=True)
    username = Column('username', String, primary_key=True)
    # Pickled profile object; its structure is not visible from this module.
    profile = Column('profile', PickleType)
    totalCount = Column('totalCount', Integer)
    def __init__(self, date, username, profile, totalCount):
        self.date=date
        self.username=username
        self.profile = profile
        self.totalCount = totalCount
class Servers(Base):
    """ORM model: per-user server-usage profile snapshot (pickled) plus a total count."""
    __tablename__ = 'servers'
    # Composite primary key: snapshot timestamp plus username.
    date = Column('date', DateTime, primary_key=True)
    username = Column('username', String, primary_key=True)
    # Pickled profile object; its structure is not visible from this module.
    profile = Column('profile', PickleType)
    totalCount = Column('totalCount', Integer)
    def __init__(self, date, username, profile, totalCount):
        self.date=date
        self.username=username
        self.profile = profile
        self.totalCount = totalCount
class IpAddress(Base):
    """ORM model: per-user IP-address profile, plus static IP classification helpers."""
    __tablename__ = 'ipAddress'
    # Composite primary key: snapshot timestamp plus username.
    date = Column('date', DateTime, primary_key=True)
    username = Column('username', String, primary_key=True)
    # Pickled profile object; its structure is not visible from this module.
    profile = Column('profile', PickleType)
    totalCount = Column('totalCount', Integer)
    def __init__(self, date, username, profile, totalCount):
        self.date=date
        self.username=username
        self.profile = profile
        self.totalCount = totalCount
    @staticmethod
    def checkIpForVpn(ip):
        # True when the address starts with 10.42. (treated as the VPN range).
        # NOTE(review): assumes `ip` is a dotted string with at least two
        # octets; a bare "10" would raise IndexError — confirm callers always
        # pass full addresses.
        quadrantList = ip.split('.')
        if quadrantList[0] == '10' and quadrantList[1] == '42':
            return True
        return False
    @staticmethod
    def checkIpForInternal(ip):
        # True for addresses starting 10.24., 10.26. or 172.16. (treated as
        # internal networks). Same two-octet assumption as checkIpForVpn.
        quadrantList = ip.split('.')
        if quadrantList[0] == '10':
            if quadrantList[1] == '24' or quadrantList[1] == '26':
                return True
        elif quadrantList[0] == '172' and quadrantList[1] == '16':
            return True
        return False
class SyslogMsg():
    """A single syslog message together with its origin and receive time."""
    def __init__(self, data='', host='', port=0):
        # Raw payload plus the sender's address and port.
        self.data, self.host, self.port = data, host, port
        # Stamp the message with its creation (receive) time.
        self.date = datetime.now()
class MailConf():
    # Simple configuration holder for mail-related behavior.
    def __init__(self, emailTest=False):
        # emailTest: when True, mail handling runs in test mode (exact
        # semantics depend on the consumer of this config, not visible here).
        self.emailTest = emailTest
|
I can trace part of my increased expenses to the fact that my lunch patterns have changed again.
In the few times I am at the office, I end up eating lunch by myself. That means I have the freedom, somewhat, to choose what to eat and when to eat. Are there vegetables? Are you in the mood for Korean? Are you willing to walk to the mall in this intense heat? Do you really have a choice?
In my previous job that only was a problem when I'm absolutely certain I'm alone. Otherwise, I eat with the other managers - I'm not one, by the way - and we always eat at this clump of carinderias right behind our building. We leave earlier to avoid the employees of the call center above us, the sheer size (numbers-wise) of which almost always results in overwhelmed elevators.
Eating at carinderias was never beneath me. They're just too far from me now, and the fact that I tend to have lunch after everybody else has means they're likely to have run out of food by the time I get there. It's a shame. The food is cheap, of course, but more importantly, they're the stuff you don't easily find in malls. For under a hundred bucks - maybe not anymore; thank you, record inflation - I can have a cup and a half of rice, a meat viand, a vegetable viand, and a bottle of Mountain Dew.
One of my favorite combos are adobong isaw and kilawing puso ng saging sa gata. Okay. Sorry. English, right. The latter is a vegetable dish: banana hearts (they're not really hearts, but do you call them cores?) cooked in vinegar and coconut milk. It's got both a nice zing and a nice bite, and it can actually stand on its own. The former is a meat dish, although some supposedly advanced Westerners might be squeamish: chopped-up pork intestines boiled in vinegar and soy sauce and pepper and garlic, until the broth has reduced and the offal is cooking in its own oils.
We've come a long way, perhaps not for the better, from the thinking that every bit of the animal must be cooked and served. (It's weird hearing historical food documentaries highlight the fact that our ancestors left nothing to waste.) Sure, we Filipinos still eat offal, but the impression that it is dirty never really leaves us. It's ingrained from childhood. You don't buy "dirty" ice cream. You don't buy squidballs. You don't buy isaw. They're cooked outside - you don't know where that's come from! Therefore, me getting adobong isaw is an act of rebellion. That, and we've romanticized street food when eaten in a country that's not our own. Take that cup of beef offal I had in Hong Kong - but I digress.
"Ano 'yan?" one of my colleagues would ask me as I sit down on the table.
"Ah, bawal na sa amin 'yan," he'd reply. "Mataas sa uric 'yan, eh. Pero bata ka pa! Puwede ka pa diyan."
The conversation inevitably ends up on maintenance medication for hypertension.
I honestly thought I'd be safe from the flu bug that seems to be affecting everyone lately. It's been in our home for weeks and I haven't been hit. However, yes, I fell for it too. I genuinely believe I must've gotten it somewhere else, though. I assume it's at the cramped DFA passport renewal center. We really should move towards a culture of face masks, but then, that takes a country-wide panic centered on an obscure respiratory illness, like in Hong Kong, and that would batter our #PinoyPride, so no - but, again, I digress.
I had my passport renewed on a Friday. The following day I had a big meeting for work: our annual planning session, and I was leading a good chunk of it to boot. But I started getting sick just before my presentation, and it happened just before lunch was served.
We were at this Cebuano restaurant, in a function room right by floor-to-ceiling windows overlooking the city. I've heard good things about the food. Naturally, I'm excited. Grilled scallops and roast pork belly (flown in from Cebu, supposedly) and desserts I fail to get when I'm actually in the city! And more importantly, you don't have to pay for a thing, although arguably you've worked hard for it some months back.
But I wasn't feeling well. But it doesn't mean I won't try. The dishes come in and I tried to eat, if only for the simplistic logic that eating gives you energy. I tried one scallop. I felt queasy. I tried one piece of ngo hiong - essentially kikiam, but with vegetables you can discern. Still queasy. By the time the sisig came in, I've had enough - and I haven't touched half of my rice. Considering I'm the guy who gets the rice of those who avoid carbohydrates, this is unusual.
"Wala ka yatang ganang kumain ngayon, Niko?" one of my colleagues ask.
"Ang rich masyado," I answer.
Right there, it all hits me. I'm thirty. I'm no longer young. Okay, you'll all surely debate me about what young really means, and then there's the fact that, at least amongst my peers, I'm still on the young side, if not the youngest. But I'm definitely getting older. I now have to really start watching what I eat, or else I doom myself to a lifetime of blood chemistry procedures and a weekly bill, in the thousands, of blood thinners, cholesterol regulators and other compound phrases, Well, even if I watch myself I'll likely end up there anyway. I'm doomed.
Being the youngest in conversations about middle-age health adjustments has its benefits, however. In some instances I've been doing it unknowingly. I'm not fond of adding seasoning, for instance. (Do it while you're cooking, sure, but otherwise let them set their pace. But really, it's because my mother used to sprinkle patis on my plate, and I hated how some parts are saltier than others.) Add to that the family not using MSG in daily cooking, and you can argue that my taste buds are attuned towards the bland. In other instances I've been trying to learn, although it's virtually impossible to disable my sweet tooth. I just finished a donut!
Still, the inevitable ending is, I will have to stop eating some, or all, of the things that I enjoy. At some point my doctor's dietary advice would be to stop eating anything that has flavor. At some point, the phrase "bawal ang masarap" - used in a non-sexual context - will come out of my mouth. At some point I will long for the days when I could delay what really is a series of attempts to delay death.
But then, existentialists say you haven't fully lived until you die, and if dying means eating what you want, well, why not live a little? Or a lot, perhaps.
Also, getting adobong isaw for lunch will always be an act of rebellion I'll always look forward to doing, provided I'm in that carinderia again, and it's all in stock.
|
from psoDNF import PSODNF
from pso import QtApp
from PyQt4 import QtGui
from dnfpyUtils.models.modelNSpike import ModelNSpike
import numpy as np
from dnfpyUtils.scenarios.scenarioRobustness import ScenarioRobustness
import dnfpy.controller.runner as runner
from dnfpyUtils.scenarios.scenarioSwitch import ScenarioSwitch
from dnfpyUtils.scenarios.scenarioNoise import ScenarioNoise
class PSODNFLin(PSODNF):
    """PSO optimization of a spiking DNF model's excitation/inhibition parameters."""
    def getListParam(self):
        # Correspond to betaE, betaI, alphaE, alphaI.
        return ["iExc","iInh","wExc","wInh"]
    def getBounds(self):
        """return (lowerBounds, upperBounds)"""
        z = 10e-6
        lowerBounds = np.array([z,z,z,z])
        upperBounds = np.array([10,1,1,2])
        return (lowerBounds,upperBounds)
    def getEvaluationParamsDict(self):
        # Simulation end time and wall-clock budget (seconds) per evaluation.
        return dict(timeEnd=20,allowedTime=2)
    def getConstantParamsDict(self):
        return dict(size=49,lateral='dol',activation='step',model='spike')
    def evaluate(self,indiv):
        """Fitness of one particle: lower is better.

        The robustness scenario always runs; the (expensive) switch and noise
        scenarios only run when the robustness result is good enough,
        otherwise fixed penalty values are substituted.
        """
        #TODO have a list of scenario
        scenarioR = ScenarioRobustness()
        scenarioS = ScenarioSwitch()
        scenarioN = ScenarioNoise()
        model = self.getModel(indiv)
        timeEnd = self.evaluationParamsDict['timeEnd']
        allowedTime = self.evaluationParamsDict['allowedTime']
        (errorR,wellClusterizedR,time,convergenceR,maxNbAct,meanNbAct,elapsedTime,errorShapeR,compEmpty)\
                = runner.launch(model, scenarioR, timeEnd,allowedTime)
        if errorR < 1 and errorShapeR < 3. and convergenceR < 30:
            (errorS,wellClusterizedS,time,convergenceS,maxNbAct,meanNbAct,elapsedTime,errorShapeS,compEmpty)\
                    = runner.launch(model, scenarioS, 6.,allowedTime)
            (errorN,wellClusterizedN,time,convergenceN,maxNbAct,meanNbAct,elapsedTime,errorShapeN,compEmpty)\
                    = runner.launch(model, scenarioN, timeEnd,allowedTime)
        else:
            # Penalty values when the robustness run disqualifies the particle.
            (errorS, wellClusterizedS, errorShapeS, convergenceS) = (10, 10, 10, 100)
            # Bug fix: was `errorNhapeN` (typo), which left errorShapeN
            # undefined on this branch.
            (errorN, wellClusterizedN, errorShapeN, convergenceN) = (10, 10, 10, 100)
        # A scenario that never converged reports None; treat as worst case.
        if convergenceS is None:
            convergenceS = 100
        if convergenceR is None:
            convergenceR = 100
        if convergenceN is None:
            convergenceN = 100
        fitnessError = (errorR + errorS + errorN)/3.
        fitnessCluster = (wellClusterizedR + wellClusterizedS + wellClusterizedN)/3.
        # Bug fix: the shape fitness mixed in wellClusterizedN instead of
        # errorShapeN (copy-paste error); average the three shape errors.
        fitnessShape = (errorShapeR + errorShapeS + errorShapeN)/3.
        fitnessConv = (convergenceR + convergenceS + convergenceN)/3.
        return fitnessShape + fitnessError*10 + fitnessConv/10.
if __name__ == "__main__":
    import sys
    def main():
        """Run the PSO search inside a Qt application and report the best result."""
        app = QtGui.QApplication([""])
        view = QtApp()
        model = PSODNFLin(view,swarmSize=100,nbEvaluationMax=30000,nbThread=8)
        view.setModel(model)
        model.start()
        # Bug fix: the original called sys.exit(app.exec_()) first, which made
        # the result reporting below unreachable. Run the event loop, print the
        # best particle found, then exit with the loop's status code.
        ret = app.exec_()
        res = (model.bestX, model.bestFitness)
        print(res)
        sys.exit(ret)
    main()
|
I’m struggling, can you help?
HI, I appreciate you taking the time to read through my plea.
I’m 19 living on my own, trying to work towards a successful career. I work 43 hours a week at a boatyard training to become a qualified Boatbuilder. Since i’m young and I am still training my wage is very low.
Even though I work long hours during the week, my wallet still falls short. After I have paid my bills, monthly car insurance, etc., I have virtually nothing left. I have worked out exactly how much money I can spend a week, and I break even.
However that doesn’t leave me with anything if I want a social life, if my car breaks down or my washing machine breaks. Both of which happened last week. My car failed its Mot leaving me with repairs of £400, and my washing machine broke. I can live without a washing machine for a while but I need my vehicle to get me to and from work.
Even if it’s just a pound, I appreciate any contribution.
|
import cv2
import numpy as np
import torch
from torchvision import transforms
from PIL import Image
from visnav.algo import tools
from visnav.algo.base import AlgorithmBase
from visnav.algo.image import ImageProc
from visnav.algo.tools import Stopwatch, PositioningException
from poseilluminet import PoseIllumiNet, PoseIllumiDataset
from visnav.settings import *
class AbsoluteNavigationNN(AlgorithmBase):
    """Absolute navigation via CNN: detect the target in a scene image, crop
    around it, run a pose + illumination network, and write the estimated
    position/orientation back into the system model.
    """

    DEF_MODEL_NAME = 'rose-mob-v10.tar' # median ~28deg: v6, v8; ~25deg: v9, 27.5deg: v10
    # Pixel luminosity threshold used to segment the target from background.
    DEF_LUMINOSITY_THRESHOLD = 65
    # Margin (pixels) kept around the detected object when cropping.
    DEF_CROP_MARGIN = 10
    # Minimum segmented pixel count for a valid detection (~30% of a r=50px disk).
    DEF_MIN_PIXELS = int(np.pi * 50 ** 2 * 0.3)
    # When True, estimate the segmentation threshold per image instead of
    # using the fixed DEF_LUMINOSITY_THRESHOLD.
    DEF_ESTIMATE_THRESHOLD = False
    def __init__(self, system_model, render_engine, obj_idx, model_name=None, use_cuda=True, verbose=True):
        """Store configuration; the network weights themselves are lazy-loaded."""
        super(AbsoluteNavigationNN, self).__init__(system_model, render_engine, obj_idx)
        self.model_name = model_name or AbsoluteNavigationNN.DEF_MODEL_NAME
        self.model = None  # lazy load
        self.verbose = verbose
        if use_cuda:
            # Initialize the CUDA context, then pick GPU if actually available.
            torch.cuda.current_device()
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        else:
            self.device = torch.device("cpu")
    def load_model(self, path=None):
        """Load network weights from *path* (default: model_name inside DATA_DIR)
        and put the model in eval mode on the configured device.
        """
        path = path or os.path.join(DATA_DIR, self.model_name)
        data = torch.load(path)
        name = data.get('name', '')
        if len(name) > 0:
            # Guard against loading a model trained for a different mission.
            assert name.split('-')[0] == self.system_model.mission_id, \
                "wrong model loaded (%s) for current mission (%s)" % (name.split('-')[0], self.system_model.mission_id)
        if self.verbose:
            print("model '%s' loaded (%s: %s, nf=%d, do=%.1f), trained for %d epochs, validation loss %.3f" % (
                path, name, data['arch'], data.get('features', 2048), data.get('dropout', 0.5),
                data['epoch'], data.get('loss', np.nan),))
        # referred from densepose-project
        self.model = PoseIllumiNet(arch=data['arch'],
                                   width_mult=data.get('width_mult', 1.0),
                                   num_features=data.get('features', 2048),
                                   dropout=data.get('dropout', 0.5))
        # Training-only loss parameters are not part of the network weights.
        for k in ('cost_fn.gamma', 'cost_fn.beta'):
            data['model'].pop(k)
        self.model.load_state_dict(data['model'])
        # optimizer.load_state_dict(data['optimizer'])
        self.model.to(self.device)
        self.model.eval()
    def process(self, orig_sce_img, outfile, rotate_sc=False, **kwargs):
        """Estimate the target's pose from *orig_sce_img* and update system_model.

        :param orig_sce_img: scene image array, or a path to load the image from
        :param outfile: base path for debug/log images, or None to disable
        :param rotate_sc: passed through to set_cropped_system_scf
        :raises PositioningException: if the target is not detected in the image
        """
        # maybe load torch model
        if self.model is None:
            self.load_model()
        if outfile is not None:
            self.debug_filebase = outfile + ('n' if isinstance(orig_sce_img, str) else '')
        # maybe load scene image
        if isinstance(orig_sce_img, str):
            orig_sce_img = self.load_target_image(orig_sce_img)
        self.timer = Stopwatch()
        self.timer.start()
        if self.DEF_ESTIMATE_THRESHOLD:
            threshold = ImageProc.optimal_threshold(None, orig_sce_img)
        else:
            threshold = self.DEF_LUMINOSITY_THRESHOLD
        # detect target, get bounds
        x, y, w, h = ImageProc.single_object_bounds(orig_sce_img, threshold=threshold,
                                                    crop_marg=self.DEF_CROP_MARGIN,
                                                    min_px=self.DEF_MIN_PIXELS, debug=DEBUG)
        if x is None:
            raise PositioningException('asteroid not detected in image')
        # crop image to the network's expected 224x224 input size
        img_bw = ImageProc.crop_and_zoom_image(orig_sce_img, x, y, w, h, None, (224, 224))
        # save cropped image in log archive (timer paused so I/O isn't counted)
        if BATCH_MODE and self.debug_filebase:
            self.timer.stop()
            cv2.imwrite(self.debug_filebase+'a.png', img_bw)
            self.timer.start()
        # massage input: grayscale -> 3-channel PIL image -> normalized tensor batch
        input = cv2.cvtColor(img_bw, cv2.COLOR_GRAY2BGR)
        input = Image.fromarray(input)
        input = PoseIllumiDataset.eval_transform(input)[None, :, :, :].to(self.device, non_blocking=True)
        # run model
        with torch.no_grad():
            output = self.model(input)
        # massage output
        output = output[0] if isinstance(output, (list, tuple)) else output
        output = output.detach().cpu().numpy()
        # check if estimated illumination direction is close or not
        # NOTE(review): a bad estimate is only reported, not rejected — confirm
        # whether the pose should still be applied in that case.
        ill_est = self.model.illumination(output)[0]
        r_ini, q_ini, ill_ini = self.system_model.get_cropped_system_scf(x, y, w, h)
        if tools.angle_between_v(ill_est, ill_ini) > 10:  # max 10 degree discrepancy accepted
            print('bad illumination direction estimated, initial=%s, estimated=%s' % (ill_ini, ill_est))
        # apply result: write estimated position and rotation into the system model
        r_est = self.model.position(output)[0]
        q_est = np.quaternion(*self.model.rotation(output)[0])
        self.system_model.set_cropped_system_scf(x, y, w, h, r_est, q_est, rotate_sc=rotate_sc)
        self.timer.stop()
        # dead debug block, kept disabled intentionally
        if False:
            r_est2, q_est2, ill_est2 = self.system_model.get_cropped_system_scf(x, y, w, h)
            self.system_model.swap_values_with_real_vals()
            r_real, q_real, ill_real = self.system_model.get_cropped_system_scf(x, y, w, h)
            self.system_model.swap_values_with_real_vals()
            print('compare q_est vs q_est2, q_real vs q_est, q_real vs q_est2')
        # save result image
        if BATCH_MODE and self.debug_filebase:
            # save result in log archive: rendered estimate next to the scene image
            res_img = self.render(textures=False)
            sce_img = cv2.resize(orig_sce_img, tuple(np.flipud(res_img.shape)))
            cv2.imwrite(self.debug_filebase+'b.png', np.concatenate((sce_img, res_img), axis=1))
            if DEBUG:
                cv2.imshow('compare', np.concatenate((sce_img, res_img), axis=1))
                cv2.waitKey()
|
Along with the general focus on geography and culture as they impact contemporary business, I had the opportunity to write an in-depth paper on China and prepare a presentation for my class. China is a dynamic and complicated country. It has increasing political and economic leverage, while at the same time eroding its natural resources and polluting its environment. Growth is always complicated, and the pace it has maintained is even more challenging. See my findings below.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import multiselectfield.db.fields
class Migration(migrations.Migration):
    # Auto-generated schema migration for the `encuestas` app: adds the
    # CostoGanaderia and CostoProcesamiento models, adds precio_consumido to
    # cultivostradicionales, and adjusts verbose names/choices on several
    # existing fields. Applied migrations must not be edited; comments only.
    dependencies = [
        ('encuestas', '0014_encuesta_estacion'),
    ]
    operations = [
        migrations.CreateModel(
            name='CostoGanaderia',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('costo', models.FloatField(verbose_name=b'Costo total en Moneda local')),
                ('encuesta', models.ForeignKey(to='encuestas.Encuesta')),
            ],
            options={
                'verbose_name_plural': 'Costo para ganaderia mayor y menor',
            },
        ),
        migrations.CreateModel(
            name='CostoProcesamiento',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('costo', models.FloatField(verbose_name=b'Costo total en Moneda local')),
                ('encuesta', models.ForeignKey(to='encuestas.Encuesta')),
            ],
            options={
                'verbose_name_plural': 'Costo para productos procesados',
            },
        ),
        migrations.AlterModelOptions(
            name='costofrutas',
            options={'verbose_name_plural': 'Total Mz y costo para frutas familiar'},
        ),
        migrations.AddField(
            model_name='cultivostradicionales',
            name='precio_consumido',
            field=models.FloatField(null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='distribuciontierra',
            name='tierra',
            field=models.IntegerField(verbose_name=b'20.1_Distribuci\xc3\xb3n de la tierra en la finca', choices=[(1, b'Bosque'), (2, b'Tacotal/Guamil/Machorra/Llano'), (3, b'Cultivo anual'), (4, b'Plantaci\xc3\xb3n forestal'), (5, b'Potrero'), (6, b'Pasto en asocio con \xc3\xa1rboles'), (7, b'Frutales'), (8, b'Cultivos en asocio')]),
        ),
        migrations.AlterField(
            model_name='duenono',
            name='no',
            field=models.IntegerField(choices=[(1, b'Arrendada'), (2, b'Promesa de venta'), (3, b'Prestada'), (4, b'Tierra Ind\xc3\xadgena/Comunal'), (5, b'Sin escritura'), (6, b'Colectivo/Cooperativa')]),
        ),
        migrations.AlterField(
            model_name='entrevistados',
            name='cedula',
            field=models.CharField(max_length=50, null=True, verbose_name=b'No. C\xc3\xa9dula/DPI', blank=True),
        ),
        migrations.AlterField(
            model_name='respuestano41',
            name='agricola',
            field=multiselectfield.db.fields.MultiSelectField(blank=True, max_length=7, null=True, choices=[(b'A', b'Falta de semilla'), (b'B', b'Mala calidad de la semilla'), (b'C', b'Falta de riego'), (b'D', b'Poca Tierra')]),
        ),
    ]
|
Click here to visit the Walmart site and let it load. Then play their "This or That" game at the top of the page. Once you’re done, just fill out your name and email to enter their sweepstakes (UPC not needed). There will be a total of 81 winners selected. You can enter up to 3 times per day until May 30th.
20 winners – $50 Wal-Mart gift card.
10 winners – $200 Wal-Mart gift card.
50 winners – $20 Wal-Mart gift card.
1 winner – $1,000 Wal-Mart gift card.
|
#!/usr/bin/env python3
import sys
import platform
def getinfo(name):
    """Return a normalized platform string.

    name == "OS":   lowercased OS name, with msys/mingw mapped to "windows".
    name == "ARCH": one of "x86", "x64", "arm", "aarch64" or "unknown".
    Any other name yields None.
    """
    if name == "OS":
        system = platform.system().lower()
        return "windows" if ("msys" in system or "mingw" in system) else system
    if name == "ARCH":
        is64bit = platform.architecture()[0] == "64bit"
        machine = platform.machine().lower()
        if machine.startswith("arm") or machine in ("aarch64", "arm64"):
            return "aarch64" if is64bit else "arm"
        if machine in ("i386", "i686", "amd64", "x86_64"):
            return "x64" if is64bit else "x86"
        sys.stderr.write("Unknown architecture: '%s'\n" % machine)
        return "unknown"
    return None
if __name__ == "__main__":
    def main():
        # CLI entry point: expects exactly one argument ("OS" or "ARCH") and
        # prints the corresponding normalized value. Exits with status 1 on a
        # wrong argument count or an unrecognized query name.
        if len(sys.argv) != 2:
            sys.stderr.write("Invalid number of arguments: %d\n" % (len(sys.argv) - 1))
            sys.exit(1)
        val = getinfo(sys.argv[1])
        if val is None:
            sys.stderr.write("Invalid argument '%s'\n" % sys.argv[1])
            sys.exit(1)
        print(val)
    main()
|
This residential home located in West London, underwent a garden renovation project and required a set of decorative privacy screens.
The homeowners had an idea of the pattern they wanted to create and designed it themselves. As they were seeking something bespoke Canal were the perfect candidate for the job.
In the workshop, Canal laser cut each 3mm stainless steel sheet to achieve the organic pattern the client designed.
To hold and support the decorative panels in place, a mild steel box section was fabricated and welded together at our Nottingham site. The Canal team also designed tab fixings onto the box section to create a seamless installation.
A powder coating black finish was then added to the box section and laser cut panels before delivering and fitting on site.
Canal Architectural offer an extensive range of design options for laser cut screens. The benefit of laser cutting is that it allows you to create almost any pattern you desire, with small, intricate details.
To discuss further about decorative privacy screens and its various installation methods, please contact the Canal Team.
To discuss your next staircase, balustrade or architectural metalwork project, contact Canal Engineering; you'll be glad you did!
|
from __future__ import absolute_import, division, print_function
from collections import deque, defaultdict
from datetime import timedelta
import functools
import logging
import six
import sys
import threading
from time import time
import weakref
import toolz
from tornado import gen
from tornado.locks import Condition
from tornado.ioloop import IOLoop
from tornado.queues import Queue
try:
from tornado.ioloop import PollIOLoop
except ImportError:
PollIOLoop = None # dropped in tornado 6.0
from collections.abc import Iterable
from .compatibility import get_thread_identity
from .orderedweakset import OrderedWeakrefSet
# Marker string used as a "no default supplied" sentinel (so None stays a
# valid user value).
no_default = '--no-default--'

# Module-level registries of sinks/streams.
_global_sinks = set()
_html_update_streams = set()

# Thread-local storage used across the module.
thread_state = threading.local()

logger = logging.getLogger(__name__)

# Lazily-populated list holding the shared background IOLoop (see get_io_loop).
_io_loops = []
def get_io_loop(asynchronous=None):
    """Return the IOLoop to use.

    If ``asynchronous`` is truthy the caller is already inside an event loop,
    so the current one is returned. Otherwise a single shared background loop
    is lazily created (started once in a daemon thread) and reused.
    """
    if asynchronous:
        return IOLoop.current()
    if not _io_loops:
        background_loop = IOLoop()
        runner = threading.Thread(target=background_loop.start)
        runner.daemon = True
        runner.start()
        _io_loops.append(background_loop)
    return _io_loops[-1]
def identity(x):
    """Return ``x`` unchanged (the identity function)."""
    return x
class RefCounter:
    """ A counter to track references to data

    Tracks how many nodes in the DAG still reference a particular element in
    the pipeline. Once the count drops to zero, the optional callback is
    scheduled on the event loop so that parties interested in knowing when
    data is done being processed are notified.

    Parameters
    ----------
    initial: int, optional
        The initial value of the reference counter
    cb: callable
        The function to use a callback when the reference count reaches zero
    loop: tornado.ioloop.IOLoop
        The loop on which to create a callback when the reference count
        reaches zero
    """

    def __init__(self, initial=0, cb=None, loop=None):
        self.loop = loop or get_io_loop()
        self.count = initial
        self.cb = cb

    def retain(self, n=1):
        """Increase the reference count by ``n``."""
        self.count += n

    def release(self, n=1):
        """Decrease the reference count by ``n``.

        When the count reaches zero (or below), the callback, if provided,
        is scheduled on the associated loop.
        """
        self.count -= n
        if self.count <= 0 and self.cb:
            self.loop.add_callback(self.cb)

    def __str__(self):
        return '<RefCounter count={}>'.format(self.count)

    __repr__ = __str__
class Stream(object):
    """ A Stream is an infinite sequence of data.

    Streams subscribe to each other passing and transforming data between them.
    A Stream object listens for updates from upstream, reacts to these updates,
    and then emits more data to flow downstream to all Stream objects that
    subscribe to it. Downstream Stream objects may connect at any point of a
    Stream graph to get a full view of the data coming off of that point to do
    with as they will.

    Parameters
    ----------
    stream_name: str or None
        This is the name of the stream.
    asynchronous: boolean or None
        Whether or not this stream will be used in asynchronous functions or
        normal Python functions.  Leave as None if you don't know.
        True will cause operations like emit to return awaitable Futures
        False will use an Event loop in another thread (starts it if necessary)
    ensure_io_loop: boolean
        Ensure that some IOLoop will be created.  If asynchronous is None or
        False then this will be in a separate thread, otherwise it will be
        IOLoop.current

    Examples
    --------
    >>> def inc(x):
    ...     return x + 1

    >>> source = Stream()  # Create a stream object
    >>> s = source.map(inc).map(str)  # Subscribe to make new streams
    >>> s.sink(print)  # take an action whenever an element reaches the end

    >>> L = list()
    >>> s.sink(L.append)  # or take multiple actions (streams can branch)

    >>> for i in range(5):
    ...     source.emit(i)  # push data in at the source
    '1'
    '2'
    '3'
    '4'
    '5'
    >>> L  # and the actions happen at the sinks
    ['1', '2', '3', '4', '5']
    """
    _graphviz_shape = 'ellipse'
    _graphviz_style = 'rounded,filled'
    _graphviz_fillcolor = 'white'
    _graphviz_orientation = 0

    # Attribute names rendered by ``__str__`` when present on a node.
    str_list = ['func', 'predicate', 'n', 'interval']

    def __init__(self, upstream=None, upstreams=None, stream_name=None,
                 loop=None, asynchronous=None, ensure_io_loop=False):
        self.downstreams = OrderedWeakrefSet()
        # A source node has ``upstreams == [None]``; see _add_upstream /
        # _remove_upstream which maintain that invariant.
        if upstreams is not None:
            self.upstreams = list(upstreams)
        else:
            self.upstreams = [upstream]

        self._set_asynchronous(asynchronous)
        self._set_loop(loop)
        if ensure_io_loop and not self.loop:
            self._set_asynchronous(False)
        if self.loop is None and self.asynchronous is not None:
            self._set_loop(get_io_loop(self.asynchronous))

        for upstream in self.upstreams:
            if upstream:
                upstream.downstreams.add(self)

        self.name = stream_name

    def _set_loop(self, loop):
        # Take an explicit loop, or inherit the first one found upstream.
        self.loop = None
        if loop is not None:
            self._inform_loop(loop)
        else:
            for upstream in self.upstreams:
                if upstream and upstream.loop:
                    self.loop = upstream.loop
                    break

    def _inform_loop(self, loop):
        """
        Percolate information about an event loop to the rest of the stream
        """
        if self.loop is not None:
            if self.loop is not loop:
                raise ValueError("Two different event loops active")
        else:
            self.loop = loop
            for upstream in self.upstreams:
                if upstream:
                    upstream._inform_loop(loop)
            for downstream in self.downstreams:
                if downstream:
                    downstream._inform_loop(loop)

    def _set_asynchronous(self, asynchronous):
        # Take an explicit flag, or inherit the first one found upstream.
        self.asynchronous = None
        if asynchronous is not None:
            self._inform_asynchronous(asynchronous)
        else:
            for upstream in self.upstreams:
                if upstream and upstream.asynchronous:
                    self.asynchronous = upstream.asynchronous
                    break

    def _inform_asynchronous(self, asynchronous):
        """
        Percolate information about an event loop to the rest of the stream
        """
        if self.asynchronous is not None:
            if self.asynchronous is not asynchronous:
                raise ValueError("Stream has both asynchronous and synchronous elements")
        else:
            self.asynchronous = asynchronous
            for upstream in self.upstreams:
                if upstream:
                    upstream._inform_asynchronous(asynchronous)
            for downstream in self.downstreams:
                if downstream:
                    downstream._inform_asynchronous(asynchronous)

    def _add_upstream(self, upstream):
        """Add upstream to current upstreams, this method is overridden for
        classes which handle stream specific buffers/caches"""
        if self.upstreams == [None]:
            self.upstreams[0] = upstream
        else:
            self.upstreams.append(upstream)

    def _add_downstream(self, downstream):
        """Add downstream to current downstreams"""
        self.downstreams.add(downstream)

    def _remove_downstream(self, downstream):
        """Remove downstream from current downstreams"""
        self.downstreams.remove(downstream)

    def _remove_upstream(self, upstream):
        """Remove upstream from current upstreams, this method is overridden for
        classes which handle stream specific buffers/caches"""
        if len(self.upstreams) == 1:
            # BUG FIX: this used to assign ``[None]`` (a list) into the slot.
            # ``_add_upstream`` checks ``self.upstreams == [None]`` and the
            # iteration guards check ``if upstream:``; a nested ``[None]`` is
            # truthy and breaks both, so the empty slot must be ``None``.
            self.upstreams[0] = None
        else:
            self.upstreams.remove(upstream)

    @classmethod
    def register_api(cls, modifier=identity, attribute_name=None):
        """ Add callable to Stream API

        This allows you to register a new method onto this class.  You can use
        it as a decorator.::

            >>> @Stream.register_api()
            ... class foo(Stream):
            ...     ...

            >>> Stream().foo(...)  # this works now

        It attaches the callable as a normal attribute to the class object.  In
        doing so it respects inheritance (all subclasses of Stream will also
        get the foo attribute).

        By default callables are assumed to be instance methods.  If you like
        you can include modifiers to apply before attaching to the class as in
        the following case where we construct a ``staticmethod``.

            >>> @Stream.register_api(staticmethod)
            ... class foo(Stream):
            ...     ...

            >>> Stream.foo(...)  # Foo operates as a static method

        You can also provide an optional ``attribute_name`` argument to control
        the name of the attribute your callable will be attached as.

            >>> @Stream.register_api(attribute_name="bar")
            ... class foo(Stream):
            ...     ...

            >> Stream().bar(...)  # foo was actually attached as bar
        """
        def _(func):
            @functools.wraps(func)
            def wrapped(*args, **kwargs):
                return func(*args, **kwargs)
            name = attribute_name if attribute_name else func.__name__
            setattr(cls, name, modifier(wrapped))
            return func
        return _

    @classmethod
    def register_plugin_entry_point(cls, entry_point, modifier=identity):
        """ Attach a lazily-loaded plugin node as ``cls.<entry_point.name>``.

        A lightweight stub is registered immediately; the first call loads
        the plugin, validates that it is a ``Stream`` subclass, replaces the
        stub with the real class and instantiates it.
        """
        if hasattr(cls, entry_point.name):
            raise ValueError(
                f"Can't add {entry_point.name} from {entry_point.module_name} "
                f"to {cls.__name__}: duplicate method name."
            )

        def stub(*args, **kwargs):
            """ Entrypoints-based streamz plugin. Will be loaded on first call. """
            node = entry_point.load()
            if not issubclass(node, Stream):
                raise TypeError(
                    f"Error loading {entry_point.name} "
                    f"from module {entry_point.module_name}: "
                    f"{node.__class__.__name__} must be a subclass of Stream"
                )
            # Replace the stub with the real class (only the first time).
            if getattr(cls, entry_point.name).__name__ == "stub":
                cls.register_api(
                    modifier=modifier, attribute_name=entry_point.name
                )(node)
            return node(*args, **kwargs)
        cls.register_api(modifier=modifier, attribute_name=entry_point.name)(stub)

    def start(self):
        """ Start any upstream sources """
        for upstream in self.upstreams:
            # Robustness fix: a source node's slot holds ``None`` (see
            # __init__/_remove_upstream), which has no ``start``; guard it
            # consistently with every other upstream loop in this class.
            if upstream is not None:
                upstream.start()

    def __str__(self):
        s_list = []
        if self.name:
            s_list.append('{}; {}'.format(self.name, self.__class__.__name__))
        else:
            s_list.append(self.__class__.__name__)

        for m in self.str_list:
            s = ''
            at = getattr(self, m, None)
            if at:
                if not callable(at):
                    s = str(at)
                elif hasattr(at, '__name__'):
                    s = getattr(self, m).__name__
                elif hasattr(at.__class__, '__name__'):
                    s = getattr(self, m).__class__.__name__
                else:
                    s = None
            if s:
                s_list.append('{}={}'.format(m, s))
        if len(s_list) <= 2:
            s_list = [term.split('=')[-1] for term in s_list]

        text = "<"
        text += s_list[0]
        if len(s_list) > 1:
            text += ': '
            text += ', '.join(s_list[1:])
        text += '>'
        return text

    __repr__ = __str__

    def _ipython_display_(self, **kwargs):  # pragma: no cover
        try:
            from ipywidgets import Output
            from IPython.core.interactiveshell import InteractiveShell
        except ImportError:
            # Fall back to plain text when the notebook stack is absent.
            if hasattr(self, '_repr_html_'):
                return self._repr_html_()
            else:
                return self.__repr__()
        output = Output(_view_count=0)
        output_ref = weakref.ref(output)

        def update_cell(val):
            output = output_ref()
            if output is None:
                return
            with output:
                content, *_ = InteractiveShell.instance().display_formatter.format(val)
                output.outputs = ({'output_type': 'display_data',
                                   'data': content,
                                   'metadata': {}},)

        s = self.map(update_cell)
        _html_update_streams.add(s)

        self.output_ref = output_ref
        s_ref = weakref.ref(s)

        def remove_stream(change):
            output = output_ref()
            if output is None:
                return

            if output._view_count == 0:
                ss = s_ref()
                ss.destroy()
                _html_update_streams.remove(ss)  # trigger gc

        output.observe(remove_stream, '_view_count')

        return output._ipython_display_(**kwargs)

    def _emit(self, x, metadata=None):
        """
        Push data into the stream at this point

        Parameters
        ----------
        x: any
            an element of data
        metadata: list[dict], optional
            Various types of metadata associated with the data element in `x`.

            ref: RefCounter
            A reference counter used to check when data is done
        """
        if metadata:
            # One reference per downstream; each downstream releases its own.
            self._retain_refs(metadata, len(self.downstreams))
        else:
            metadata = []

        result = []
        for downstream in list(self.downstreams):
            r = downstream.update(x, who=self, metadata=metadata)

            if type(r) is list:
                result.extend(r)
            else:
                result.append(r)

        self._release_refs(metadata)

        return [element for element in result if element is not None]

    def emit(self, x, asynchronous=False, metadata=None):
        """ Push data into the stream at this point

        This is typically done only at source Streams but can theoretically be
        done at any point

        Parameters
        ----------
        x: any
            an element of data
        asynchronous:
            emit asynchronously
        metadata: list[dict], optional
            Various types of metadata associated with the data element in `x`.

            ref: RefCounter
            A reference counter used to check when data is done
        """
        ts_async = getattr(thread_state, 'asynchronous', False)
        if self.loop is None or asynchronous or self.asynchronous or ts_async:
            if not ts_async:
                thread_state.asynchronous = True
            try:
                result = self._emit(x, metadata=metadata)
                if self.loop:
                    return gen.convert_yielded(result)
            finally:
                thread_state.asynchronous = ts_async
        else:
            # Synchronous caller with a background loop: run the emit on the
            # loop and block until it completes.
            @gen.coroutine
            def _():
                thread_state.asynchronous = True
                try:
                    result = yield self._emit(x, metadata=metadata)
                finally:
                    del thread_state.asynchronous

                raise gen.Return(result)
            sync(self.loop, _)

    def update(self, x, who=None, metadata=None):
        # Base behaviour: pass data straight through.
        return self._emit(x, metadata=metadata)

    def gather(self):
        """ This is a no-op for core streamz

        This allows gather to be used in both dask and core streams
        """
        return self

    def connect(self, downstream):
        """ Connect this stream to a downstream element.

        Parameters
        ----------
        downstream: Stream
            The downstream stream to connect to
        """
        self._add_downstream(downstream)
        downstream._add_upstream(self)

    def disconnect(self, downstream):
        """ Disconnect this stream to a downstream element.

        Parameters
        ----------
        downstream: Stream
            The downstream stream to disconnect from
        """
        self._remove_downstream(downstream)

        downstream._remove_upstream(self)

    @property
    def upstream(self):
        # Convenience accessor valid only for single-upstream nodes.
        if len(self.upstreams) != 1:
            raise ValueError("Stream has multiple upstreams")
        else:
            return self.upstreams[0]

    def destroy(self, streams=None):
        """
        Disconnect this stream from any upstream sources
        """
        if streams is None:
            streams = self.upstreams
        for upstream in list(streams):
            upstream.downstreams.remove(self)
            self.upstreams.remove(upstream)

    def scatter(self, **kwargs):
        from .dask import scatter
        return scatter(self, **kwargs)

    def remove(self, predicate):
        """ Only pass through elements for which the predicate returns False """
        return self.filter(lambda x: not predicate(x))

    @property
    def scan(self):
        # Alias: ``scan`` is the accumulate node.
        return self.accumulate

    @property
    def concat(self):
        # Alias: ``concat`` is the flatten node.
        return self.flatten

    def sink_to_list(self):
        """ Append all elements of a stream to a list as they come in

        Examples
        --------
        >>> source = Stream()
        >>> L = source.map(lambda x: 10 * x).sink_to_list()
        >>> for i in range(5):
        ...     source.emit(i)
        >>> L
        [0, 10, 20, 30, 40]
        """
        L = []
        self.sink(L.append)
        return L

    def frequencies(self, **kwargs):
        """ Count occurrences of elements """
        def update_frequencies(last, x):
            return toolz.assoc(last, x, last.get(x, 0) + 1)

        return self.scan(update_frequencies, start={}, **kwargs)

    def visualize(self, filename='mystream.png', **kwargs):
        """Render the computation of this object's task graph using graphviz.

        Requires ``graphviz`` and ``networkx`` to be installed.

        Parameters
        ----------
        filename : str, optional
            The name of the file to write to disk.
        kwargs:
            Graph attributes to pass to graphviz like ``rankdir="LR"``
        """
        from .graph import visualize
        return visualize(self, filename, **kwargs)

    def to_dataframe(self, example):
        """ Convert a stream of Pandas dataframes to a DataFrame

        Examples
        --------
        >>> source = Stream()
        >>> sdf = source.to_dataframe()
        >>> L = sdf.groupby(sdf.x).y.mean().stream.sink_to_list()
        >>> source.emit(pd.DataFrame(...))  # doctest: +SKIP
        >>> source.emit(pd.DataFrame(...))  # doctest: +SKIP
        >>> source.emit(pd.DataFrame(...))  # doctest: +SKIP
        """
        from .dataframe import DataFrame
        return DataFrame(stream=self, example=example)

    def to_batch(self, **kwargs):
        """ Convert a stream of lists to a Batch

        All elements of the stream are assumed to be lists or tuples

        Examples
        --------
        >>> source = Stream()
        >>> batches = source.to_batch()
        >>> L = batches.pluck('value').map(inc).sum().stream.sink_to_list()
        >>> source.emit([{'name': 'Alice', 'value': 1},
        ...              {'name': 'Bob', 'value': 2},
        ...              {'name': 'Charlie', 'value': 3}])
        >>> source.emit([{'name': 'Alice', 'value': 4},
        ...              {'name': 'Bob', 'value': 5},
        ...              {'name': 'Charlie', 'value': 6}])
        """
        from .batch import Batch
        return Batch(stream=self, **kwargs)

    def _retain_refs(self, metadata, n=1):
        """ Retain all references in the provided metadata `n` number of times

        Parameters
        ----------
        metadata: list[dict], optional
            Various types of metadata associated with the data element in `x`.

            ref: RefCounter
            A reference counter used to check when data is done
        n: The number of times to retain the provided references
        """
        for m in metadata:
            if 'ref' in m:
                m['ref'].retain(n)

    def _release_refs(self, metadata, n=1):
        """ Release all references in the provided metadata `n` number of times

        Parameters
        ----------
        metadata: list[dict], optional
            Various types of metadata associated with the data element in `x`.

            ref: RefCounter
            A reference counter used to check when data is done
        n: The number of times to release the provided references
        """
        for m in metadata:
            if 'ref' in m:
                m['ref'].release(n)
@Stream.register_api()
class sink(Stream):
    """ Apply a function on every element

    Examples
    --------
    >>> source = Stream()
    >>> L = list()
    >>> source.sink(L.append)
    >>> source.sink(print)
    >>> source.sink(print)
    >>> source.emit(123)
    123
    123
    >>> L
    [123]

    See Also
    --------
    map
    Stream.sink_to_list
    """
    _graphviz_shape = 'trapezium'

    def __init__(self, upstream, func, *args, **kwargs):
        self.func = func
        # Separate the kwarg meant for Stream itself from those for ``func``.
        stream_name = kwargs.pop("stream_name", None)
        self.kwargs = kwargs
        self.args = args
        Stream.__init__(self, upstream, stream_name=stream_name)
        # Keep a global strong reference so the sink is not garbage
        # collected when the caller drops its handle.
        _global_sinks.add(self)

    def update(self, x, who=None, metadata=None):
        outcome = self.func(x, *self.args, **self.kwargs)
        # Hand awaitables back to the event loop; drop plain results since
        # a sink terminates the pipeline.
        return outcome if gen.isawaitable(outcome) else []
@Stream.register_api()
class map(Stream):
    """ Apply a function to every element in the stream

    Parameters
    ----------
    func: callable
    *args :
        The arguments to pass to the function.
    **kwargs:
        Keyword arguments to pass to func

    Examples
    --------
    >>> source = Stream()
    >>> source.map(lambda x: 2*x).sink(print)
    >>> for i in range(5):
    ...     source.emit(i)
    0
    2
    4
    6
    8
    """

    def __init__(self, upstream, func, *args, **kwargs):
        self.func = func
        # Separate the kwarg meant for Stream itself from those for ``func``.
        stream_name = kwargs.pop('stream_name', None)
        self.kwargs = kwargs
        self.args = args
        Stream.__init__(self, upstream, stream_name=stream_name)

    def update(self, x, who=None, metadata=None):
        try:
            value = self.func(x, *self.args, **self.kwargs)
        except Exception as exc:
            # Log with traceback, then let the error propagate upstream.
            logger.exception(exc)
            raise
        return self._emit(value, metadata=metadata)
@Stream.register_api()
class starmap(Stream):
    """ Apply a function to every element in the stream, splayed out

    See ``itertools.starmap``

    Parameters
    ----------
    func: callable
    *args :
        The arguments to pass to the function.
    **kwargs:
        Keyword arguments to pass to func

    Examples
    --------
    >>> source = Stream()
    >>> source.starmap(lambda a, b: a + b).sink(print)
    >>> for i in range(5):
    ...     source.emit((i, i))
    0
    2
    4
    6
    8
    """

    def __init__(self, upstream, func, *args, **kwargs):
        self.func = func
        # Separate the kwarg meant for Stream itself from those for ``func``.
        stream_name = kwargs.pop('stream_name', None)
        self.kwargs = kwargs
        self.args = args
        Stream.__init__(self, upstream, stream_name=stream_name)

    def update(self, x, who=None, metadata=None):
        # Incoming element is a tuple; extra constructor args are appended.
        arguments = x + self.args
        try:
            value = self.func(*arguments, **self.kwargs)
        except Exception as exc:
            logger.exception(exc)
            raise
        return self._emit(value, metadata=metadata)
def _truthy(x):
return not not x
@Stream.register_api()
class filter(Stream):
    """ Only pass through elements that satisfy the predicate

    Parameters
    ----------
    predicate : function
        The predicate. Should return True or False, where
        True means that the predicate is satisfied.
    *args :
        The arguments to pass to the predicate.
    **kwargs:
        Keyword arguments to pass to predicate

    Examples
    --------
    >>> source = Stream()
    >>> source.filter(lambda x: x % 2 == 0).sink(print)
    >>> for i in range(5):
    ...     source.emit(i)
    0
    2
    4
    """

    def __init__(self, upstream, predicate, *args, **kwargs):
        # ``None`` means "filter on truthiness".
        self.predicate = _truthy if predicate is None else predicate
        stream_name = kwargs.pop("stream_name", None)
        self.kwargs = kwargs
        self.args = args
        Stream.__init__(self, upstream, stream_name=stream_name)

    def update(self, x, who=None, metadata=None):
        satisfied = self.predicate(x, *self.args, **self.kwargs)
        if satisfied:
            return self._emit(x, metadata=metadata)
@Stream.register_api()
class accumulate(Stream):
    """ Accumulate results with previous state

    This performs running or cumulative reductions, applying the function
    to the previous total and the new element.  The function should take
    two arguments, the previous accumulated state and the next element and
    it should return a new accumulated state,
    - ``state = func(previous_state, new_value)`` (returns_state=False)
    - ``state, result = func(previous_state, new_value)`` (returns_state=True)

    where the new_state is passed to the next invocation. The state or result
    is emitted downstream for the two cases.

    Parameters
    ----------
    func: callable
    start: object
        Initial value, passed as the value of ``previous_state`` on the first
        invocation. Defaults to the first submitted element
    returns_state: boolean
        If true then func should return both the state and the value to emit
        If false then both values are the same, and func returns one value
    **kwargs:
        Keyword arguments to pass to func

    Examples
    --------
    A running total, producing triangular numbers

    >>> source = Stream()
    >>> source.accumulate(lambda acc, x: acc + x).sink(print)
    >>> for i in range(5):
    ...     source.emit(i)
    0
    1
    3
    6
    10

    A count of number of events (including the current one)

    >>> source = Stream()
    >>> source.accumulate(lambda acc, x: acc + 1, start=0).sink(print)
    >>> for _ in range(5):
    ...     source.emit(0)
    1
    2
    3
    4
    5

    Like the builtin "enumerate".

    >>> source = Stream()
    >>> source.accumulate(lambda acc, x: ((acc[0] + 1, x), (acc[0], x)),
    ...                   start=(0, 0), returns_state=True
    ...                   ).sink(print)
    >>> for i in range(3):
    ...     source.emit(0)
    (0, 0)
    (1, 0)
    (2, 0)
    """
    _graphviz_shape = 'box'

    def __init__(self, upstream, func, start=no_default, returns_state=False,
                 **kwargs):
        self.func = func
        self.state = start
        self.returns_state = returns_state
        # CONSISTENCY FIX: pop the stream-specific kwargs *before* storing
        # the remainder as ``self.kwargs`` (matching map/starmap/sink).  The
        # old code assigned first and popped afterwards, which only removed
        # these keys from ``self.kwargs`` because both names aliased the
        # same dict — same end state, but accidental.
        stream_name = kwargs.pop('stream_name', None)
        self.with_state = kwargs.pop('with_state', False)
        self.kwargs = kwargs
        Stream.__init__(self, upstream, stream_name=stream_name)

    def update(self, x, who=None, metadata=None):
        if self.state is no_default:
            # No start value: the first element seeds the state and is
            # emitted unchanged.
            self.state = x
            if self.with_state:
                return self._emit((self.state, x), metadata=metadata)
            else:
                return self._emit(x, metadata=metadata)
        else:
            try:
                result = self.func(self.state, x, **self.kwargs)
            except Exception as e:
                logger.exception(e)
                raise
            if self.returns_state:
                state, result = result
            else:
                state = result
            self.state = state
            if self.with_state:
                return self._emit((self.state, result), metadata=metadata)
            else:
                return self._emit(result, metadata=metadata)
@Stream.register_api()
class slice(Stream):
    """
    Get only some events in a stream by position. Works like list[] syntax.

    Parameters
    ----------
    start : int
        First event to use. If None, start from the beginnning
    end : int
        Last event to use (non-inclusive). If None, continue without stopping.
        Does not support negative indexing.
    step : int
        Pass on every Nth event. If None, pass every one.

    Examples
    --------
    >>> source = Stream()
    >>> source.slice(2, 6, 2).sink(print)
    >>> for i in range(5):
    ...     source.emit(0)
    2
    4
    """

    def __init__(self, upstream, start=None, end=None, step=None, **kwargs):
        self.state = 0          # number of events seen so far
        self.star = start or 0  # first index to pass through
        self.end = end
        self.step = step or 1
        if any((_ or 0) < 0 for _ in [start, end, step]):
            raise ValueError("Negative indices not supported by slice")
        stream_name = kwargs.pop('stream_name', None)
        Stream.__init__(self, upstream, stream_name=stream_name)
        self._check_end()

    def update(self, x, who=None, metadata=None):
        # BUG FIX: the stride used to be measured from index 0
        # (``self.state % self.step``) instead of from ``start``, so e.g.
        # ``slice(1, None, 2)`` passed events 2, 4, ... rather than
        # 1, 3, ... as ``list[1::2]`` semantics require.  Measuring the
        # offset from ``start`` matches list slicing (the docstring example
        # with start=2, step=2 is unaffected).
        if self.state >= self.star and (self.state - self.star) % self.step == 0:
            # NOTE(review): this calls the public ``emit`` while sibling
            # nodes use ``_emit`` — confirm whether that is intentional.
            self.emit(x, metadata=metadata)
        self.state += 1
        self._check_end()

    def _check_end(self):
        if self.end and self.state >= self.end:
            # we're done: detach from all upstreams
            for upstream in self.upstreams:
                upstream._remove_downstream(self)
@Stream.register_api()
class partition(Stream):
    """ Partition stream into tuples of equal size

    Parameters
    ----------
    n: int
        Maximum partition size
    timeout: int or float, optional
        Number of seconds after which a partition will be emitted,
        even if its size is less than ``n``. If ``None`` (default),
        a partition will be emitted only when its size reaches ``n``.
    key: hashable or callable, optional
        Emit items with the same key together as a separate partition.
        If ``key`` is callable, partition will be identified by ``key(x)``,
        otherwise by ``x[key]``. Defaults to ``None``.

    Examples
    --------
    >>> source = Stream()
    >>> source.partition(3).sink(print)
    >>> for i in range(10):
    ...     source.emit(i)
    (0, 1, 2)
    (3, 4, 5)
    (6, 7, 8)

    >>> source = Stream()
    >>> source.partition(2, key=lambda x: x % 2).sink(print)
    >>> for i in range(4):
    ...     source.emit(i)
    (0, 2)
    (1, 3)

    >>> from time import sleep
    >>> source = Stream()
    >>> source.partition(5, timeout=1).sink(print)
    >>> for i in range(3):
    ...     source.emit(i)
    >>> sleep(1)
    (0, 1, 2)
    """
    _graphviz_shape = 'diamond'

    def __init__(self, upstream, n, timeout=None, key=None, **kwargs):
        self.n = n
        self._timeout = timeout
        self._key = key
        # One element buffer and one metadata buffer per partition key.
        self._buffer = defaultdict(lambda: [])
        self._metadata_buffer = defaultdict(lambda: [])
        # Per-key timeout handles returned by ``loop.call_later``.
        self._callbacks = {}
        Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)

    def _get_key(self, x):
        # Resolve the partition key: None when unconfigured, ``key(x)`` for
        # callables, otherwise item lookup ``x[key]``.
        if self._key is None:
            return None
        if callable(self._key):
            return self._key(x)
        return x[self._key]

    @gen.coroutine
    def _flush(self, key):
        # Swap the buffers out *before* emitting so elements arriving during
        # the emit land in fresh buffers.
        result, self._buffer[key] = self._buffer[key], []
        metadata_result, self._metadata_buffer[key] = self._metadata_buffer[key], []
        yield self._emit(tuple(result), list(metadata_result))
        self._release_refs(metadata_result)

    @gen.coroutine
    def update(self, x, who=None, metadata=None):
        self._retain_refs(metadata)
        key = self._get_key(x)
        buffer = self._buffer[key]
        metadata_buffer = self._metadata_buffer[key]
        buffer.append(x)
        if isinstance(metadata, list):
            metadata_buffer.extend(metadata)
        else:
            metadata_buffer.append(metadata)
        if len(buffer) == self.n:
            # Full partition: cancel the pending timeout (if any) and flush.
            if self._timeout is not None and self.n > 1:
                # NOTE(review): assumes the handle returned by
                # ``loop.call_later`` has a ``.cancel()`` method; tornado
                # documents cancellation via ``loop.remove_timeout(handle)``
                # — confirm against the tornado version in use.
                self._callbacks[key].cancel()
            yield self._flush(key)
            return
        if len(buffer) == 1 and self._timeout is not None:
            # First element of a new partition: arm the flush timeout.
            self._callbacks[key] = self.loop.call_later(
                self._timeout, self._flush, key
            )
@Stream.register_api()
class sliding_window(Stream):
    """ Produce overlapping tuples of size n

    Parameters
    ----------
    return_partial : bool
        If True, yield tuples as soon as any events come in, each tuple being
        smaller or equal to the window size. If False, only start yielding
        tuples once a full window has accrued.

    Examples
    --------
    >>> source = Stream()
    >>> source.sliding_window(3, return_partial=False).sink(print)
    >>> for i in range(8):
    ...     source.emit(i)
    (0, 1, 2)
    (1, 2, 3)
    (2, 3, 4)
    (3, 4, 5)
    (4, 5, 6)
    (5, 6, 7)
    """
    _graphviz_shape = 'diamond'

    def __init__(self, upstream, n, return_partial=True, **kwargs):
        self.n = n
        # ``deque(maxlen=n)`` drops the oldest element automatically once
        # the window is full.
        self._buffer = deque(maxlen=n)
        # Parallel deque holding each element's metadata list.
        self.metadata_buffer = deque(maxlen=n)
        self.partial = return_partial
        Stream.__init__(self, upstream, **kwargs)

    def update(self, x, who=None, metadata=None):
        self._retain_refs(metadata)
        self._buffer.append(x)
        # Normalize metadata to a list so the buffers stay parallel.
        if not isinstance(metadata, list):
            metadata = [metadata]
        self.metadata_buffer.append(metadata)
        if self.partial or len(self._buffer) == self.n:
            flat_metadata = [m for ml in self.metadata_buffer for m in ml]
            ret = self._emit(tuple(self._buffer), flat_metadata)
            # When the window is full, the oldest element is about to fall
            # off the deque — release the references it carried.
            if len(self.metadata_buffer) == self.n:
                completed = self.metadata_buffer.popleft()
                self._release_refs(completed)
            return ret
        else:
            return []
def convert_interval(interval):
    """Normalize an interval to seconds.

    Strings are parsed with ``pandas.Timedelta`` (e.g. ``"500ms"`` -> 0.5);
    any other value is returned unchanged.
    """
    if isinstance(interval, str):
        import pandas as pd
        return pd.Timedelta(interval).total_seconds()
    return interval
@Stream.register_api()
class timed_window(Stream):
    """ Emit a tuple of collected results every interval

    Every ``interval`` seconds this emits a tuple of all of the results
    seen so far. This can help to batch data coming off of a high-volume
    stream.
    """
    _graphviz_shape = 'octagon'

    def __init__(self, upstream, interval, **kwargs):
        # ``interval`` may be numeric seconds or a pandas-style string.
        self.interval = convert_interval(interval)
        self._buffer = []
        self.metadata_buffer = []
        # Result of the most recent flush; ``update`` returns it, which
        # presumably lets callers await the previous emission — TODO confirm.
        self.last = gen.moment
        Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
        self.loop.add_callback(self.cb)

    def update(self, x, who=None, metadata=None):
        self._buffer.append(x)
        self._retain_refs(metadata)
        self.metadata_buffer.append(metadata)
        return self.last

    @gen.coroutine
    def cb(self):
        # Periodic flush loop; runs for the lifetime of the node.
        while True:
            # Swap buffers out before emitting so new arrivals are kept.
            L, self._buffer = self._buffer, []
            metadata, self.metadata_buffer = self.metadata_buffer, []
            m = [m for ml in metadata for m in ml]
            self.last = self._emit(L, m)
            self._release_refs(m)
            yield self.last
            yield gen.sleep(self.interval)
@Stream.register_api()
class delay(Stream):
    """ Add a time delay to results """
    _graphviz_shape = 'octagon'

    def __init__(self, upstream, interval, **kwargs):
        self.interval = convert_interval(interval)
        self.queue = Queue()
        Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
        self.loop.add_callback(self.cb)

    @gen.coroutine
    def cb(self):
        # Drain the queue forever, spacing emissions ``interval`` apart.
        while True:
            started = time()
            item, metadata = yield self.queue.get()
            yield self._emit(item, metadata=metadata)
            self._release_refs(metadata)
            remaining = self.interval - (time() - started)
            if remaining > 0:
                yield gen.sleep(remaining)

    def update(self, x, who=None, metadata=None):
        self._retain_refs(metadata)
        return self.queue.put((x, metadata))
@Stream.register_api()
class rate_limit(Stream):
    """ Limit the flow of data

    This stops two elements of streaming through in an interval shorter
    than the provided value.

    Parameters
    ----------
    interval: float
        Time in seconds
    """
    _graphviz_shape = 'octagon'

    def __init__(self, upstream, interval, **kwargs):
        self.interval = convert_interval(interval)
        # Earliest time the next element is allowed through.
        self.next = 0
        Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)

    @gen.coroutine
    def update(self, x, who=None, metadata=None):
        now = time()
        scheduled = self.next
        self.next = max(now, scheduled) + self.interval
        if now < scheduled:
            # Too soon after the previous element: wait out the difference.
            yield gen.sleep(scheduled - now)
        yield self._emit(x, metadata=metadata)
@Stream.register_api()
class buffer(Stream):
    """ Allow results to pile up at this point in the stream

    This allows results to buffer in place at various points in the stream.
    This can help to smooth flow through the system when backpressure is
    applied.
    """
    _graphviz_shape = 'diamond'

    def __init__(self, upstream, n, **kwargs):
        # Bounded queue: at most ``n`` elements wait here.
        self.queue = Queue(maxsize=n)
        Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
        self.loop.add_callback(self.cb)

    def update(self, x, who=None, metadata=None):
        self._retain_refs(metadata)
        # The awaitable from ``put`` resolves once there is room in the
        # queue, which is what throttles the upstream.
        return self.queue.put((x, metadata))

    @gen.coroutine
    def cb(self):
        # Drain the queue forever, emitting each buffered element in order.
        while True:
            item, metadata = yield self.queue.get()
            yield self._emit(item, metadata=metadata)
            self._release_refs(metadata)
@Stream.register_api()
class zip(Stream):
    """ Combine streams together into a stream of tuples

    We emit a new tuple once all streams have produce a new tuple.

    See also
    --------
    combine_latest
    zip_latest
    """
    _graphviz_orientation = 270
    _graphviz_shape = 'triangle'

    def __init__(self, *upstreams, **kwargs):
        self.maxsize = kwargs.pop('maxsize', 10)
        self.condition = Condition()
        # Non-Stream positional arguments are constants spliced back into
        # every emitted tuple at their original positions.
        self.literals = [(i, val) for i, val in enumerate(upstreams)
                         if not isinstance(val, Stream)]

        # One FIFO buffer per real upstream.
        self.buffers = {upstream: deque()
                        for upstream in upstreams
                        if isinstance(upstream, Stream)}

        upstreams2 = [upstream for upstream in upstreams if isinstance(upstream, Stream)]

        Stream.__init__(self, upstreams=upstreams2, **kwargs)

    def _add_upstream(self, upstream):
        # Override method to handle setup of buffer for new stream
        self.buffers[upstream] = deque()
        super(zip, self)._add_upstream(upstream)

    def _remove_upstream(self, upstream):
        # Override method to handle removal of buffer for stream
        self.buffers.pop(upstream)
        super(zip, self)._remove_upstream(upstream)

    def pack_literals(self, tup):
        """ Fill buffers for literals whenever we empty them """
        inp = list(tup)[::-1]
        out = []
        for i, val in self.literals:
            while len(out) < i:
                out.append(inp.pop())
            out.append(val)

        while inp:
            out.append(inp.pop())

        return tuple(out)

    def update(self, x, who=None, metadata=None):
        # This class shadows the builtin ``zip`` in the module namespace, so
        # fetch the builtin explicitly.  BUG FIX: this used
        # ``__builtins__['zip']`` — but ``__builtins__`` is a CPython
        # implementation detail that is the ``builtins`` *module* (not a
        # dict) in the ``__main__`` module, where subscripting raises
        # TypeError.  ``import builtins`` is the documented, portable way.
        import builtins
        self._retain_refs(metadata)
        L = self.buffers[who]  # get buffer for stream
        L.append((x, metadata))
        if len(L) == 1 and all(self.buffers.values()):
            # Every upstream has at least one queued element: pop one from
            # each (in upstream order) and emit the combined tuple.
            vals = [self.buffers[up][0] for up in self.upstreams]
            tup, md = builtins.zip(*vals)
            for buf in self.buffers.values():
                buf.popleft()
            self.condition.notify_all()
            if self.literals:
                tup = self.pack_literals(tup)
            md = [m for ml in md for m in ml]
            ret = self._emit(tup, md)
            self._release_refs(md)
            return ret
        elif len(L) > self.maxsize:
            # This upstream is too far ahead: block it until another
            # upstream catches up and notifies the condition.
            return self.condition.wait()
@Stream.register_api()
class combine_latest(Stream):
    """ Combine multiple streams together to a stream of tuples

    This will emit a new tuple of all of the most recent elements seen from
    any stream.

    Parameters
    ----------
    emit_on : stream or list of streams or None
        only emit upon update of the streams listed.
        If None, emit on update from any stream

    See Also
    --------
    zip
    """
    _graphviz_orientation = 270
    _graphviz_shape = 'triangle'

    def __init__(self, *upstreams, **kwargs):
        emit_on = kwargs.pop('emit_on', None)
        # Remember whether ``emit_on`` was configured explicitly; when it was
        # not, the set of emit triggers tracks ``self.upstreams``.
        self._initial_emit_on = emit_on

        # Most recent value and metadata per upstream (parallel lists).
        self.last = [None for _ in upstreams]
        self.metadata = [None for _ in upstreams]
        # Upstreams that have not produced anything yet.
        self.missing = set(upstreams)
        if emit_on is not None:
            if not isinstance(emit_on, Iterable):
                emit_on = (emit_on, )
            # Integers are positional indices into ``upstreams``.
            emit_on = tuple(
                upstreams[x] if isinstance(x, int) else x for x in emit_on)
            self.emit_on = emit_on
        else:
            self.emit_on = upstreams
        Stream.__init__(self, upstreams=upstreams, **kwargs)

    def _add_upstream(self, upstream):
        # Override method to handle setup of last and missing for new stream
        self.last.append(None)
        self.metadata.append(None)
        self.missing.update([upstream])
        super(combine_latest, self)._add_upstream(upstream)
        if self._initial_emit_on is None:
            self.emit_on = self.upstreams

    def _remove_upstream(self, upstream):
        # Override method to handle removal of last and missing for stream.
        # BUG FIX: the guard used to read ``if self.emit_on == upstream``,
        # comparing a *tuple* of streams against a single stream — always
        # False, so this protective error could never fire.  Raise when an
        # explicitly configured ``emit_on`` depends on the stream being
        # removed; when ``emit_on`` was defaulted it is rebuilt from the
        # remaining upstreams below, so removal stays permitted.
        if self._initial_emit_on is not None and upstream in self.emit_on:
            raise RuntimeError("Can't remove the ``emit_on`` stream since that"
                               "would cause no data to be emitted. "
                               "Consider adding an ``emit_on`` first by "
                               "running ``node.emit_on=(upstream,)`` to add "
                               "a new ``emit_on`` or running "
                               "``node.emit_on=tuple(node.upstreams)`` to "
                               "emit on all incoming data")
        self.last.pop(self.upstreams.index(upstream))
        self.metadata.pop(self.upstreams.index(upstream))
        self.missing.remove(upstream)
        super(combine_latest, self)._remove_upstream(upstream)
        if self._initial_emit_on is None:
            self.emit_on = self.upstreams

    def update(self, x, who=None, metadata=None):
        self._retain_refs(metadata)
        idx = self.upstreams.index(who)
        if self.metadata[idx]:
            # Release the references held by the value this slot replaces.
            self._release_refs(self.metadata[idx])
        self.metadata[idx] = metadata

        if self.missing and who in self.missing:
            self.missing.remove(who)

        self.last[idx] = x
        if not self.missing and who in self.emit_on:
            tup = tuple(self.last)
            md = [m for ml in self.metadata for m in ml]
            return self._emit(tup, md)
@Stream.register_api()
class flatten(Stream):
    """ Flatten streams of lists or iterables into a stream of elements

    Examples
    --------
    >>> source = Stream()
    >>> source.flatten().sink(print)
    >>> for x in [[1, 2, 3], [4, 5], [6, 7, 7]]:
    ...     source.emit(x)
    1
    2
    3
    4
    5
    6
    7

    See Also
    --------
    partition
    """
    def update(self, x, who=None, metadata=None):
        collected = []
        final = len(x) - 1
        for position, element in enumerate(x):
            # Only the final element is emitted with the metadata attached.
            if position == final:
                emitted = self._emit(element, metadata=metadata)
            else:
                emitted = self._emit(element)
            if type(emitted) is list:
                collected.extend(emitted)
            else:
                collected.append(emitted)
        return collected
@Stream.register_api()
class unique(Stream):
    """Suppress elements that have already passed through.

    Only elements not present in the stored history are emitted. The
    ``maxsize=`` parameter bounds the history; ``maxsize=1`` suppresses
    only immediate repeats.

    Parameters
    ----------
    maxsize: int or None, optional
        number of stored unique values to check against
    key : function, optional
        Function which returns a representation of the incoming data.
        For example ``key=lambda x: x['a']`` could be used to allow only
        pieces of data with unique ``'a'`` values to pass through.
    hashable : bool, optional
        If True then data is assumed to be hashable, else it is not. This is
        used for determining how to cache the history, if hashable then
        either dicts or LRU caches are used, otherwise a deque is used.
        Defaults to True.

    Examples
    --------
    >>> source = Stream()
    >>> source.unique(maxsize=1).sink(print)
    >>> for x in [1, 1, 2, 2, 2, 1, 3]:
    ...     source.emit(x)
    1
    2
    1
    3
    """
    def __init__(self, upstream, maxsize=None, key=identity, hashable=True,
                 **kwargs):
        self.key = key
        self.maxsize = maxsize
        if hashable:
            history = dict()
            if self.maxsize:
                # zict's LRU bounds the dict-backed history.
                from zict import LRU
                history = LRU(self.maxsize, history)
            self.seen = history
        else:
            # Unhashable data: fall back to an ordered list history.
            self.seen = []
        Stream.__init__(self, upstream, **kwargs)

    def update(self, x, who=None, metadata=None):
        token = self.key(x)
        if isinstance(self.seen, list):
            fresh = token not in self.seen
            if not fresh:
                # Move-to-front so recently seen tokens survive trimming.
                self.seen.remove(token)
            self.seen.insert(0, token)
            if self.maxsize:
                del self.seen[self.maxsize:]
            if fresh:
                return self._emit(x, metadata=metadata)
        else:
            sentinel = '~~not_seen~~'
            if self.seen.get(token, sentinel) == sentinel:
                self.seen[token] = 1
                return self._emit(x, metadata=metadata)
@Stream.register_api()
class union(Stream):
    """Merge several streams into a single output stream.

    Each incoming element from any upstream flows straight through to
    the output, untouched and uncombined with elements from the other
    streams.

    See also
    --------
    Stream.zip
    Stream.combine_latest
    """
    def __init__(self, *upstreams, **kwargs):
        super(union, self).__init__(upstreams=upstreams, **kwargs)

    def update(self, x, who=None, metadata=None):
        # Pure pass-through: no buffering, no pairing.
        return self._emit(x, metadata=metadata)
@Stream.register_api()
class pluck(Stream):
    """Pick fields out of each element in the stream.

    Parameters
    ----------
    pluck : object, list
        The element(s) to pick from the incoming element in the stream.
        If an instance of list, will pick multiple elements and emit
        them as a tuple.

    Examples
    --------
    >>> source = Stream()
    >>> source.pluck([0, 3]).sink(print)
    >>> for x in [[1, 2, 3, 4], [4, 5, 6, 7], [8, 9, 10, 11]]:
    ...     source.emit(x)
    (1, 4)
    (4, 7)
    (8, 11)

    >>> source = Stream()
    >>> source.pluck('name').sink(print)
    >>> for x in [{'name': 'Alice', 'x': 123}, {'name': 'Bob', 'x': 456}]:
    ...     source.emit(x)
    'Alice'
    'Bob'
    """
    def __init__(self, upstream, pick, **kwargs):
        self.pick = pick
        super(pluck, self).__init__(upstream, **kwargs)

    def update(self, x, who=None, metadata=None):
        selector = self.pick
        # A list selector means: pick several fields, emit as a tuple.
        if isinstance(selector, list):
            plucked = tuple([x[field] for field in selector])
            return self._emit(plucked, metadata=metadata)
        return self._emit(x[selector], metadata=metadata)
@Stream.register_api()
class collect(Stream):
    """Accumulate elements in a cache; emit them as one tuple on flush.

    Examples
    --------
    >>> source1 = Stream()
    >>> source2 = Stream()
    >>> collector = collect(source1)
    >>> collector.sink(print)
    >>> source2.sink(collector.flush)
    >>> source1.emit(1)
    >>> source1.emit(2)
    >>> source2.emit('anything')  # flushes collector
    ...
    [1, 2]
    """
    def __init__(self, upstream, cache=None, metadata_cache=None, **kwargs):
        # Callers may supply their own (shared) caches.
        self.cache = deque() if cache is None else cache
        self.metadata_cache = (deque() if metadata_cache is None
                               else metadata_cache)
        Stream.__init__(self, upstream, **kwargs)

    def update(self, x, who=None, metadata=None):
        # Hold metadata refs for as long as the element sits in the cache.
        self._retain_refs(metadata)
        self.cache.append(x)
        if metadata:
            if isinstance(metadata, list):
                self.metadata_cache.extend(metadata)
            else:
                self.metadata_cache.append(metadata)

    def flush(self, _=None):
        """Emit everything cached so far as a single tuple, then reset."""
        payload = tuple(self.cache)
        flushed_metadata = list(self.metadata_cache)
        self._emit(payload, flushed_metadata)
        self._release_refs(flushed_metadata)
        self.cache.clear()
        self.metadata_cache.clear()
@Stream.register_api()
class zip_latest(Stream):
    """Combine multiple streams together to a stream of tuples

    The stream which this is called from is lossless. All elements from
    the lossless stream are emitted regardless of when they came in.
    This will emit a new tuple consisting of an element from the lossless
    stream paired with the latest elements from the other streams.
    Elements are only emitted when an element on the lossless stream are
    received, similar to ``combine_latest`` with the ``emit_on`` flag.

    See Also
    --------
    Stream.combine_latest
    Stream.zip
    """
    def __init__(self, lossless, *upstreams, **kwargs):
        # The lossless stream always occupies slot 0 of the upstream list.
        upstreams = (lossless,) + upstreams
        self.last = [None for _ in upstreams]      # latest element per upstream
        self.metadata = [None for _ in upstreams]  # latest metadata per upstream
        self.missing = set(upstreams)              # upstreams yet to emit
        self.lossless = lossless
        self.lossless_buffer = deque()             # queued (x, metadata) pairs
        Stream.__init__(self, upstreams=upstreams, **kwargs)

    def update(self, x, who=None, metadata=None):
        """Buffer lossless elements; emit tuples once every upstream has a value."""
        self._retain_refs(metadata)
        idx = self.upstreams.index(who)
        if who is self.lossless:
            # Queue lossless elements so none are dropped.
            self.lossless_buffer.append((x, metadata))
        elif self.metadata[idx]:
            # Replacing a lossy slot: release the metadata it held.
            self._release_refs(self.metadata[idx])
        self.metadata[idx] = metadata
        self.last[idx] = x
        if self.missing and who in self.missing:
            self.missing.remove(who)
        if not self.missing:
            L = []
            while self.lossless_buffer:
                # Pair each queued lossless element with the latest values.
                self.last[0], self.metadata[0] = self.lossless_buffer.popleft()
                # NOTE(review): flattening assumes every metadata slot is a
                # list; a ``None`` slot would raise TypeError -- confirm
                # upstreams always supply a list here.
                md = [m for ml in self.metadata for m in ml]
                L.append(self._emit(tuple(self.last), md))
                self._release_refs(self.metadata[0])
            return L
@Stream.register_api()
class latest(Stream):
    """ Drop held-up data and emit the latest result

    This allows you to skip intermediate elements in the stream if there is
    some back pressure causing a slowdown. Use this when you only care about
    the latest elements, and are willing to lose older data.

    This passes through values without modification otherwise.

    Examples
    --------
    >>> source.map(f).latest().map(g)  # doctest: +SKIP
    """
    _graphviz_shape = 'octagon'

    def __init__(self, upstream, **kwargs):
        # ``condition`` wakes the emit coroutine; ``next`` holds at most
        # one pending element (the most recently received one).
        self.condition = Condition()
        self.next = []
        self.next_metadata = None
        Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
        self.loop.add_callback(self.cb)

    def update(self, x, who=None, metadata=None):
        """Overwrite any pending element with *x* and wake the emitter."""
        if self.next_metadata:
            # The element being replaced will never be emitted; release
            # the refs its metadata held.
            self._release_refs(self.next_metadata)
        self._retain_refs(metadata)
        self.next = [x]
        self.next_metadata = metadata
        # Condition.notify must run on the IOLoop thread.
        self.loop.add_callback(self.condition.notify)

    @gen.coroutine
    def cb(self):
        # Long-running coroutine: wait for a notification, then emit
        # whatever element is pending at that moment.
        while True:
            yield self.condition.wait()
            [x] = self.next
            yield self._emit(x, self.next_metadata)
@Stream.register_api()
class to_kafka(Stream):
    """ Writes data in the stream to Kafka

    This stream accepts a string or bytes object. Call ``flush`` to ensure all
    messages are pushed. Responses from Kafka are pushed downstream.

    Parameters
    ----------
    topic : string
        The topic which to write
    producer_config : dict
        Settings to set up the stream, see
        https://docs.confluent.io/current/clients/confluent-kafka-python/#configuration
        https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
        Examples:
        bootstrap.servers: Connection string (host:port) to Kafka

    Examples
    --------
    >>> from streamz import Stream
    >>> ARGS = {'bootstrap.servers': 'localhost:9092'}
    >>> source = Stream()
    >>> kafka = source.map(lambda x: str(x)).to_kafka('test', ARGS)
    <to_kafka>
    >>> for i in range(10):
    ...     source.emit(i)
    >>> kafka.flush()
    """
    def __init__(self, upstream, topic, producer_config, **kwargs):
        import confluent_kafka as ck
        self.topic = topic
        self.producer = ck.Producer(producer_config)
        Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
        self.stopped = False
        # Seconds to sleep between librdkafka polls.
        self.polltime = 0.2
        self.loop.add_callback(self.poll)
        # One future per in-flight message, resolved by ``cb``.
        self.futures = []

    @gen.coroutine
    def poll(self):
        # Background loop on the IOLoop that services the producer so
        # delivery callbacks (``cb``) get run.
        while not self.stopped:
            # executes callbacks for any delivered data, in this thread
            # if no messages were sent, nothing happens
            self.producer.poll(0)
            yield gen.sleep(self.polltime)

    def update(self, x, who=None, metadata=None):
        """Queue *x* for delivery; return a future resolved on delivery."""
        future = gen.Future()
        self.futures.append(future)

        @gen.coroutine
        def _():
            while True:
                try:
                    # this runs asynchronously, in C-K's thread
                    self.producer.produce(self.topic, x, callback=self.cb)
                    return
                except BufferError:
                    # Local queue full: wait for deliveries, then retry.
                    yield gen.sleep(self.polltime)
                except Exception as e:
                    future.set_exception(e)
                    return

        self.loop.add_callback(_)
        return future

    @gen.coroutine
    def cb(self, err, msg):
        # Delivery callback from confluent-kafka.
        # NOTE(review): pops the oldest future, which assumes delivery
        # callbacks fire in produce order -- confirm for this producer
        # configuration.
        future = self.futures.pop(0)
        if msg is not None and msg.value() is not None:
            future.set_result(None)
            yield self._emit(msg.value())
        else:
            future.set_exception(err or msg.error())

    def flush(self, timeout=-1):
        # Block until all queued messages are delivered (or timeout).
        self.producer.flush(timeout)
def sync(loop, func, *args, **kwargs):
    """
    Run coroutine in loop running in separate thread.
    """
    # This was taken from distributed/utils.py
    # Tornado's PollIOLoop doesn't raise when using closed, do it ourselves
    if PollIOLoop and ((isinstance(loop, PollIOLoop) and getattr(loop, '_closing', False))
            or (hasattr(loop, 'asyncio_loop') and loop.asyncio_loop._closed)):
        raise RuntimeError("IOLoop is closed")

    timeout = kwargs.pop('callback_timeout', None)

    # ``e`` signals completion back to the calling thread; the result or
    # captured exception is smuggled out through one-element lists.
    e = threading.Event()
    main_tid = get_thread_identity()
    result = [None]
    error = [False]

    @gen.coroutine
    def f():
        # Runs on the IOLoop's own thread.
        try:
            # Calling sync() from the loop's thread would deadlock.
            if main_tid == get_thread_identity():
                raise RuntimeError("sync() called from thread of running loop")
            yield gen.moment
            thread_state.asynchronous = True
            future = func(*args, **kwargs)
            if timeout is not None:
                future = gen.with_timeout(timedelta(seconds=timeout), future)
            result[0] = yield future
        except Exception:
            error[0] = sys.exc_info()
        finally:
            thread_state.asynchronous = False
            e.set()

    loop.add_callback(f)
    if timeout is not None:
        if not e.wait(timeout):
            raise gen.TimeoutError("timed out after %s s." % (timeout,))
    else:
        # Wait in short intervals so interrupts can get through.
        while not e.is_set():
            e.wait(10)
    if error[0]:
        six.reraise(*error[0])
    else:
        return result[0]
|
Mid Century Post and Beam with stunning jetliner views of the canyon & beyond! Main house features a fireplace and bar, 5 bedrooms, 3 bathrooms, oversized deck with amazing views and privacy! Attached 2 car direct entry garage! Situated on over 2 acres! Recently subdivided. APN number to be provided prior to close of escrow.
25256 Piuma Rd is a residential property located in Calabasas, CA. This property sits on a 108,900 sq ft lot (about 2.5 acres), is 2492 sqft, with 5 bedrooms, 3 baths.
|
import time
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, Deferred
from vumi.blinkenlights import metrics
from vumi.tests.utils import get_stubbed_channel
from vumi.message import Message
from vumi.service import Worker
from vumi.tests.helpers import VumiTestCase, WorkerHelper
class TestMetricPublisher(VumiTestCase):
    """Tests for ``MetricPublisher``."""

    def setUp(self):
        self.worker_helper = self.add_helper(WorkerHelper())

    @inlineCallbacks
    def start_publisher(self, publisher):
        channel = yield get_stubbed_channel(self.worker_helper.broker)
        publisher.start(channel)

    def _sleep(self, delay):
        done = Deferred()
        reactor.callLater(delay, lambda: done.callback(None))
        return done

    def _check_msg(self, prefix, metric, values):
        # Assert the most recently dispatched metric message matches
        # ``metric`` and carries exactly ``values``.
        msgs = self.worker_helper.get_dispatched_metrics()
        if values is None:
            self.assertEqual(msgs, [])
            return
        [datapoint] = msgs[-1]
        self.assertEqual(datapoint[0], prefix + metric.name)
        self.assertEqual(datapoint[1], list(metric.aggs))
        # Timestamps are truncated to whole seconds, which alone can
        # introduce a 1s difference; allow a 2s window around now.
        now = time.time()
        stamps_ok = all(abs(point[0] - now) < 2.0 for point in datapoint[2])
        self.assertTrue(stamps_ok,
                        "Not all datapoints near now (%f): %r"
                        % (now, datapoint))
        self.assertEqual([point[1] for point in datapoint[2]], values)

    @inlineCallbacks
    def test_publish_single_metric(self):
        publisher = metrics.MetricPublisher()
        yield self.start_publisher(publisher)
        msg = metrics.MetricMessage()
        cnt = metrics.Count("my.count")
        msg.append(
            ("vumi.test.%s" % (cnt.name,), cnt.aggs, [(time.time(), 1)]))
        publisher.publish_message(msg)
        self._check_msg("vumi.test.", cnt, [1])

    def test_publisher_provides_interface(self):
        self.assertTrue(metrics.IMetricPublisher.providedBy(
            metrics.MetricPublisher()))
class TestMetricManager(VumiTestCase):
    """Tests for ``MetricManager``: start/stop, registration and publishing."""

    def setUp(self):
        # ``_next_publish`` fires each time ``on_publish`` is called; the
        # cleanup unblocks anything still waiting at teardown.
        self._next_publish = Deferred()
        self.add_cleanup(lambda: self._next_publish.callback(None))
        self.worker_helper = self.add_helper(WorkerHelper())

    def on_publish(self, mm):
        # Publish callback: swap in a fresh deferred before firing so a
        # waiter can immediately wait for the next cycle.
        d, self._next_publish = self._next_publish, Deferred()
        d.callback(mm)

    def wait_publish(self):
        # Deferred that fires on the next publish cycle.
        return self._next_publish

    @inlineCallbacks
    def start_manager_as_publisher(self, manager):
        channel = yield get_stubbed_channel(self.worker_helper.broker)
        manager.start(channel)
        self.add_cleanup(manager.stop)

    def _sleep(self, delay):
        d = Deferred()
        reactor.callLater(delay, lambda: d.callback(None))
        return d

    def _check_msg(self, manager, metric, values):
        # With ``values=None`` this asserts nothing was published at all.
        msgs = self.worker_helper.get_dispatched_metrics()
        if values is None:
            self.assertEqual(msgs, [])
            return
        [datapoint] = msgs[-1]
        self.assertEqual(datapoint[0], manager.prefix + metric.name)
        self.assertEqual(datapoint[1], list(metric.aggs))
        # check datapoints within 2s of now -- the truncating of
        # time.time() to an int for timestamps can cause a 1s
        # difference by itself
        now = time.time()
        self.assertTrue(all(abs(p[0] - now) < 2.0
                            for p in datapoint[2]),
                        "Not all datapoints near now (%f): %r"
                        % (now, datapoint))
        self.assertEqual([dp[1] for dp in datapoint[2]], values)

    @inlineCallbacks
    def test_start_manager_no_publisher(self):
        # start() with a channel creates a publisher and the poll task.
        mm = metrics.MetricManager("vumi.test.")
        self.assertEqual(mm._publisher, None)
        self.assertEqual(mm._task, None)
        channel = yield get_stubbed_channel(self.worker_helper.broker)
        mm.start(channel)
        self.add_cleanup(mm.stop)
        self.assertIsInstance(mm._publisher, metrics.MetricPublisher)
        self.assertNotEqual(mm._task, None)

    @inlineCallbacks
    def test_start_manager_publisher_and_channel(self):
        # start() must refuse a channel when a publisher was already given.
        publisher = metrics.MetricPublisher()
        mm = metrics.MetricManager("vumi.test.", publisher=publisher)
        self.assertEqual(mm._publisher, publisher)
        self.assertEqual(mm._task, None)
        channel = yield get_stubbed_channel(self.worker_helper.broker)
        self.assertRaises(RuntimeError, mm.start, channel)

    def test_start_polling_no_publisher(self):
        # Polling may run without any publisher attached.
        mm = metrics.MetricManager("vumi.test.")
        self.assertEqual(mm._publisher, None)
        self.assertEqual(mm._task, None)
        mm.start_polling()
        self.add_cleanup(mm.stop_polling)
        self.assertEqual(mm._publisher, None)
        self.assertNotEqual(mm._task, None)

    def test_start_polling_with_publisher(self):
        # start_polling() leaves an externally supplied publisher untouched.
        publisher = metrics.MetricPublisher()
        mm = metrics.MetricManager("vumi.test.", publisher=publisher)
        self.assertEqual(mm._publisher, publisher)
        self.assertEqual(mm._task, None)
        mm.start_polling()
        self.add_cleanup(mm.stop_polling)
        self.assertEqual(mm._publisher, publisher)
        self.assertNotEqual(mm._task, None)

    def test_oneshot(self):
        # oneshot() queues a single datapoint stamped with the current time.
        self.patch(time, "time", lambda: 12345)
        mm = metrics.MetricManager("vumi.test.")
        cnt = metrics.Count("my.count")
        mm.oneshot(cnt, 3)
        self.assertEqual(cnt.name, "my.count")
        self.assertEqual(mm._oneshot_msgs, [
            (cnt, [(12345, 3)]),
        ])

    def test_register(self):
        mm = metrics.MetricManager("vumi.test.")
        cnt = mm.register(metrics.Count("my.count"))
        self.assertEqual(cnt.name, "my.count")
        self.assertEqual(mm._metrics, [cnt])

    def test_double_register(self):
        # Registering the same metric name twice is an error.
        mm = metrics.MetricManager("vumi.test.")
        mm.register(metrics.Count("my.count"))
        self.assertRaises(metrics.MetricRegistrationError,
                          mm.register, metrics.Count("my.count"))

    def test_lookup(self):
        # Managers support ``in`` and item lookup by metric name.
        mm = metrics.MetricManager("vumi.test.")
        cnt = mm.register(metrics.Count("my.count"))
        self.assertTrue("my.count" in mm)
        self.assertTrue(mm["my.count"] is cnt)
        self.assertEqual(mm["my.count"].name, "my.count")

    @inlineCallbacks
    def test_publish_metrics_poll(self):
        # publish_metrics() pushes polled values of registered metrics.
        mm = metrics.MetricManager("vumi.test.", 0.1, self.on_publish)
        cnt = mm.register(metrics.Count("my.count"))
        yield self.start_manager_as_publisher(mm)
        cnt.inc()
        mm.publish_metrics()
        self._check_msg(mm, cnt, [1])

    @inlineCallbacks
    def test_publish_metrics_oneshot(self):
        # publish_metrics() also pushes queued oneshot datapoints.
        mm = metrics.MetricManager("vumi.test.", 0.1, self.on_publish)
        cnt = metrics.Count("my.count")
        yield self.start_manager_as_publisher(mm)
        mm.oneshot(cnt, 1)
        mm.publish_metrics()
        self._check_msg(mm, cnt, [1])

    @inlineCallbacks
    def test_start(self):
        # The polling task publishes registered metrics each cycle.
        mm = metrics.MetricManager("vumi.test.", 0.1, self.on_publish)
        cnt = mm.register(metrics.Count("my.count"))
        yield self.start_manager_as_publisher(mm)
        self.assertTrue(mm._task is not None)
        self._check_msg(mm, cnt, None)
        cnt.inc()
        yield self.wait_publish()
        self._check_msg(mm, cnt, [1])
        cnt.inc()
        cnt.inc()
        yield self.wait_publish()
        self._check_msg(mm, cnt, [1, 1])

    @inlineCallbacks
    def test_publish_metrics(self):
        # publish_metrics() drains the oneshot queue.
        mm = metrics.MetricManager("vumi.test.", 0.1, self.on_publish)
        cnt = metrics.Count("my.count")
        yield self.start_manager_as_publisher(mm)
        mm.oneshot(cnt, 1)
        self.assertEqual(len(mm._oneshot_msgs), 1)
        mm.publish_metrics()
        self.assertEqual(mm._oneshot_msgs, [])
        self._check_msg(mm, cnt, [1])

    def test_publish_metrics_not_started_no_publisher(self):
        # Publishing without a publisher attached must fail loudly.
        mm = metrics.MetricManager("vumi.test.")
        self.assertEqual(mm._publisher, None)
        mm.oneshot(metrics.Count("my.count"), 1)
        self.assertRaises(ValueError, mm.publish_metrics)

    def test_stop_unstarted(self):
        mm = metrics.MetricManager("vumi.test.", 0.1, self.on_publish)
        mm.stop()
        mm.stop()  # Check that .stop() is idempotent.

    @inlineCallbacks
    def test_in_worker(self):
        # Managers can also be created via Worker.start_publisher().
        worker = yield self.worker_helper.get_worker(Worker, {}, start=False)
        mm = yield worker.start_publisher(metrics.MetricManager,
                                          "vumi.test.", 0.1, self.on_publish)
        acc = mm.register(metrics.Metric("my.acc"))
        try:
            self.assertTrue(mm._task is not None)
            self._check_msg(mm, acc, None)
            acc.set(1.5)
            acc.set(1.0)
            yield self.wait_publish()
            self._check_msg(mm, acc, [1.5, 1.0])
        finally:
            mm.stop()

    @inlineCallbacks
    def test_task_failure(self):
        # A metric whose poll() raises should log the error, not crash
        # the polling task.
        channel = yield get_stubbed_channel()
        mm = metrics.MetricManager("vumi.test.", 0.1)
        wait_error = Deferred()

        class BadMetricError(Exception):
            pass

        class BadMetric(metrics.Metric):
            def poll(self):
                wait_error.callback(None)
                raise BadMetricError("bad metric")

        mm.register(BadMetric("bad"))
        mm.start(channel)
        yield wait_error
        yield self._sleep(0)  # allow log message to be processed
        error, = self.flushLoggedErrors(BadMetricError)
        self.assertTrue(error.type is BadMetricError)
class TestAggregators(VumiTestCase):
    """Checks for the built-in metric aggregators."""

    def _check_aggregator(self, agg, name, forward, backward):
        # Shared assertions: empty input aggregates to zero, both
        # orderings of [1.0, 2.0] give the expected results, and the
        # aggregator round-trips through its registered name.
        self.assertEqual(agg([]), 0.0)
        self.assertEqual(agg([1.0, 2.0]), forward)
        self.assertEqual(agg([2.0, 1.0]), backward)
        self.assertEqual(agg.name, name)
        self.assertEqual(metrics.Aggregator.from_name(name), agg)

    def test_sum(self):
        self._check_aggregator(metrics.SUM, "sum", 3.0, 3.0)

    def test_avg(self):
        self._check_aggregator(metrics.AVG, "avg", 1.5, 1.5)

    def test_min(self):
        self._check_aggregator(metrics.MIN, "min", 1.0, 1.0)

    def test_max(self):
        self._check_aggregator(metrics.MAX, "max", 2.0, 2.0)

    def test_last(self):
        # LAST is order-sensitive: it keeps the final value.
        self._check_aggregator(metrics.LAST, "last", 2.0, 1.0)

    def test_already_registered(self):
        self.assertRaises(metrics.AggregatorAlreadyDefinedError,
                          metrics.Aggregator, "sum", sum)
class CheckValuesMixin(object):
    """Assertion helpers for checking polled metric datapoints."""

    def _check_poll_base(self, metric, n):
        # Poll the metric and sanity-check timestamps; return the values.
        datapoints = metric.poll()
        # check datapoints within 2s of now -- the truncating of
        # time.time() to an int for timestamps can cause a 1s
        # difference by itself
        now = time.time()
        self.assertTrue(all(abs(d[0] - now) <= 2.0
                            for d in datapoints),
                        "Not all datapoints near now (%f): %r"
                        % (now, datapoints))
        # NOTE: ``long`` here means this module targets Python 2.
        self.assertTrue(all(isinstance(d[0], (int, long)) for d in datapoints))
        actual_values = [dp[1] for dp in datapoints]
        return actual_values

    def check_poll_func(self, metric, n, test):
        # Assert that exactly *n* values were polled and each satisfies
        # the ``test`` predicate.
        actual_values = self._check_poll_base(metric, n)
        self.assertEqual([test(v) for v in actual_values], [True] * n)

    def check_poll(self, metric, expected_values):
        # Assert that polling yields exactly ``expected_values`` in order.
        n = len(expected_values)
        actual_values = self._check_poll_base(metric, n)
        self.assertEqual(actual_values, expected_values)
class TestMetric(VumiTestCase, CheckValuesMixin):
    """Tests for the base ``Metric`` class."""

    def test_manage(self):
        manager = metrics.MetricManager("vumi.test.")
        metric = metrics.Metric("foo")
        metric.manage(manager)
        self.assertEqual(metric.name, "foo")
        # A metric may only ever be owned by a single manager.
        other_manager = metrics.MetricManager("vumi.othertest.")
        self.assertRaises(metrics.MetricRegistrationError, metric.manage,
                          other_manager)

    def test_managed(self):
        metric = metrics.Metric("foo")
        self.assertFalse(metric.managed)
        metric.manage(metrics.MetricManager("vumi.test."))
        self.assertTrue(metric.managed)

    def test_poll(self):
        metric = metrics.Metric("foo")
        self.check_poll(metric, [])
        for value in (1.0, 2.0):
            metric.set(value)
        self.check_poll(metric, [1.0, 2.0])
class TestCount(VumiTestCase, CheckValuesMixin):
    """Tests for the ``Count`` metric."""

    def test_inc_and_poll(self):
        counter = metrics.Count("foo")
        self.check_poll(counter, [])
        counter.inc()
        # Polling drains the accumulated values.
        self.check_poll(counter, [1.0])
        self.check_poll(counter, [])
        counter.inc()
        counter.inc()
        self.check_poll(counter, [1.0, 1.0])
class TestTimer(VumiTestCase, CheckValuesMixin):
    """Tests for the ``Timer`` metric and its event-timer helpers."""

    def patch_time(self, starting_value):
        # Replace time.time() with a controllable fake clock.
        self._clock = starting_value
        self.patch(time, 'time', lambda: self._clock)

    def incr_fake_time(self, value):
        self._clock += value

    def test_start_and_stop(self):
        t = metrics.Timer("foo")
        self.patch_time(12345.0)
        t.start()
        self.incr_fake_time(0.1)
        t.stop()
        self.check_poll_func(t, 1, lambda x: 0.09 < x < 0.11)
        # Polling drains the recorded durations.
        self.check_poll(t, [])

    def test_already_started(self):
        t = metrics.Timer("foo")
        t.start()
        self.assertRaises(metrics.TimerAlreadyStartedError, t.start)

    def test_not_started(self):
        self.assertRaises(metrics.TimerNotStartedError,
                          metrics.Timer("foo").stop)

    def test_stop_and_stop(self):
        t = metrics.Timer("foo")
        t.start()
        t.stop()
        self.assertRaises(metrics.TimerNotStartedError, t.stop)

    def test_double_start_and_stop(self):
        t = metrics.Timer("foo")
        self.patch_time(12345.0)
        for _ in range(2):
            t.start()
            self.incr_fake_time(0.1)
            t.stop()
        self.check_poll_func(t, 2, lambda x: 0.09 < x < 0.11)
        self.check_poll(t, [])

    def test_context_manager(self):
        t = metrics.Timer("foo")
        self.patch_time(12345.0)
        with t:
            self.incr_fake_time(0.1)  # feign sleep
        self.check_poll_func(t, 1, lambda x: 0.09 < x < 0.11)
        self.check_poll(t, [])

    def test_accumulate_times(self):
        t = metrics.Timer("foo")
        self.patch_time(12345.0)
        for _ in range(2):
            with t:
                self.incr_fake_time(0.1)  # feign sleep
        self.check_poll_func(t, 2, lambda x: 0.09 < x < 0.11)
        self.check_poll(t, [])

    def test_timeit(self):
        t = metrics.Timer("foo")
        self.patch_time(12345.0)
        with t.timeit():
            self.incr_fake_time(0.1)
        self.check_poll_func(t, 1, lambda x: 0.09 < x < 0.11)
        self.check_poll(t, [])

    def test_timeit_start_and_stop(self):
        t = metrics.Timer("foo")
        self.patch_time(12345.0)
        event = t.timeit()
        event.start()
        self.incr_fake_time(0.1)
        event.stop()
        self.check_poll_func(t, 1, lambda x: 0.09 < x < 0.11)
        self.check_poll(t, [])

    def test_timeit_start_and_start(self):
        event = metrics.Timer("foo").timeit()
        event.start()
        self.assertRaises(metrics.TimerAlreadyStartedError, event.start)

    def test_timeit_stop_without_start(self):
        event = metrics.Timer("foo").timeit()
        self.assertRaises(metrics.TimerNotStartedError, event.stop)

    def test_timeit_stop_and_stop(self):
        event = metrics.Timer("foo").timeit()
        event.start()
        event.stop()
        # Unlike the timer itself, a finished event raises "already
        # stopped" rather than "not started".
        self.assertRaises(metrics.TimerAlreadyStoppedError, event.stop)

    def test_timeit_autostart(self):
        t = metrics.Timer("foo")
        self.patch_time(12345.0)
        event = t.timeit(start=True)
        self.incr_fake_time(0.1)
        event.stop()
        self.check_poll_func(t, 1, lambda x: 0.09 < x < 0.11)
        self.check_poll(t, [])
class TestMetricsConsumer(VumiTestCase):
    """Tests for ``MetricsConsumer``."""

    def test_consume_message(self):
        expected = [
            ("vumi.test.v1", 1234, 1.0),
            ("vumi.test.v2", 3456, 2.0),
        ]
        received = []

        def record(*datapoint):
            received.append(datapoint)

        consumer = metrics.MetricsConsumer(None, record)
        msg = metrics.MetricMessage()
        msg.extend(expected)
        # Round-trip through JSON to mimic a message off the wire.
        consumer.consume_message(Message.from_json(msg.to_json()))
        self.assertEqual(received, expected)
|
Where does your passion for Customer Experience start?
I started at Autoglass in accounts 14 years ago, having previously worked in sales at Interbrew. I worked with our corporate partners and customers to ensure our invoices were paid on time. I progressed to head the department and became responsible for the Customer Experience we provided at that stage.
I enjoyed the corporate/client interactions, so I moved to become a senior business manager. This gave me great insight into the corporate relationship. I then had my twins so needed to rethink being on the road so much.
So when I came back I became a Credit Manager, which I enjoyed. I could see where things went wrong for our customers and so started to push to improve our processes. At the same time, I noticed there was a Head of Customer Experience advertised internally. For me, that was a perfect opportunity because I wanted to improve the experience for our customers at all stages.
Do you feel having held a few roles in the company has helped you in your current role?
I think it has. I have the advantage of a rounded perspective when I look at Customer Experience – be that for consumers or client partner journeys. I know the whole journey so can draw upon those experiences. I love the fact that I know the journey from the beginning to the end including the client partner side of things as well.
How does your role impact your corporate partners’ Customer’s Experience, for say Direct Line?
I’m always aware of the challenges our partners face with their customers, so I keep close to our partners. But if we are getting it right for our customers, we are getting it right for our corporate partners too.
Practitioners in CX usually have a service background, but you have a commercial view on things. Does that change the business perception of Customer Experience?
It helps, but we have such a passionate workforce here at Autoglass, with all of us focussing on doing the right thing for the customer. Putting customers first is what we do.
Of course I can put myself in others’ shoes internally, because I’ve been there, which helps them see what we are trying to achieve. But the passion across Autoglass is one of the things that has kept me here so long. Everyone puts our customers at the forefront of their thinking and tries to solve people’s problems with real care.
You’ve been in the role as Head of Customer Experience for two years. Would you say that Customer Experience has progressed much in that time?
We’ve learned a lot over that time. We’ve taken operational learning forward, so we can get it right for the customer first time, which is what our customers want. We’ve been able to make improvements based on that right first-time philosophy with changes to our working practices.
As a framework, we have a set of five customer personas we have developed, which we use as a focal point for training staff across the business in how to interact with customers in different ways to achieve complete satisfaction. For example, some customers are most interested in being able to easily book an appointment online without having to speak to anyone; others are interested in having their car off the road for as little time as possible; and others are happiest when we assist them with each step of the booking and service process.
The other thing we’ve undertaken is to create a more cross-functional team working dynamic. Within the last couple of years, we now join departments up to work together towards a common purpose for our customers. This gives people consistency, with the same voice of the customer understanding.
We work collaboratively and encourage feedback to be able to make those improvements.
In terms of taking CX improvements forward, how does it work at Autoglass?
An Autoglass technician repairs a chip in a windscreen.
Well I’m the business lead for taking improvements forward across all our journeys. I report to the Customer & Digital Director, but I also attend the supply chain and operational monthly meeting so have a dotted line to the Supply Chain & Operations Director. I participate in all team meetings, which is great.
Although I have my own team, I’m very much a part of others’ teams too. Within my own areas of responsibility are the Head of Service Recovery and the Billings Validations Manager. The reason for these areas is that both those departments highlight where things haven’t gone right for the customers. Service recovery is where the complaints come in, so it keeps me really close to those areas. I can then see the insights and can sit with the team to make improvements.
Just recently I’ve attended the sales and marketing meeting, so we keep it really close in terms of the way we work on customer improvements.
Now you’ve seen this approach work first hand, is it an organisational structure you’d apply again?
Yes, because you get to hear about things first hand. I get to hear what’s not working quite well enough. It also keeps you grounded, because whilst it’s important to look forward, you need to know what’s going on with your customers now as well.
What about the little things, do they get attention in this structure?
Sometimes we will make an improvement which is great for our customers but was difficult for our people to deliver. So, we have a ‘Speak to Sarah’ framework, which provides employees the opportunity to send any frustrations to a central point if something didn’t go quite right, and to provide new recommendations for further improvement. The Service Recovery team manage this feedback.
It complements our more structured Voice of The Customer set up with Effort and NPS measurements tracked. It allows us to capture more. I feel it’s a key point of customer experience to listen to your employees, and this achieves it.
What about client partners, does the experience vary for them?
We listen to the motorists, which creates an extra layer of insight which we can then feedback to our client partners. I attend our client partners sessions. We work together to review the various data insights we all have, to arrive at mutual improvement plans which benefit Autoglass, the motorists, and our client partners.
That’s where we look at insights through the customer personas we’ve created, which makes the insight more accessible. We have a fleet persona too. Our corporate clients love the fact that we are showing an understanding of their customer segments through our personas.
How would you describe the strategic role of Customer Experience at Autoglass?
For me, it is our number one priority. I’m constantly pushing others in the business to have the same purpose; to focus on caring about customers. I’m always asking ‘how are we making a difference?’ It’s a core part of everything we do, and I represent that through our cross-functional groups and senior management teams. But it’s more about making sure the customer’s voice is heard at every stage and everyone considers the customer in their role.
Our Customer Experience strategy is focused on ‘always delivering the easiest and best Customer Experience’. For me it’s about everybody else having that constant focus. It’s most important that everyone else feels the same.
You mentioned customer effort. What KPI’s are on your CX dashboard and how will you measure the success of the journey you are on?
Our NPS is 74, which we are very happy with. Our effort score is 94 percent as well, which is ‘customers are satisfied we’ve resolved their issue for them’. The effort score is applied across all channels.
We are looking to capture specific touchpoint feedback rather than relying on the verbatim. Our technicians are brilliant, so if you remember something it’s likely to be them. We want to start capturing feedback related to other touchpoints as well, so we can get that feedback across the entire journey.
I want to understand in more detail at each stage of the journey what we need to tweak to meet our personas expectation. Needing your windscreen repaired or replaced is usually a stressful situation. So we want people to feel, when they call us or go online, that we are solving their problems with real care. I want to listen in and recognise the personalisation and usefulness of our interactions at these less pronounced touch points.
Keeping momentum on CX is often a challenge. Some are finding the ROI a challenge to prove. You are keeping it alive and thriving, what would your advice be to keep CX a priority focus?
Everyone has a part to play. Continually motivating our customer facing colleagues by recognising when they deliver consistent as well as exceptional service is key. It’s something Autoglass do well – recognising consistent service. We have an internal recognition scheme where individuals can nominate each other for going beyond or doing their job well.
That gets shared on our internal bulletin, but where appropriate it gets shared on social media too. This is about making our people feel valued for the contribution they’ve given serving our customers.
We also have the Belron (parent company) Exceptional Customer Service Award across all group companies across all the countries, so there is a bigger recognition to aim for.
Recognising and rewarding your people helps keep Customer Experience on the agenda.
How does technology contribute to your overall customer experience?
Technology is key. We have a comprehensive technology development roadmap in place. We have an R&D function in Belron looking at the tools our technicians use as well. We have just introduced an improvement related to the calibration of advanced driver assistance systems (cameras and sensors for example, emergency braking). So, any repair on a windscreen means those cameras need to be calibrated as well.
But to take your car to one site for the windscreen and another for the cameras is a pain. What we’ve done is to work with our partners, so we can calibrate at the same time we fix the windscreen. I didn’t know I had these cameras, and some of our customers don’t know until the windscreen is damaged either. We are in a great situation where we can identify it at the initial call and can help manage this better for our customers.
We’ve even been looking at how you can use Alexa to book our vehicle repair. Some personas are happy to talk to a chat bot if it gets the answers they need.
We are also beginning to do a 360 video pre-and post-inspection of any work we’ve completed to give customers confidence in the quality of the work we’ve undertaken. This is where technology can really improve the experience the customer is having.
Is there a specific Employee Experience focus at Autoglass or is it part of the culture?
It’s a bit of both to be honest. We want it to be part of our culture, but we do have a deliberate strategy to drive it forward as well. In the same way as I’m Head of Customer Experience, we have a Head of People Engagement too – Rachel Gedge. Her focus in that role is making Autoglass the best place our people have ever worked. She will challenge the leadership to ensure we are working towards that ambition and that our people are focussed on it.
As well as our customers, our people are very important to us. For instance, we are going through a transformation in our Customer Experience Centre and that’s about making it the best place our people have ever worked. At the forefront of our mind we are always asking ‘is it the best place our people have ever worked?’ and ‘will that deliver the easiest and best experiences for our customers?’ We love the culture here and our people and our customers are important to us.
From your own personal experience, which brands have inspired you?
I’m very hard to please as a Head of Customer Experience, but I do have a favourite – Fitbit. I spend all day talking to people so when I go home I want to deal with companies digitally and not have to talk to them. I had a problem with my Fitbit so contacted them on chat. It might even have been a chat bot but I didn’t mind because it felt personal. I had expected it to be a problem, especially because it was a gift and I had no receipt, but they were brilliant. It was so easy; they didn’t question what had gone wrong. They sent me a new one, and told me to dispose of the old one. I expected it to be a hassle because it was a present, but the service was brilliant.
If any readers are looking to embark on a Customer Experience strategy, what advice and wisdom would you impart to increase their chances of success?
Firstly, I would say listen. Listen to your customers and listen to your people. Our people have most of the answers in terms of knowing what needs to be resolved and our customers add the context. Also, make sure your vision is clear and keep it simple. That way you will ensure everyone is aligned.
With Customer Experience it’s a continuum. It’s never-ending. So, it’s important to continue to keep on listening otherwise you will miss out on what now matters.
Scott, thanks very much for agreeing to meet up and share with readers the value and role Customer Experience is playing for the Bank of Cyprus UK. The crossover of commercial and customer in your role must give you a great perspective on creating ‘value’, but first, for those less familiar, perhaps you wouldn’t mind starting by sharing an introduction to the business.
Scott: The bank’s been operating in the UK since 1955. Originally it was a branch of a parent company, but now it’s a regulated company in its own right, authorised by the PRA and regulated by the FCA and PRA.
The business has evolved over time from its origin of being there to service the Cypriot community in the UK. Today we offer a range of deposit products and banking services for both the retail and business markets. We have developed a deep expertise in business lending, such as professional Buy-To-Let and Property Development Financing, and we have recently extended our range of products to include residential mortgages.
We are recognised by property entrepreneurs, business owners and retail customers as a challenging alternative. Our mission is for our customers, communities, and our people to prosper.
We are a business based on the strength of our people. Our people are key, not only because of our community-based heritage, but because they make our service stand out and differentiate the bank. Relationships are integral to what we do.
Do your customers typically have an association with Cyprus? And how, if at all, does that impact their banking Customer Experience expectations?
Scott: A significant proportion of our customers do have some connection to Cyprus, which is unsurprising because of our history – it’s where we’ve come from. We also have a significant number of Cypriot residents who hold an account with us. The link to Cyprus continues to be important to us.
For example, we help meet the needs of Cypriot students coming to study in the UK, by allowing them to set up accounts prior to relocating here, which makes for a smooth arrival in the UK and start to their studies.
However, today we find ourselves extending way beyond this. We have a growing number of customers who have no connection to Cyprus. In addition, we are extending our coverage throughout the UK. We have opened several offices throughout the UK to support our business growth.
In addition to London and Birmingham, we now also operate in Leeds, Manchester, Bristol and Brighton; and these locations were chosen for their proximity to businesses that would benefit from our offering.
We’ve started from a community, but we have grown, due to the quality of our service. Our customers have the same expectations of their bank as anyone else, and our purpose, first and foremost, is to fulfil our customers’ needs and help them realise their ambitions.
Having had the pleasure of meeting your Chief Executive Nick Fahy a couple of years ago, I know he is very committed to adding customer value through Customer Experience. How important would you say it is to have backing from the very top?
Scott: Absolutely critical. No one else in the business will take customer value seriously if it doesn’t come from the CEO and his top team. For example, the first item the senior team discusses at every meeting is the customer. It’s always the first topic on the agenda and permeates through other agenda items such as sales, regulatory changes and competitive/economic landscape.
Having worked at several retail banks, I haven’t seen that anywhere else.
That makes a strong point to the rest of the company. According to research by Prof Dr Phil Klaus, companies can be classed as Preservers, Transformers, and Vanguards, according to their commitment to and proficiency in Customer Experience. With those at the top end maximising the ROI from CX. Where would you say you are against that scale?
Scott: I’d say our ambition is to be a Vanguard. We are beyond what I’ve seen at other companies. We don’t just focus on fixing what’s broken.
We are more than preservers, I would say we are a Transformer because it applies to everything we do. Whether developing propositions, improving processes, optimising customer journeys, or other essentials, we are always looking to improve the experience for our customers.
For us CX is not a project, and it can’t be seen as just that. ‘Customer first’ is one of our five cultural values of the business. It impacts everything from our induction process, through to the business strategy, development plans, roles, and responsibilities.
At UK board level, the customer agenda is presented on a quarterly basis. We provide updates on our progress and our KPIs in CX which are our regular NPS and Effort (CES) scores to show how we are progressing. We also have additional metrics, but these two are key board views of Customer Experience performance.
We share what’s been delivered and what’s planned in the next quarter with the board. They will challenge our plans to make sure we are ambitious enough. However, they will also make sure we operate at the right pace so we don’t over promise. By doing so they are safeguarding the bank’s reputation.
You’ve been in your role for 18 months. In your time, how has Customer Experience moved forward?
Scott: The set-up of our Voice of The Customer programme happened just as I arrived. This programme delivers a benchmark for measuring customer experience.
Using the programme, we’ve been able to build our understanding of what drives key metrics such as NPS and CES. We’ve built other KPIs which, along with these two, are now tracked, reported and action plans are set each month for every function in the business. Our people care about hitting, and exceeding, targets because they know it means customers are happy with us.
There is a great ethos in the company – the customer is always central to what we do. We run customer forums with employees from all business functions. We look at the anecdotal feedback we receive from customers, not just complaints but mentions of inconvenience, as this is an early indication of where we can improve.
As a retail banking company, how are you finding the arrival of new ‘Internet of Things’ technology and the adoption of self-serving customers?
Scott: We will achieve our vision because of our people. It’s about our people, being supported by appropriate technology. If we can use technology to make doing business with us easier for customers we consider it. But we are careful how we develop things. More important is the employee engagement aspect. We need our employees to understand how technology helps our customers to bank with us.
As we extend our reach with our small business proposition, we will be evolving our digital support accordingly. As customers come on board through pure digital channels they will have different expectations of the experience we provide. We must adapt to that as well.
How do you keep the customer-first agenda alive within the organisation? What tools are you using to keep customer insight fresh and optimised for decision making?
Scott: ‘Customer-first’ has been a valuable focus for the last 18 months. It’s embedded from induction, but we also have regular awards for teams, for individual, for doing the right thing, for being your best etc. It’s important to have the customer embedded into celebrating success because it engages and motivates people.
We also run staff forums and surveys to gather suggestions on how we can do more for customers. We have very good VoC feedback rates, but staff hear things that aren’t always captured in VoC feedback, so we operate multiple means of feedback.
Being transparent about our customer experience performance is important, so we communicate directly with our customers on improvements we’ve made as a direct result of their feedback. We’re also developing a new set of ‘customer promises’ which we hope to launch soon, but these must be meaningful to our customers.
In addition to our relationship NPS measurement we’ve chosen to focus on Customer Effort rather than a transactional NPS. We feel it’s more meaningful to get a read on how hard or easy it is to do something with us.
In retail banking it’s often the ‘stress’ and ‘anxiety’ points of pain for customers when your CX must work its hardest. Is that fair or do you have positive moments where the Customer Experience can shine through?
Scott: I’d have to agree. Customers just want their bank to work the way they expect it to. Customer Experience is mainly focussed on reducing anxiety and stress points which can occur in customer journeys. We are building our taxonomy to better understand context and sentiment as well.
Customers want us to take care of issues. They expect us to take action when it goes wrong for them. You need to make contact as easy as possible whilst ensuring good security. You make a real difference when you make the customer experience simple.
Reflecting on the year behind us, are you where you want to be with your CX strategy?
Scott: Yes, we are very pleased. There are many regulatory changes occurring within financial services, particularly with the arrival of PSD2, GDPR, etc. All these initiatives directly impact Customer Experience. It is essential that we keep the Customer Experience in mind as we incorporate and adapt to new regulations. Ultimately, these regulatory changes are opportunities for improving the Customer Experience.
This highlights how far you’ve come with Customer Experience to be considering customers along with tech advancement and compliance adherence. Do you see Employee Experience as an essential part of this as well?
Scott: Yes, Employee Experience goes hand-in-hand with Customer Experience. We are always trying to improve on our Employee Experience and we are looking at how we improve team working across business units and work more closely together.
We are creating an internal service culture – serving one another. Which includes looking to embed awards and remuneration to make CX more than a number. With an effective employee engagement model in place, employees can do even more for our customers.
As you are making progress with Customer Experience, who do you look to for inspiration?
Scott: We obviously keep an eye on our direct competitors, whether they are dealing with business or retail customers or both. We are all striving to differentiate ourselves from the big banks by providing a better Customer Experience.
Beyond this we share great stories throughout the business based on personal experiences about the differences people can make in almost any industry. We care about our people so it’s important to find and share good evidence of CX that reinforce the importance of personal service.
As CX matures at the Bank, what will become the most important focus?
Scott: It is then about keeping it alive. When you’ve made the important changes, it’s a marginal gain business. I liken it to Dave Brailsford’s approach with the Team Sky cycling team – get the fundamentals right and then look for one percent improvements in areas that are being overlooked by others. The cumulative effect of small gains will add up to a remarkable improvement.
If we can ensure we have good levels of interaction, and keep delivering small and meaningful changes to delight our customers, we will be a success. This will also make sure our employees are engaged and enthusiastic. It’s important not to have CX tucked away as another function. It has to be embedded across the business in order to sustain CX and succeed.
Finally, if you had the chance to go back and tell yourself one piece of advice when it comes to driving CX forward, what would that wisdom have been?
Scott: I would say gather even better quality data on customers, much more beyond the transactional level normally associated with banks. For instance, enhancing your data with social media sentiments relating to what matters most in customers’ lives, away from their transactions and interactions with us. All you are trying to really do is understand what your customer expects from you and how you can exceed those expectations.
In the latest in my series on CX leaders, I caught up with Customer Experience Director at intu, Roger Binks to hear how their focus on customer is driving positive change.
With 400 million customer visits and over 35 million unique customer visits a year, intu’s scale and expertise gives the business a unique insight into today’s consumer and what they’re looking for from their shopping experience.
To understand the value of Customer Experience to intu and how they are redefining the shopping experience for consumers, we caught up with Roger to get the insight at intu.
CB: Roger, we’ve probably all experienced an intu shopping destination. Whilst I’m aware now of the positive efforts made to enhance the experience, when did this become a priority for the group?
RB: Really this became a priority for the company back in 2011, an exec level team then spent a year working on a strategy that launched in January 2013 alongside the physical rebrand to intu. This strategy brought together 15 separate shopping centres under one, consumer facing brand for the first time and was focused on creating and delivering a uniquely intu customer experience. I then joined the company in February 2013 as intu’s first Customer Experience Director. For both the shopping centre and retail industry at the time, this was a bold step but one that has really paid off in terms of clearly differentiating intu and helping to build loyalty within our customers.
CB: So, as we arrive in 2017, how far up the agenda has customer experience climbed?
RB: Customer experience sits at the heart of what we do at intu, every department thinks about the customer when planning and delivering their plans. We introduced NPS in 2014 and it’s now a KPI that is discussed at operational board meetings as well as customer experience and marketing meetings.
CB: Can you share the Customer strategy underpinning your progress?
RB: It’s quite simple, and to quote our chief executive, our purpose is to put a smile on the face of our customers. We want them to be happier after a visit to our centres than when they walked through the door.
To achieve this we focus on a couple of key areas.
Firstly, there’s insight. We focus on collecting, collating and communicating insight. To do this we use various methodologies from an online customer forum to accompanied shopping trips. The data is then transformed into information by benchmarking, tracking changes and aggregating data from various sources and overlaying external factors and customer behaviours. These rich insights are then shared internally, to ensure that every area of the business has the right insight in order to meet the changing needs of our customers.
We recognised that a focus on data analytics and research, even of the highest quality is not enough.
To be forward thinking and understand true behavioural changes the insight needs to be made accessible to those who can apply it to business opportunities.
In the customer experience team we then use our insight to create and curate new signature experiences that express our brand proposition of world class service, digital connectivity and events with a difference.
Joy jars, intugrams, the intu app are just some of the ways we provide moments of surprise and delight. We want our centres to provide extraordinary experiences for customers that encourage them to come back more often and stay longer. This in turn helps our retailers flourish.
Finally, and most importantly are our people. We build a culture of success. How we behave is an important part of the intu difference and a big reason behind our success. We encourage and equip all our employees to look at things differently and creatively, to consider carefully and then to act boldly and genuinely.
CB: Can you give me some examples of what you get up to?
RB: You can break this down into a few areas. As I’ve already said, our people are key. We’ve invested heavily in brand engagement and providing world class service training to all our staff, equipping them with the skills they need to deliver exceptional customers service, and empowering them to deliver against our brand promise through their day to day activities.
Then there is the physical environment we provide. Here, we’re creating spaces where people want to be and retailers need to be. So the focus of our short and medium-term development pipeline is on developing leisure opportunities and tasty places to eat. We also look at the smaller touches, such as the toilets, seating and car parking to ensure every touch point is a positive one.
And then there’s intu.co.uk. intu is still the only UK shopping centre landlord to have a multichannel shopping platform that adds even more to the compelling shopping experience we already provide. So from the comfort of your armchair, you can experience the intu brand and all that that means. Already ranked in the top 10 of affiliate websites, intu.co.uk gives our customers the choice of nearly 500 retailers, from John Lewis and Marks & Spencer to Topshop and River Island.
CB: That’s a great structure and must be very empowering for those employed to deliver the intu Customer Experience. From a customer’s perspective is it just the big things like size of centre and retailer choices that matter?
RB: No but it is a big part of the picture. We own some of the largest and most popular shopping centres across the UK. That success comes from our asset teams curating a great retail and leisure mix that attracts people from further and for longer. Rather than just popping to the shops, people come to our centres for a full day out experience combining shopping with dinner and a trip to one of our leisure attractions.
We also know a great customer experience starts with the small things and before you even arrive in a shopping centre. From planning your visit with the help of intu.co.uk, to how easy it is to get around the car park, to how welcoming our staff are. These issues are at the top of our list of priorities.
CB: How much does technology contribute to what is principally a bricks and mortar customer experience?
RB: It’s as important as it is in any sector. Our customers are digitally advanced using their devices to shop and communicate in real time in the centres. And with many of our retailers specialising in this area we need to stay ahead too. We know that today’s digital natives want, in fact expect, to be online wherever they are and at intu it’s no different. By investing in a new fibre optic network for every centre we’re able to offer high-quality free Wi-Fi in all our malls.
Our in-house digital innovation team has also developed the new intu app. It helps customers find their way around and gives them access to special offers based on their location in centre. It ensures the customer experience is as consistent digitally as it is in person.
CB: I’m a regular visitor to some of your shopping malls, and I can recall various special activities catching my children’s attention. How are you using the estate to create memorable experiences using third-party partners?
RB: Creating and curating events with a difference is an important part of the experience at our centres and through our national brand we’re engaging in new partnerships to bring uniquely creative events that will encourage more customers to visit and stay longer. Our size and scale means we can work with the world’s biggest brands to put on events and experiences that really draw in the crowds, from tie-ups with film companies like Warner Bros. and 20th Century Fox, to promotions with MasterCard, X-box and Coca Cola.
CB: It sounds like there is a philosophy which is underpinned with insight and rigour at intu. What is the perfect customer outcome and how do you achieve it?
CB: That’s been extremely insightful to hear more about the science and the art of delivering a memorable and commercially positive experience at intu. So, what can we expect in the future?
RB: At Christmas we launched our consumer facing proposition of “Your kind of shopping”, and a new ad campaign introducing people to our six shopper types and their respective ‘bird’ characters, from the bargain hunting Hawk to the ‘Me me me’ shopper represented by the Flamingo. This approach is helping us to create an emotional connection between our customers and our brand. We’ll continue to develop this proposition and communicate this externally through national and local media.
Insight will continue to be a priority for us, and we’ve got some great initiatives running looking at how we improve and evolve what we do, ensuring that our understanding of the customer continues to grow.
Finally, intu.co.uk remains a focus, as we look at how digital innovation can help the customer experience, both in-centre and online.
CB: It’s been a delight and an education Roger. I wish you all the best for the future and look forward to the latest Intu experiences the next time I head to the shopping centre.
Christopher Brooks is a regular contributor to Customer Experience Magazine, an Awards judge and Director of the Customer Experience Consultancy, Lexden.
Most companies now have a Voice of the Customer (VoC) programme in place. Often these are underpinned by technology platforms collecting customer data via feedback forms deployed following customer interactions with the business. These are served on the end of customer service calls, or emails triggered by customer purchase behaviour or links on the end of online chats, to name just a few methods.
It’s become big business for the research-technology companies managing these systems collecting and analysing this data. At Lexden, we have been helping clients navigate their way through scoping, searching, selecting and set-up of new VoC partners. So, we know first-hand how impactful this insight is on the business and how quickly the business becomes dependent on effective VoC.
Get it right and the insight will inform the direction of business travel. But if the quality of insight is impaired the whole Customer Experience (CX) Strategy can be in jeopardy.
We invited some Voice of the Customer practising experts to tell us what 2017 holds for them.
In summary, our participants highlighted that VoC is going from strength to strength, with growth planned in 2017. However, we heard that for it to remain integral to the business, more must be done in terms of data collection techniques and analytics capability, as well as improved application of VoC insights in the business.
Below is the feedback highlighting areas which will receive more attention and where investment and improvement in VoC is anticipated to be focussed in 2017.
Most respondents stated budget would rise in 2017. Speaking to one Head of Insight they said that previously they’d missed including VoC as a specific line on the budget submissions so had pulled in spare budget from ad hoc research to cover it, whereas now it has its own budget line.
At Lexden, we very much see Customer Experience as a business model. One which, if embraced by the board, will be aligned to objectives to drive greater profitability from more content and committed customers. So, whilst it’s still disappointing, it’s not altogether surprising that from a broader list of options three of the top five answers for what value VoC provides are principally tactical.
In fact, only 1 in 10 CX programmes achieve their ROI*, which is in line with these findings, where 11% use VoC results to lead investment decisions. Some commented that VoC is a support indicator in their business, but as leaders become more comfortable with customer experience's importance and impact on customers they expect this to change.
We run ‘Client Only VoC groups’ (free to join – new entrants always welcome) where practitioners meet to share customer feedback challenges and solve each other’s problems. So, these results very much reflect the discussions in the groups.
Answers 1-4 can be grouped as demonstrating strategic value of VoC. Which we highlighted was a shortfall on how VoC value is currently assessed. The primary focus will be on connecting VoC feedback to BI to express value in ways in which the business is more familiar such as sales and profit.
We are aware that CFO’s are now challenging Customer Experience investment which is proving to be difficult to specifically account for. But like sponsorship or brand investment, CX must be accounted for. The newer CX measures can identify much better where ROI is achieved against CX investment.
Conventional customer experience performance measures are not designed to provide such an answer, but are being used in this way. And whilst Net Promoter Score, Customer Satisfaction and Customer Effort have their role in focussing colleagues on customers, we’ve seen numerous client examples showing they have a low correlation to customer share of category accountability.
Along with this, the focus will be on getting the right customer insights to the right stakeholders internally in a format which is more accessible to them, such as business ready solutions. VoC often resides with Customer Insight which in turn is within the Customer Research function. For many it will also be about repositioning the function as a proactive intelligence centre in the business.
Answer 5 – ‘sentiment quality’, is worth dwelling on. The quality of sentiment analysis as well as coding and categorising of verbatim is a hotly debated topic. During some of the pitch meetings we ran with vendors in 2016 it was an area we found some vendors falling short on. Many systems are not able to interpret expressions such as “it’s sic, hah!” as positive or negative leading to mis-categorisation or exclusion of some insights.
There are now data collection tools which dynamically update questionnaires based on the customer’s feedback. But it’s still up there as an industry thorn to be removed.
This is an area where innovation will be driven from. Customers are bombarded with so many requests and it is impacting 1) who bothers to respond and 2) the quality of their response.
From personal experience, I receive between 4-5 feedback surveys from various companies I’ve interacted with on flights, in restaurants, at hotels etc. per trip. In addition, respondents to our survey expect new data sources to come through. With the arrival of data capture such as heart monitoring activity, companies such as Fit Bit, PlayStation and Apple will be able to read exactly how you felt when you fed back. This will provide great context.
Presentation of content features which is understandable when dealing with large amounts of complex data. A decent VoC reporting tool can take weak data and cut through with a less familiar audience. More customer video footage, live customer interactions and ‘issue demos’ are ways we anticipate this will grow.
We are currently trialling a simultaneous customer and employee feedback platform which enables the business to react quicker to a live data feed. It also has a compelling impact on audience confidence and trust in the data when it’s built by them.
So, there we have it. The future for VoC is bright, for 2017 at least. With much opportunity to make the feedback programme a part of the extended customer experience.
Hopefully, this will prove useful insight and the supporting narrative gives you some areas for further consideration.
Source: Invitees to Lexden’s ‘VoC Priorities for 2017’ Survey. Posted on Lexdengroup.WordPress.com and shared with VoC LinkedIn groups in Dec 2016.
In the latest of Lexden’s Customer Experience Leader’s series, CX Consultant Christopher Brooks catches up with British Gas’ Richard Shenton to hear how they are switching on to CX.
Richard is responsible for Customer Experience and Continuous Improvement within part of the most popular of the Centrica brands. With experience in sectors including Home Shopping, Financial Services and Venture philanthropy, I spoke to Richard three years after joining British Gas, to hear how they are working hard to put customers at the top of the agenda today, and what CX advancements we can look forward to in the future.
Christopher: Let’s start at the beginning, how would you define customer experience?
Richard: For me, customer experience is the perception customers have during their interactions with an organisation. As they say, “beauty is in the eye of the beholder”, and that’s what makes customer experience improvement so exciting.
Christopher: What would you say are the key priorities for any company serious about putting cus-tomers first in their organisation?
It’s a cliché, but buy-in from the top is critical, otherwise it will be pushed aside as soon as more commercially focused priorities come along. Leaders also need to be prepared to make some priority calls when it comes to striking a balance between commercial vs. customer focused improvements.
The organisation’s CX strategy needs to align with its commercial strategy, they should even influence each other at times. Far too many organisations have ambitions to ‘grow’ when they don’t have the ‘basic customer experiences’ in place for existing customers. If alignment is achieved, organisations can use the customer experience as a vehicle to grow.
Even then a strategy is useless, unless people within the organisation believe in it and care about it succeeding.
Finally, articulate the commercial benefit of improving the Customer Experience. This not only helps when making some of those tough customer vs. commercial decisions, but shows that customer experience can actually be a commercial activity.
Christopher: 90% of CX programmes fail (source: Prof Dr Phil Klaus). Measuring the wrong things is cited as a key driver – what are your thoughts on this?
Richard: I can see that. Organisations tend to measure Net Promoter Score (NPS) without gaining any actionable insight to improve. The other pitfall tends to manifest itself in leadership chasing a number or target for NPS. Often you can’t even find the person who set it or what it means for the organisation if you reach it.
This ‘chasing a number’ can lead to tactical or superficial improvements that are not sustainable and don’t address the root-causes of problems. In my experience, defining the ‘full potential’ and working backwards is a much better approach than chasing a number that’s been plucked out of thin-air.
Christopher: You’ve been driving customer experience and continuous improvement at British Gas. That’s a broad brief. What does it involve?
Richard: Our approach covers four phases; ’Insight-Strategy-Delivery-Sustain’. It starts with the diagnosis phase using internal and external sources of insight. From this we can determine our ‘full potential’. Once we know where the improvement opportunities are and we size the benefit, then we articulate our aspirations and goals through our CX Strategy.
We then deliver those interventions by working with our Customer Journey teams to understand where gaps exist today. Then our Journey Design and Continuous Improvement teams to help us deliver the changes.
Christopher: How do you ensure improvements stick?
Richard: We ensure we get to the root cause. We use team based problem solving activity to achieve this. We recognise customer needs in each Journey through the development of what we call ‘Customer Journey Blueprints’. These help articulate how the customer experience should look, and how we want customers to feel as they interact with us.
Employee engagement is also a huge part of ensuring these changes stick, which is why we focus on ensuring behaviours encourage a customer-centric culture during any change activity.
Christopher: Despite their importance to us, utilities come pretty low on customer’s agenda until something goes wrong. Is it all about firefighting?
If we do our job well, then the customer should rarely need to speak to us. However, like any business we need to recognise when customers want to engage with us, and when they’re happy for things to just work in the background.
Making a regular payment to your energy supplier isn’t anything particularly exciting, however, when the time comes to look for a better energy tariff, customers need all of our support, to help them get the best deal that suits their needs.
Christopher: Many organisations have set up group facilities to ensure best practice and consistency across various departments, products or even brands. How difficult is this to achieve and what are you doing at Centrica to make it work?
It’s implicit within our org-design really. We’ve adopted a ‘Hubs and Spokes’ model, where best-practice and improvement approaches are supported by a centre of excellence, with the ownership sitting within the operational areas using common approaches and principles but tailored to the specific environment or culture.
This year we’ve also started using the 5-Day SPRINT approach to designing customer journeys. This means we have our customers helping us design, test and refine ideas before we implement them.
Christopher: CX is evolving fast, what do you think the major trends in your sector will be?
Richard: For the energy sector, the biggest evolution is in Smart Metering. It turns our business model on its head. We’re no longer reliant on customers to provide meter readings. This means that energy suppliers really need to re-evaluate their current customer experiences and back-office processes in place to execute them.
Christopher: There’s some great insights Richard, thank you. I’m sure anyone involved in CX will be inspired by what they’ve read. So my final question is, who inspires you in CX?
It might sound trite to say Apple or Amazon, but take Amazon. They’ve cracked it with “One-Click”, “Subscribe and Save” and “Dash Buttons” – these all make customers lives easier – and ultimately lead to increased loyalty. Another brand that I admire is Ocado.
Christopher: Ocado doesn’t always come up so explain why they make it to the top of your list.
Richard: Well it’s what they do to make your life easier. Ocado drivers take time when they deliver your shopping, nothing is too much trouble so the experience becomes personal. They’ve even gone as far as re-designing their carrier bags – it’s the most over-engineered plastic bag that exists. Handles are bigger than usual, the actual bag is like a Tardis and they’ve colour coded them by type of goods i.e. Fridge, Cupboard etc. This is all done to benefit the customer, however it’s also designed to help storage and collection for the delivery driver en-route.
Ocado have also done a great job in thinking about their customers post introduction of the 5p carrier bag tax. Instead of offering a “bagless” delivery service they’ve opted to refund the cost if you return the bags to the driver at your next delivery.
Christopher: Richard, it’s been great speaking with you. There’s much that many will be able to take away and look to put to good practice straight away. Good luck with your future endeavors.
In a continuation of Lexden’s series of interviews with Customer Experience leaders, MD Christopher Brooks caught up with Kent Reliance’s Head of Customer Strategy & Insight, Stephen Plimmer, to better understand the role of CX at OSB.
Christopher Brooks (CB): OneSavings Bank won several awards this year for its financial services. Does the recognition come as a surprise or is this something you’ve been working towards for some time?
Stephen Plimmer (SP): For us, this was never a completely new way of thinking. The Marketing function always understood how important customer loyalty and experience was. But as a function, knowing that isn’t enough. The whole organisation has to be on-board and understand it and that’s what we’ve been working on over the last few years.
We’re lucky in that we have fantastic customer facing staff, both in branches and over in our call team. How we improved our customer service was a key part of our Customer Experience story and the recognition is all to do with their dedication and enthusiasm. Customer Service is such a key part of the overall customer experience.
Our call team always wanted to deliver exceptional service, to go beyond expectation and to embrace Customer Experience Management. Listening to customer feedback helped empower them to do so.
CB: Would it be fair to say OneSavings Bank is a relatively new brand for consumers? With a very busy banking services market well established and a host of distinctive new entrants arriving, what is OneSavings Bank bringing that others have failed to do?
SP: OneSavings Bank trades as Kent Reliance, a brand with over 150 years of heritage. When we started out on the Customer Experience programme we needed to know how important that brand name was to people and what it meant. We conducted several focus groups and surveys and aside from spitfires and the White Cliffs of Dover (something people always associate with the country) the recurring themes were around words like traditional, heritage and trusted.
There was a clear affection for the brand and of course at the time, most high street banking brands were considered quite the opposite. We discovered that many customers wanted a brand they felt they could trust, a need for those values. We just needed to make sure we understood and lived up to them. It was from this research we were able to start planning our Customer Experience programme – by setting clear objectives.
CB: Does a digital age increase the challenge for FS brands to deliver a great customer experience, or can it improve things?
SP: It’s a fast evolving sector, mobile technology, greater expectations over speed of transacting; instant gratification and confidence in security are some of our greatest challenges. It is an incredibly competitive market now, with lots of new entrants. It’s about understanding your customer’s requirements and, if you can, staying one step ahead of that. I think that can only improve things but we’re not losing sight of the fact that not all our customers need great customer service delivered only online.
Many expect the same level of service in branch and over the phone. It’s about delivering that consistency of service across all channels.
CB: ICS (Institute of Customer Service) figures show that customer satisfaction has dropped despite more firms investing in it, so do you think this is a reflection of customer expectations increasing, a focus on the wrong things by companies or is there something else to consider here?
SP: I think expectation levels have certainly played a part. I also think that although Customer Service is an incredibly important part of delivering a great customer experience, I think many people still think customer service and customer experience are the same thing.
Customer Experience is in fact the sum of the whole, customer service playing an important part in that, but it is also about brand perception, relationship building, understanding your customers – what they like and dislike. It’s about delivering the brand qualities consistently across all channels and during the entire customer life cycle.
CB: Collecting the awards for CX demonstrates it’s a key priority of your overall proposition, how important overall would you say it is for OSB?
SP: It’s very important. We continuously engage with our customers and measure experience at every touchpoint. For Kent Reliance this has enabled a business transformation rather than a marketing function revolution.
The crucial part was getting all customer facing functions on-board, otherwise you are just producing metrics. Unless all customer facing functions, and ultimately the business strategy units understand what customers were telling us, then key indicators are pointless.
CB: With trust from consumers being typically low in FS, do you think delivering great CX in financial services has its unique challenges other sectors do not face?
SP: There are so many alternatives in the FS sector now that product differentials and relying on customer inertia (as some probably still do) is no longer going to cut it. You need to be easy to deal with and you need to understand just how your customers want to deal with you. Gaining that understanding and then secondly delivering it is key.
CB: Do you think CX is a viable approach to demonstrate and deliver a more trusted brand to consumers?
SP: Trust was one of the key words associated with our brand and one of the traits we are naturally always working hard to retain. Our Customer Experience programme looks at these brand traits and makes sure we keep coming back to them in all we do.
CB: Can you provide an outline on your winning entry and why you think the judges saw merit in your submission beating retail giants RBS and Santander among others?
SP: The entry was around how we had engaged with our customer base, understanding their perceptions of us and what was important to them. This knowledge then prioritised operational change projects and channel development.
When the guest speaker joked at the start of the evening about a poor experience he once had was probably because the hotel group in question had put an accountant in charge of customer experience – the joke wasn’t lost on my colleagues around the table. But actually, my management accounting background has proved incredibly helpful when it comes to Customer Experience programme. From the very start I wanted to track and prove the impact the programme was having. And I think it was that evidence and the clear targets we set ourselves that made the difference.
CB: What would you say has been the key milestones or step changes at OSB in bringing customers more to the forefront of business decision making?
SP: Understanding what our brand meant to customers – existing and potential new ones. From that setting clear objectives to make sure the actual experience was consistent with what our customers wanted.
We gathered an in depth understanding of our customer base, from which we could segment and better understand their needs and how they wanted to transact with us.
We worked with a third party survey provider which allowed us to automate and expand surveying, also providing us with alert functions to be able to gather feedback across all channels and touchpoints – some in real time.
CB: Your CEO, Andy Golding has been associated with some more innovative Customer-led financial services companies in recent times. How important is it to have a CEO who backs the customer too?
SP: Within Andy’s first week here he wanted to sit down with key internal stakeholders and understand what our customers were saying about us; what they liked and disliked and how they rated each channel. Since then, customer feedback has helped prioritise all operational changes – what the operational managers needed to change or improve. He receives detailed customer MI, not just metrics but verbatim – what his customers are actually saying about the business.
We needed the whole business on-board if our customer experience programme was to be a success and having a CEO who feels passionately about delivering great customer service naturally helps convince people.
CB: What would you say is your proudest moment so far at OSB?
SP: There are many projects and initiatives that we’ve been a part of, but I would say the work we did with one of the call teams stands out.
New regulations across the mortgage market led to the call team struggling to answer even the simplest of customer queries; this led to poor CX metric scores and customer frustration. Working with various teams from across the business we were able to provide the call team members with training and simple to follow guides for dealing with customer calls. From call monitoring and understanding the issues customers were facing, we were able to improve the call team’s score dramatically; literally overnight. The call team were able to deliver a far better service which made them more confident which in turn we could see made a very positive impression on our customers.
The team are still improving and learning. It was a fantastic ‘quick win’ which really got them engaged with the customer experience programme.
CB: So the journey has started, what’s next for OSB and what can we expect to see you doing to wow your customers?
SP: More employee engagement, we are redefining our desired employee behaviours and making sure they are aligned to the brand image.
We are also increasing the research programmes, competitor analysis and using NPS from a more strategic perspective.
CB: Who do you admire most in terms of CX – either FS or beyond, and why?
SP: Some of the names here will probably be of no surprise, but in my experience it’s Amazon and John Lewis. Amazon make it easy to transact with and in terms of the whole business brand experience it’s John Lewis. For me, it’s the whole end to end process and in particular post sales. I’m confident that even if there was a problem post sale – it would get resolved. It’s about staff delivering the brand and ease of transaction.
CB: We are talking customer experience; can you give me a personal example of brilliant customer experience from any part of your life, not just financial services, you can recall you really liked and remember?
SP: I always struggle to recall a brilliant experience; like many consumers I can usually recall bad ones very easily. A certain laptop/tablet manufacturer springs to mind.
CB: So getting it right for customers clearly matters to you at OSB. How do you keep track of what matters most to customers? Are these enduring or changing needs?
SP: As we’ve said, this is a fast moving sector with lots of new entrants. For us, it was always more about the verbatim, monitoring shifts in verbal feedback patterns to know first what our customers wanted, liked or disliked and then from acting on that how that changed customer sentiment. Not just a score. A score is just a way of tracking, but it doesn’t tell you why it is what it is and how to change it.
We have also recently launched an online focus group. A panel of customers that we can engage with on specific topics. This allows us to research a new product concept or test new literature to make sure we are getting it right.
We also produce detailed journey maps, into which we put customer sentiment, scores measured at various touch points and data from the complaints team. We then use these maps when looking at key journeys with operational managers so that we can see how we can improve things – see what the pain points are for customers and how we can make these better. Sometimes this is as simple as making a letter clearer but then sometimes the whole process is re-engineered.
CB: Finally, there are many firms just waking up to CX (customer experience). What wisdom would you give anyone starting out on their venture?
SP: Have clear objectives by gaining a deep understanding of current perception of your brand and how this compares to where you want to position it. Let the voice of the customer prioritise change and get buy-in from the highest level.
Also, demonstrate some quick wins, if there is mistrust of CX Strategy then demonstrating how effective it can be helps change perceptions. This doesn’t have to be a profit measure or a traditional CX metric, but more helpful is when you can evidence that you have reduced call wait times or complaints about a specific process – these are real impacts for both customers and staff.
Finally, make sure you take everyone on the journey with you – staff and customers.
Many thanks to Stephen and we wish him and Kent Reliance continued success.
By Christopher Brooks: Continuing my series of interviews with heads of customer experience, I caught up with Toni Adams, Head of Customer Experience at Carpetright.
Based in Purfleet, Essex the retail giant serves almost 600 stores across Europe. From under one roof everyone including marketing, accounts, customer experience, the board, storage, customer service and cutting operates. They’ve come a long way in the last 30 years since the first store was opened in Canning Town, founded by Lord Harris in November 1988.
As I arrived at their offices I was greeted by a delightful receptionist who showed a personalised greeting I’m sure would be envied by any high street retailer. Toni, who joined Carpetright from Nationwide at the start of 2015 took me on a guided tour of the business, where I soon discovered the delightful welcome offered at reception was a repeated trait from everyone I met.
Following a trip to the new concept store close at hand and often visited by head office staff to remind themselves what customers experience, I wanted to find out how much of the recent return to profit had been down to a commitment to customer.
CB: Thank you for an impressive tour. Earlier this year Carpetright posted an equally impressive return to profit across the group. Clearly you’ve invested in customer experience – is there a link between the two?
Absolutely. We have spent a lot of time reviewing our customer’s journey as we needed to better understand what our customers wanted. It has helped us make sure the products we stock and the services we provide are right for our customers and that they are available when customers want them. We have adopted an end to end journey focus – moving from silo to seamless. We’ve come a long way in a short time, realising the key moments of truth and aligning processes to them. There is always more to do, such as systems work to further enhance CRM.
CB: You have joined Carpetright from Nationwide; a company recognised for their focus on customer service where much has been already complete. So how different were the challenges you faced when you started at Carpetright?
When I came to Carpetright I was looking forward to the new challenge. The culture needed to shift from sales to service. We have put in place a new customer feedback programme called ‘Do We Measure Up?’ and we use what we hear to put the customer firmly at the centre of the business. One of the first things to change with this programme was to take ownership of customer issues from stores. We have over 142,000 surveys completed and 97% are satisfied or highly satisfied. ‘Do We Measure Up?’ is embedded in the business which allows us to focus on delivering our customer promise of doing the right thing, which relates to our brand values. This has also meant internally thinking about stores differently. We think and treat them as our customers, which in turn means they think more about their customers rather than worrying about level of support from head office.
CB: Is the expression ‘customer is king’ still relevant in retailing today?
Trends from The Institute of Customer Service (ICS) show satisfaction levels have been declining over the last five years, while customer expectations are increasing. Product, price and processes can be copied, but customers won't forget how the experience made them feel, so it is the emotional differentiator. People buy people first. So the challenge is to ensure that the spark and connection that comes from great people understanding customers is a constant rather than sporadic.
CB: From all you’ve said, CX is clearly becoming a priority at Carpetright? What would you say has driven this?
Customer Experience certainly is at the centre of everything here. Wilf (Walsh, CEO) is passionate about making this happen. He personally attended my interview which said to me he was serious about CX. We have a top down leadership approach surrounded by customer-centric people who reinforce our value of exceeding expectations by putting customers first. Our feedback shows this is happening. The content at our internal roadshows is now focused on the customer. In fact, this year’s annual conference is prioritising customers, whereas beforehand that would have been sales.
CB: Are there any specifics about your sector that makes creating a brilliant customer experience more challenging than other sectors?
We are in our customers’ homes. It’s a sensitive purchase so how we handle it is very important. We look to respect our customer’s home as if it was our own.
CB: Has digital changed how you deal with customers?
Customers understandably want to touch and feel carpets and other flooring products. But we are using digital to make other parts of the journey easier, such as researching products. Initiatives such as centralised estimating allow customers to book a slot for a Carpetright estimate based on who is available in the area, rather than the estimator being solely linked to the store they visited. We are particularly pleased with how well the 'find an estimator' initiative has worked, which has meant customers are visited by an estimator earlier. We've been shortlisted for this year's CX Retail awards, which is great for us.
CB: You’ve been involved in customer experience for much of your career, so what do you find most interesting about this area?
It’s part of who I am. My parents ran a hotel, so I grew up in a service environment. I’ve always been considerate of customers because of it. I love the challenge of making something work better than it did and seeing the results.
CB: So are you pleased with the progress Carpetright is making?
We’ve made good progress and huge changes. It’s a cliché but we are on a journey. We want to ensure our customers have a seamless, hassle-free end to end journey with great service each and every time. This means customer performance targets managed through HR, aligned to our customer promise to continue to drive the right colleague behaviours for our customers. Colleagues who have demonstrated ‘going the extra mile’ for customers’, have been nominated for our Customer Champion Award presented at the annual conference. Wilf rang each nominee up to tell them they had been shortlisted which he said was amongst the best things he’s done since starting. In fact, the winner will become a ‘customer ambassador’ for the year as an example to all others of how committed Carpetright is to putting customers first.
CB: It’s been great hearing about how the customer first philosophy invested in Carpetright is measuring up for customers. Is there any wisdom you have for anyone starting out on their own customer experience venture?
I’d say you need to decide what you want the customer experience to be and then you can build your business decisions with that in mind. Also make sure all areas work together from the start and throughout. It’s a company-wide thing rather than silo driven.
CB: Thank you for your time and candid answers. Best of luck with your future experience endeavours and award entries!
On 25th September I sat down to watch five heavyweights of banking take each other on in a battle of customer experience supremacy. Visa, RBS, NatWest, Virgin Money and Nationwide presented their customer experience initiatives to me and four fellow judges. As MD of Lexden, an independent customer experience agency, I am used to working with senior stakeholders to demonstrate the advantage of improvements. But that didn’t stop me feeling anything less than extremely privileged to chair the group.
As judges we receive the written entries a couple of weeks before to review and grade. Then on the day each company presents their entry. It’s then you get to see the passion for putting the customer first and can share the challenges they’ve had to get to this place. It’s inspiring and makes you realise we work in a brilliant industry when you watch the fabulous entries unpacked and presented for customer and commercial benefit.
Having judged discipline awards such as direct marketing, PR and loyalty you find companies are separated by size as well as sector. With the CX Awards all sized companies can compete alongside each other for categories such as ‘best use of customer insight’ or ‘team of the year’. I was chatting to a team from a smaller finance processing house who had been up against high street brands from other sectors, which made them feel great even though they hadn’t won on this occasion. The reason this can happen is unlike many other disciplines, the customer is the common currency here, which results in such diversity.
On that point of ‘broad cross section’, there is still something that every one of the 2,000 people Awards International attract (and wonderfully look after) at this event has in common; everyone is genuinely passionate about putting the customer first. Some awards collections were delayed as the recipients were in floods of tears such their commitment to that cause. It was emotional to see how much companies want to do the right thing.
Of course it’s more than just interesting for us at Lexden as an Independent Customer Strategy Consultancy helping clients to achieve just that.
I saw many clients, former colleagues and friends at the event. But two stood out for me because they have offers which are not core to customer experience, but highlight how this area really is growing into a leading industry in business and marketing.
I met up with judges, Jo and Kate. They run a growing recruitment company called CX Talent DEDICATED to customer experience. The importance of the DEDICATED bit cannot be stressed enough. Having helped a client recruit a customer insight and customer experience team from a generalist recruiter before it is a nightmare. With two minutes I could tell they knew their onions and their text analytics and their NPS from their EXQ. No recruiter should be without them!
I also met Millie and her colleagues from Boost Marketing. A company dedicated to helping people win awards – what a job! They are fiercely secretive about who they work with but they seemed to be smiling an awful lot when winners were announced. My conclusion being they are very successful at what they do! The conversations I had at our table with Millie suggested a real depth of understanding and interest in CX, which is reassuring if you appoint them to help shape your award entry submissions or help with your award entry presentation.
So there we are, why I enjoy these awards so much. Fortunately the industry is growing and with the quality of submissions each year improving I conclude we are in good shape too.
I am looking forward to helping clients win more awards and judging even better entries as the event which celebrates the very best the CX industry has to offer.
The utility sector has been through some tough times, with press scrutiny, regulatory pressure and customers who are starting to vote with their feet, but despite this backdrop npower, spearheaded by Kelly Iles (Head of Customer Experience), is determined to embed a customer first strategy in order to gain back the trust of customers in the energy sector.
Christopher Brooks, Managing Director from Lexden Customer Strategy Consultants caught up with Kelly to find out just what npower has in store for its customers.
Christopher: You’ve been with npower since 2012. It’s a sector which is striving to provide better customer experience and has a way to go in this space. As head of customer experience at npower, what are your key responsibilities in driving npower’s customer agenda forward?
We have come a long way, npower has put a lot of focus and effort into making it better for our customers but it’s fair to say there’s still much more work to do. Our mission to achieve this should never stop. My team’s remit is to be the voice of the customer, championing what they want, need and deserve. I have the accountability and authority to set our customer experience improvement agenda which for us right now is getting back to the basics and delivering the energy experience that customers expect. This means addressing core processes, people capability, systems, communications as well as changing the culture of the organisation. It’s a pretty full on role!
Christopher: Already I can tell you are clearly passionate about customer experience, what do you find most interesting about CX?
I love the fact that CX touches every facet of the business. There are no hiding places; all areas of the business are involved in the delivery of a seamless experience and to make it work, activities need to bring business and functional silo’s together, which has always been a management challenge.
Christopher: So what is your ‘customer first’ ambition for the company?
We’ve only been serious about building CX capability in the last 3 years. It requires a wholesale business transformation and we continue to move through the different stages of maturity. Whilst this started as a programme ultimately building an enduring capability and culture as well as a well-recognised discipline is our aim.
The core stages are; 1) building a customer insight capability – to understand the issues are customers experience and to measure our progress and performance, 2) map the customer journey to understand when, how and why these issues occur, 3) build and execute an effective improvement plan and finally, 4) embed the methodology, approach, ethos, and culture so that it becomes everyone’s responsibility. Christopher: What’s driving CX up the utility sector’s agenda?
CX has become a core priority across the entire sector. As choice widens and customers become less inert, energy providers see margins being squeezed and commoditisation increasing; however, for sustainable growth pricing can't be the only answer. As a result providers are recognising that offering a good service may mean customers are less likely to shop around and might move away from choosing their provider based solely upon price.
Ultimately retention of customers becomes key and delivering a great experience will help to build long standing sustainable customer relationships. Christopher: In the insurance sector the metric is ‘effort’. In a sector such as utility which is very much an essential service, what are the priority areas of CX improvement to impress customers?
Opportunities to delight and impress customers are far less than in other sectors such as retailers. Like insurance firms, the key is to make it as easy as possible for customers to do business with us, effortless in fact. Developments such as SMART and the introduction of digital technology for example our new energy app allows our customers to track usage, manage their energy usage and ultimately keep costs as low as possible. For us it gives an opportunity to build engagement with our customers as well as giving us data that can be used to build a better picture of our customers upon which to offer more targeted products and services.
Christopher: It sounds as if there is much going on, can you give me an example of a one of those improvements made for customers?
Listening to our customers we understood the anxiety that a house move creates. During any home move, customers have lots of other things to sort, and managing the change of energy supply to their new property is the least of their priorities. Our processes made customers contact us at a time suitable to us and we were only prepared to process home moves through our telephone channel. We quickly realised we could do a lot to make this process much less effort and one less thing to worry about at the time of the move. As a result we've digitised the whole journey and removed the restrictive contact window so that customers can inform us of their home move when it suits them. At the same time we built key checkpoints so we are able to reassure the customer that everything is going through as planned.
Christopher: You’ve mentioned a number of customer improvements being made. Where do the drivers for improvement come from?
Our Voice of the Customer programme is complemented by our Voice of the People and Voice of the Process programmes. This gives us a complete view of what's happening to both our customers and our people, and why. By bringing together multiple data sources and developing insight, we are able to clearly see the priority customer issues that need to be addressed. Our focus is on what matters to the customer.
Christopher: What are the contributors to your CX programme you value the most?
There are many areas, but three I’ll highlight. Firstly, it needs the support and buyin from the snr leadership team which will ensure that CX remains on the agenda. The leadership team need to take ownership, set the agenda and ensure followership. Secondly, our people on the ground. These are the team that deliver the experience to the customer day in, day out. They also know what the issues are and often how to fix them. Listening, empowering and giving them the accountability to make a difference for customers is vital.
Finally, the ability to upskill and embed CX capability into the DNA of the organisation. For this I look to my team who have the right skills and expertise to work across the business and define what good looks like. This could be practically how you delivery change in a customer centric way right through to building the right operational lead metrics to monitor and evaluate CX change.
Christopher: Are you pleased with the progress you are making?
We’ve come a long way but CX isn’t a project, it doesn’t stop. There is always a better way to serve the customer. Real-time feedback as a measure shows the power of ‘in the moment’ feedback. It gives you the opportunity to address a poor experience and to build advocacy through heroic recovery activity. To take a customer whose expectations haven’t been met and then exceed offersa powerful opportunity to build loyalty.
Christopher: Who do you look to for customer first thinking inspiration?
For me, I think those companies who just make the whole interaction effortless impress me the most. The AA breakdown service, for example – I was on my own when I found myself stuck at the side of a road; they asked me specifically whether I was accompanied and then applied a very targeted experience based upon my situation: text updates to manage what was going on and even a message to help me recognise the recovery vehicle (the driver flicking his lights). Importantly, it was executed perfectly and my expectations were met.
The winners in the industry will be those who get the basics right, make interacting with the company seamless and then ongoing, build a proactive relationship with the customer that he or she values. Using data and insight will be key so we can put customers back in control. Ultimately, helping them to manage their energy more effectively.
Christopher: it’s been so insightful, your passion is infectious and your expertise evident. So how could you help an organisation just waking up to the potential of customer experience?
Okay, so I’d have to say strong leadership is key. It can get ugly and you need to be prepared to go through the journey. Leaders need to believe and recognise the phases you will go through. They will also help ensure you get your message out there in the organisation. Also it takes time – there are no short cuts. Many organisations transformation programmes can take up to 10 years. Perhaps most important of all, be relentless in your quest. Never give up. It’shard work but the rewards are great.
Christopher: Kelly that’s great. I’ve seen you at the CX Awards, so you are obviously doing the right things. It’s been a pleasure hearing more about where you’ve come from and where you are going. All the best with your mission. Thank you.
Utilities: Is Tesla Friend or Foe?
|
# Copyright (c) 2008-2021 the MRtrix3 contributors.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Covered Software is provided under this License on an "as is"
# basis, without warranty of any kind, either expressed, implied, or
# statutory, including, without limitation, warranties that the
# Covered Software is free of defects, merchantable, fit for a
# particular purpose or non-infringing.
# See the Mozilla Public License v. 2.0 for more details.
#
# For more details, see http://www.mrtrix.org/.
import math, os, shutil
from mrtrix3 import MRtrixError
from mrtrix3 import app, image, matrix, path, run
def usage(base_parser, subparsers): #pylint: disable=unused-variable
  """Register the command-line interface for the 'tax' algorithm."""
  parser = subparsers.add_parser('tax', parents=[base_parser])
  parser.set_author('Robert E. Smith (robert.smith@florey.edu.au)')
  parser.set_synopsis('Use the Tax et al. (2014) recursive calibration algorithm for single-fibre voxel selection and response function estimation')
  parser.add_citation('Tax, C. M.; Jeurissen, B.; Vos, S. B.; Viergever, M. A. & Leemans, A. Recursive calibration of the fiber response function for spherical deconvolution of diffusion MRI data. NeuroImage, 2014, 86, 67-80')
  # Positional arguments
  parser.add_argument('input', help='The input DWI')
  parser.add_argument('output', help='The output response function text file')
  # Algorithm-specific tuning parameters
  tax_options = parser.add_argument_group("Options specific to the 'tax' algorithm")
  tax_options.add_argument('-peak_ratio', type=float, default=0.1, help='Second-to-first-peak amplitude ratio threshold')
  tax_options.add_argument('-max_iters', type=int, default=20, help='Maximum number of iterations')
  tax_options.add_argument('-convergence', type=float, default=0.5, help='Percentile change in any RF coefficient required to continue iterating')
def check_output_paths(): #pylint: disable=unused-variable
  """Verify that the requested output location is writable before running."""
  app.check_output_path(app.ARGS.output)
def get_inputs(): #pylint: disable=unused-variable
  """The 'tax' algorithm requires no additional input files; do nothing."""
def needs_single_shell(): #pylint: disable=unused-variable
  """Indicate that this algorithm operates on single-shell DWI data."""
  return True
def execute(): #pylint: disable=unused-variable
  """Run the Tax et al. (2014) recursive calibration loop.

  Alternates between spherical deconvolution (CSD) using the current
  response function estimate and re-selection of single-fibre voxels
  (those whose second FOD peak amplitude is small relative to the first),
  terminating when no RF coefficient changes by more than the requested
  percentage, or when the iteration limit is reached.

  Operates on scratch-directory files ('dwi.mif', 'mask.mif') prepared by
  the surrounding dwi2response framework; writes 'response.txt' and
  'voxels.mif', then copies the response to the user-specified output.
  """
  lmax_option = ''
  if app.ARGS.lmax:
    # Leading space is intentional: this string is appended verbatim to
    # the amp2response command line below.
    lmax_option = ' -lmax ' + app.ARGS.lmax
  # User expresses convergence as a percentage; convert to a fraction
  convergence_change = 0.01 * app.ARGS.convergence
  progress = app.ProgressBar('Optimising')
  iteration = 0
  while iteration < app.ARGS.max_iters:
    prefix = 'iter' + str(iteration) + '_'
    # How to initialise response function?
    # old dwi2response command used mean & standard deviation of DWI data; however
    # this may force the output FODs to lmax=2 at the first iteration
    # Chantal used a tensor with low FA, but it'd be preferable to get the scaling right
    # Other option is to do as before, but get the ratio between l=0 and l=2, and
    # generate l=4,6,... using that amplitude ratio
    if iteration == 0:
      rf_in_path = 'init_RF.txt'
      mask_in_path = 'mask.mif'
      # Grab the mean and standard deviation across all volumes in a single mrstats call
      # Also scale them to reflect the fact that we're moving to the SH basis
      image_stats = image.statistics('dwi.mif', mask='mask.mif', allvolumes=True)
      mean = image_stats.mean * math.sqrt(4.0 * math.pi)
      std = image_stats.std * math.sqrt(4.0 * math.pi)
      # Now produce the initial response function
      # Let's only do it to lmax 4
      init_rf = [ str(mean), str(-0.5*std), str(0.25*std*std/mean) ]
      with open('init_RF.txt', 'w') as init_rf_file:
        init_rf_file.write(' '.join(init_rf))
    else:
      # Subsequent iterations start from the previous iteration's RF
      # estimate and single-fibre voxel mask
      rf_in_path = 'iter' + str(iteration-1) + '_RF.txt'
      mask_in_path = 'iter' + str(iteration-1) + '_SF.mif'
    # Run CSD
    run.command('dwi2fod csd dwi.mif ' + rf_in_path + ' ' + prefix + 'FOD.mif -mask ' + mask_in_path)
    # Get amplitudes of two largest peaks, and directions of largest
    run.command('fod2fixel ' + prefix + 'FOD.mif ' + prefix + 'fixel -peak peaks.mif -mask ' + mask_in_path + ' -fmls_no_thresholds')
    app.cleanup(prefix + 'FOD.mif')
    run.command('fixel2voxel ' + prefix + 'fixel/peaks.mif none ' + prefix + 'amps.mif')
    # Split the per-fixel peak amplitudes into separate 3D images for the
    # first and second peaks
    run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'first_peaks.mif -coord 3 0 -axes 0,1,2')
    run.command('mrconvert ' + prefix + 'amps.mif ' + prefix + 'second_peaks.mif -coord 3 1 -axes 0,1,2')
    app.cleanup(prefix + 'amps.mif')
    run.command('fixel2peaks ' + prefix + 'fixel/directions.mif ' + prefix + 'first_dir.mif -number 1')
    app.cleanup(prefix + 'fixel')
    # Revise single-fibre voxel selection based on ratio of tallest to second-tallest peak
    run.command('mrcalc ' + prefix + 'second_peaks.mif ' + prefix + 'first_peaks.mif -div ' + prefix + 'peak_ratio.mif')
    app.cleanup(prefix + 'first_peaks.mif')
    app.cleanup(prefix + 'second_peaks.mif')
    # Keep only voxels (within the previous mask) whose peak ratio is
    # below the user threshold
    run.command('mrcalc ' + prefix + 'peak_ratio.mif ' + str(app.ARGS.peak_ratio) + ' -lt ' + mask_in_path + ' -mult ' + prefix + 'SF.mif -datatype bit')
    app.cleanup(prefix + 'peak_ratio.mif')
    # Make sure image isn't empty
    sf_voxel_count = image.statistics(prefix + 'SF.mif', mask=prefix+'SF.mif').count
    if not sf_voxel_count:
      raise MRtrixError('Aborting: All voxels have been excluded from single-fibre selection')
    # Generate a new response function
    run.command('amp2response dwi.mif ' + prefix + 'SF.mif ' + prefix + 'first_dir.mif ' + prefix + 'RF.txt' + lmax_option)
    app.cleanup(prefix + 'first_dir.mif')
    new_rf = matrix.load_vector(prefix + 'RF.txt')
    progress.increment('Optimising (' + str(iteration+1) + ' iterations, ' + str(sf_voxel_count) + ' voxels, RF: [ ' + ', '.join('{:.3f}'.format(n) for n in new_rf) + '] )')
    # Detect convergence
    # Look for a change > some percentage - don't bother looking at the masks
    if iteration > 0:
      old_rf = matrix.load_vector(rf_in_path)
      reiterate = False
      for old_value, new_value in zip(old_rf, new_rf):
        # Relative change of each coefficient w.r.t. the old/new mean
        # NOTE(review): assumes the mean of old & new is nonzero — verify
        mean = 0.5 * (old_value + new_value)
        diff = math.fabs(0.5 * (old_value - new_value))
        ratio = diff / mean
        if ratio > convergence_change:
          reiterate = True
      if not reiterate:
        # Converged: promote this iteration's outputs to the final results
        run.function(shutil.copyfile, prefix + 'RF.txt', 'response.txt')
        run.function(shutil.copyfile, prefix + 'SF.mif', 'voxels.mif')
        break
    app.cleanup(rf_in_path)
    app.cleanup(mask_in_path)
    iteration += 1
  progress.done()
  # If we've terminated due to hitting the iteration limiter, we still need to copy the output file(s) to the correct location
  if os.path.exists('response.txt'):
    app.console('Exited at iteration ' + str(iteration+1) + ' with ' + str(sf_voxel_count) + ' SF voxels due to unchanged RF coefficients')
  else:
    app.console('Exited after maximum ' + str(app.ARGS.max_iters) + ' iterations with ' + str(sf_voxel_count) + ' SF voxels')
    run.function(shutil.copyfile, 'iter' + str(app.ARGS.max_iters-1) + '_RF.txt', 'response.txt')
    run.function(shutil.copyfile, 'iter' + str(app.ARGS.max_iters-1) + '_SF.mif', 'voxels.mif')
  run.function(shutil.copyfile, 'response.txt', path.from_user(app.ARGS.output, False))
  if app.ARGS.voxels:
    run.command('mrconvert voxels.mif ' + path.from_user(app.ARGS.voxels), mrconvert_keyval=path.from_user(app.ARGS.input, False), force=app.FORCE_OVERWRITE)
|
--Countries that have nuclear weapons will not help other countries obtain or develop them. Non-weapon states agree not to try to get nuclear arms.
--Countries with nuclear weapons will negotiate for nuclear disarmament.
--Countries without nuclear weapons will allow the International Atomic Energy Agency to oversee their nuclear facilities.
--Countries will exchange peaceful nuclear technology.
--Entered into force in 1970; signed by 187 countries. Extended indefinitely in 1995.
--India, Pakistan, Israel and Cuba are the only countries that haven't signed on. Cuba is a member of a treaty establishing a nuclear-free zone in Latin America.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def get_next(n, i, damen_pos):
    """Advance the queen placement to the next candidate configuration.

    Returns an (index, positions) pair for the caller to resume searching
    from.

    NOTE(review): the ``for i in range(n)`` loop immediately overwrites
    the ``i`` argument, so the row index passed in by the caller is never
    used — confirm this is intended.
    NOTE(review): if the loop exhausts without finding candidates, the
    function implicitly returns None; the caller's tuple-unpacking would
    then raise — verify this cannot happen in practice.
    """
    for i in range(n):
        # Columns not yet occupied by any queen, and strictly to the
        # right of this row's current column
        candidates = set(list(range(n)))
        candidates -= set(damen_pos)
        candidates -= set(list(range(damen_pos[i]+1)))
        # NOTE(review): relies on iterating a set of small ints yielding
        # ascending order; that is a CPython artifact, not a guarantee
        candidates = list(candidates)
        if len(candidates) > 0:
            damen_pos[i] = candidates[0]
            return i, damen_pos
        else:
            # No candidate in this row: reset rows i..n-1 to column 0
            # and step back one row
            damen_pos = damen_pos[0:i] + [0]*(n-i)
            i -= 1
def is_attacked(damen, x, y):
    """Return True if square (x, y) is attacked by a queen in rows 0..y-1.

    ``damen[i]`` holds the column of the queen placed in row ``i``; only
    queens in rows above ``y`` are examined.
    """
    for dy, dx in enumerate(damen[:y]):
        # Same column, or same diagonal.  (The original also tested
        # ``dy == y``, which can never hold since dy < y — removed.)
        if dx == x or abs(x - dx) == abs(y - dy):
            return True
    return False
def finde_loesung(n):
    """Place n queens on an n×n board such that no two attack each other.

    Yields each solution as a list where entry i is the column of the
    queen placed in row i.
    """
    # damen[i] is the x-position (column) of queen i in row i
    damen = [0]*n
    i = 1
    while 0 <= i < n:
        while not is_attacked(damen, damen[i], i):
            if i == n-1:
                # Bug fix: yield a copy.  The working list is mutated in
                # place, so yielding it directly would alias every
                # reported solution to the same (ever-changing) object.
                yield list(damen)
                break
            i += 1
        i, damen = get_next(n, i, damen)
def alle_loesungen(n):
    """Collect every n-queens solution into a list."""
    return list(finde_loesung(n))
# Report the number of solutions for an 11x11 board
print(len(alle_loesungen(11)))
|
January 20, 2019 in Features, Sunday Shorts.
January 14, 2019 in Features.
January 6, 2019 in Features.
January 2, 2019 in Features.
January 1, 2019 in Features, News.
December 31, 2018 in Features.
|
# encoding: utf-8
from __future__ import unicode_literals, absolute_import
from .slug import slugify as s
from .slug import slugify_attr as s_attr
import enum
class PyConstString(str):
    """A str subclass carrying constant metadata (name, label, value).

    When ``value`` is falsy, ``name`` is used verbatim as the string
    content; otherwise the value is slugified first.
    """

    def __new__(cls, name=None, value=None):
        content = s(value) if value else name
        instance = str.__new__(cls, content)
        instance.name = name
        instance.label = name
        instance.value = content
        return instance
class Const(object):
    """A container of named constants backed by PyConstString values.

    Constants may be supplied positionally (values) or as keyword
    arguments (attribute name -> value); each constant is also exposed as
    a slugified attribute on the instance.
    """

    def __init__(self, *args, **kwargs):
        self.__data = ()
        for value in args:
            self.add(value)
        for name, attr in kwargs.items():
            self.add(name, attr)

    def __set_iter_value(self, iter_value):
        """Unpack a 1-, 2- or 3+-element sequence into (attr, value, name).

        Elements beyond the third are ignored, matching the original
        behaviour (the two branches for len==3 and len>3 were identical
        and have been merged).
        """
        attr = value = name = None
        if len(iter_value) == 1:
            attr = iter_value[0]
        elif len(iter_value) == 2:
            attr, value = iter_value
        elif len(iter_value) >= 3:
            attr, value, name = iter_value[:3]
        return attr, value, name

    def to_enum(self):
        """Return a dynamic enum.Enum whose members mirror this constant set."""
        return enum.Enum('DynamicEnum', {i[0]: i[0] for i in self})

    def add(self, attr, value=None, name=None):
        "Set values in constant"
        # A tuple/list attr packs (attr, value, name) in one argument
        if isinstance(attr, (tuple, list)):
            attr, value, name = self.__set_iter_value(attr)
        # Fall back through attr -> name -> attr so all three are populated
        if attr is None:
            attr = name
        if value is None:
            value = attr
        if name is None:
            name = attr
        self.__data += (PyConstString(name=name, value=value),)
        # set attribute as slugfiy
        self.__dict__[s_attr(attr)] = self.__data[-1]

    def __getitem__(self, index):
        "Get index item"
        return (self.__data[index], self.__data[index].name)

    def __iter__(self):
        "Lazy return"
        return ((i, i.name) for i in self.__data)

    def __len__(self):
        return len(self.__data)
|
P Type Power Transformer / Top-quality products at competitive prices are our commitment to our customers.
Litone can now provide from P-09 to P-40 to meet different customer and industry requirements.
P Type transformer has good magnetic properties and can be used in switching power supplies, computers, ballasts and many other electrical appliances.
Size of P Core can choose.
Based in Taiwan, Litone Electronics Co., Ltd is one of the leading P Type Power Transformer | power supply manufacturers since 1987.
|
# -*- encoding: utf-8 -*-
# PKGBUILDer v4.3.0
# An AUR helper (and library) in Python 3.
# Copyright © 2011-2021, Chris Warrick.
# See /LICENSE for licensing information.
"""
The Package class, the most important class in PKGBUILDer.
:Copyright: © 2011-2021, Chris Warrick.
:License: BSD (see /LICENSE).
"""
from . import UTC, DS
from .exceptions import SanityError
import datetime
# Public API of this module
__all__ = ('Package', 'AURPackage', 'ABSPackage')
def mktime(ts):
    """Convert a POSIX timestamp into a timezone-aware UTC datetime.

    datetime.utcfromtimestamp() is deprecated since Python 3.12; building
    the aware datetime directly via fromtimestamp(tz=...) is equivalent
    (assuming the package-level UTC object is a true UTC tzinfo).
    """
    return datetime.datetime.fromtimestamp(ts, tz=UTC)
class Package(object):
    """The base class for packages.

    ``is_abs`` identifies the package source: True for repository
    (formerly ABS) packages, False for AUR packages, None when unknown.

    NOTE(review): the list-typed class attributes below are shared
    mutable defaults; instances are expected to replace them via
    ``__init__`` keyword arguments rather than mutate them in place.
    """

    is_abs = None
    name = ''
    version = ''
    description = ''
    repo = ''
    url = ''
    licenses = []
    human = ''
    depends = []
    optdepends = []
    conflicts = []
    provides = []
    replaces = []
    groups = []

    def __init__(self, **kwargs):
        """Initialize the class, setting any provided fields as attributes."""
        for k, v in kwargs.items():
            setattr(self, k, v)

    def __str__(self):
        """Return `name-version`, the conventional package identifier."""
        return '-'.join((self.name, self.version))

    def __repr__(self):
        """Return a debugging representation indicating the package source.

        Bug fix: the original tested truthiness before the ``is None``
        check, so the unknown-source branch was unreachable (None is
        falsy and fell into the AUR branch), and the final fallback
        *returned* a SanityError instead of raising it.  The None check
        now comes first, and the three branches cover all values.
        """
        if self.is_abs is None:
            return '<??? Package {0}-{1}>'.format(self.name, self.version)
        if self.is_abs:
            return '<Repository Package {0}-{1}>'.format(self.name, self.version)
        return '<AUR Package {0}-{1}>'.format(self.name, self.version)
class AURPackage(Package):
    """A package sourced from the Arch User Repository."""

    repo = 'aur'
    id = None
    packagebase = ''
    packagebaseid = None
    makedepends = []
    checkdepends = []
    is_abs = False
    is_outdated = None
    outdated_since = None
    added = None
    modified = None
    votes = None
    urlpath = ''
    popularity = None
    keywords = []

    @classmethod
    def from_aurdict(cls, aurdict):
        """Build an AURPackage from a dict returned by the AUR RPC."""
        # Maps AUR RPC field names onto our attribute names
        field_map = {'Description': 'description',
                     'ID': 'id',
                     'Maintainer': 'human',
                     'Name': 'name',
                     'NumVotes': 'votes',
                     'URL': 'url',
                     'Version': 'version',
                     'PackageBase': 'packagebase',
                     'PackageBaseID': 'packagebaseid',
                     'Depends': 'depends',
                     'MakeDepends': 'makedepends',
                     'CheckDepends': 'checkdepends',
                     'OptDepends': 'optdepends',
                     'Conflicts': 'conflicts',
                     'Provides': 'provides',
                     'Replaces': 'replaces',
                     'Groups': 'groups',
                     'License': 'licenses',
                     'URLPath': 'urlpath',
                     'Popularity': 'popularity',
                     'Keywords': 'keywords',
                     }
        # Fields handled explicitly below, so no warning is wanted
        skipped = ('OutOfDate', 'FirstSubmitted', 'LastModified')
        pkg = cls()
        for key, val in aurdict.items():
            attr = field_map.get(key)
            if attr is None:
                if key not in skipped:
                    DS.log.warning('AURDict has an unknown %s key: %s',
                                   key, aurdict)
                continue
            if val is not None:
                setattr(pkg, attr, val)
        # Timestamp fields require conversion to aware datetimes
        outdated_ts = aurdict['OutOfDate']
        pkg.is_outdated = outdated_ts is not None
        pkg.outdated_since = mktime(outdated_ts) if pkg.is_outdated else None
        pkg.added = mktime(aurdict['FirstSubmitted'])
        pkg.modified = mktime(aurdict['LastModified'])
        return pkg
class ABSPackage(Package):
    """A binary repository package (formerly ABS)."""

    is_abs = True
    # Not all of these fields are strictly required; they are carried
    # over from pyalpm for completeness.
    arch = ''
    backup = []
    base64_sig = None
    builddate = None
    deltas = []
    download_size = None
    filename = ''
    files = []
    has_scriptlet = None
    installdate = None
    isize = None
    md5sum = ''
    reason = []
    sha256sum = ''
    size = None

    @classmethod
    def from_pyalpm(cls, alpmpkg):
        """Build an ABSPackage from a pyalpm.Package object."""
        # Attributes whose names match between pyalpm and this class
        mirrored = ('arch', 'backup', 'base64_sig', 'conflicts', 'deltas',
                    'depends', 'download_size', 'filename', 'files', 'groups',
                    'has_scriptlet', 'isize', 'licenses', 'md5sum', 'name',
                    'optdepends', 'provides', 'reason', 'replaces', 'sha256sum',
                    'size', 'url', 'version')
        pkg = cls()
        for attr in mirrored:
            setattr(pkg, attr, getattr(alpmpkg, attr))
        # Fields whose names differ, plus timestamp conversions
        pkg.repo = alpmpkg.db.name
        pkg.description = alpmpkg.desc
        pkg.human = alpmpkg.packager
        pkg.builddate = mktime(alpmpkg.builddate)
        pkg.installdate = mktime(alpmpkg.installdate)
        return pkg
|
2. CEASGA published in spanish, english and portuguese.
3. Authors must send the original manuscript and the COVER LETTER to the publisher.
The works are accepted continuously and must be sent to info[@]ceasga.es.
- MANUSCRIPT FORMAT: the manuscripts writing style will be Arial, size 11, spacing 1,5, full justification. The format should be in DOC or DOCX.
- STRUCTURE. The first page should contain the title of the article, the authors' names and institutional affiliations. The second page should include an abstract (approximately 150 words) and key words. The title, abstract and key words should appear at least in English and Spanish. The following pages should include the title of the article and the text (up to 40 pages). The sections should be in the following order: Introduction, Materials and Methods, Results, and Discussion/Conclusions.
- LANGUAGE. The manuscripts can be written in Spanish, English and/or Portuguese.
- TABLES AND GRAPHS. Diagrams, charts and tables will have a brief title and explanation and have to be send in a separate document but indicating its position in the text.
- ABBREVIATION. Define abbreviations and acronyms at first mention in the main text and thereafter use only the abbreviation/acronym.
|
import numpy
import theano
from numpy.testing import assert_allclose
from theano import tensor
from blocks.bricks import Identity
from blocks.bricks.recurrent import SimpleRecurrent
from blocks.bricks.attention import SequenceContentAttention
from blocks.initialization import IsotropicGaussian, Constant
from blocks_extras.bricks.attention2 import AttentionRecurrent
from blocks.graph import ComputationGraph
from blocks.select import Selector
def test_attention_recurrent():
    """Check AttentionRecurrent against a manual step-by-step reimplementation.

    Builds a SimpleRecurrent transition wrapped with sequence-content
    attention, runs it on random (masked) data, and verifies:
      * output shapes and parameter bookkeeping,
      * per-step agreement with an explicit distribute/transition/attention
        loop,
      * that attention weights respect the attended mask.
    """
    rng = numpy.random.RandomState(1234)
    dim = 5
    batch_size = 4
    input_length = 20
    attended_dim = 10
    attended_length = 15
    wrapped = SimpleRecurrent(dim, Identity())
    attention = SequenceContentAttention(
        state_names=wrapped.apply.states,
        attended_dim=attended_dim, match_dim=attended_dim)
    recurrent = AttentionRecurrent(wrapped, attention, seed=1234)
    recurrent.weights_init = IsotropicGaussian(0.5)
    recurrent.biases_init = Constant(0)
    recurrent.initialize()
    attended = tensor.tensor3("attended")
    attended_mask = tensor.matrix("attended_mask")
    inputs = tensor.tensor3("inputs")
    inputs_mask = tensor.matrix("inputs_mask")
    outputs = recurrent.apply(
        inputs=inputs, mask=inputs_mask,
        attended=attended, attended_mask=attended_mask)
    states, glimpses, weights = outputs
    # All outputs are (time, batch, feature)-shaped sequences.
    assert states.ndim == 3
    assert glimpses.ndim == 3
    assert weights.ndim == 3
    # For values.
    def rand(size):
        return rng.uniform(size=size).astype(theano.config.floatX)
    # For masks.
    def generate_mask(length, batch_size):
        mask = numpy.ones((length, batch_size), dtype=theano.config.floatX)
        # To make it look like read data
        for i in range(batch_size):
            mask[1 + rng.randint(0, length - 1):, i] = 0.0
        return mask
    input_vals = rand((input_length, batch_size, dim))
    input_mask_vals = generate_mask(input_length, batch_size)
    attended_vals = rand((attended_length, batch_size, attended_dim))
    attended_mask_vals = generate_mask(attended_length, batch_size)
    func = theano.function([inputs, inputs_mask, attended, attended_mask],
                           [states, glimpses, weights])
    states_vals, glimpses_vals, weight_vals = func(
        input_vals, input_mask_vals,
        attended_vals, attended_mask_vals)
    assert states_vals.shape == (input_length, batch_size, dim)
    assert glimpses_vals.shape == (input_length, batch_size, attended_dim)
    # Every shared variable in the graph must be a registered parameter.
    assert (len(ComputationGraph(outputs).shared_variables) ==
            len(Selector(recurrent).get_parameters()))
    # Manual reimplementation: one step = distribute the glimpse into the
    # inputs, advance the wrapped transition, then recompute the glimpse.
    inputs2d = tensor.matrix()
    states2d = tensor.matrix()
    mask1d = tensor.vector()
    weighted_averages = tensor.matrix()
    distribute_func = theano.function(
        [inputs2d, weighted_averages],
        recurrent.distribute.apply(
            inputs=inputs2d,
            weighted_averages=weighted_averages))
    wrapped_apply_func = theano.function(
        [states2d, inputs2d, mask1d], wrapped.apply(
            states=states2d, inputs=inputs2d, mask=mask1d, iterate=False))
    attention_func = theano.function(
        [states2d, attended, attended_mask],
        attention.take_glimpses(
            attended=attended, attended_mask=attended_mask,
            states=states2d))
    states_man = wrapped.initial_states(batch_size).eval()
    glimpses_man = numpy.zeros((batch_size, attended_dim),
                               dtype=theano.config.floatX)
    for i in range(input_length):
        inputs_man = distribute_func(input_vals[i], glimpses_man)
        states_man = wrapped_apply_func(states_man, inputs_man,
                                        input_mask_vals[i])
        glimpses_man, weights_man = attention_func(
            states_man, attended_vals, attended_mask_vals)
        assert_allclose(states_man, states_vals[i], rtol=1e-5)
        assert_allclose(glimpses_man, glimpses_vals[i], rtol=1e-5)
        assert_allclose(weights_man, weight_vals[i], rtol=1e-5)
    # weights at masked-out attended positions must be exactly zero
    assert numpy.all(weight_vals * (1 - attended_mask_vals.T) == 0)
    # weights at valid (unmasked) attended positions must be non-zero
    assert numpy.all(abs(weight_vals + (1 - attended_mask_vals.T)) > 1e-5)
    # weights from different steps should be noticeably different
    assert (abs(weight_vals[0] - weight_vals[1])).sum() > 1e-2
    # weights for all state after the last masked position should be same
    for i in range(batch_size):
        last = int(input_mask_vals[:, i].sum())
        for j in range(last, input_length):
            assert_allclose(weight_vals[last, i], weight_vals[j, i], 1e-5)
|
Frey, R. G. & Paton, William (1989) “Vivisection, morals and medicine: An exchange”, en Regan, T. & Singer, P. (eds.) Animal rights and human obligations, Englewood Cliffs: Prentice Hall, pp. 223-226.
McMahan, J. (2002) The ethics of killing: Problems at the margins of life, Oxford: Oxford University Press.
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for preparing data to be compatible with object detection pipeline.
Functions to prepare Waymo, scannet and kitti datasets.
"""
import enum
import gin
import gin.tf
import tensorflow as tf
import tensorflow_datasets as tfds
from tf3d import standard_fields
# TODO(alirezafathi): Remove internal mark when dataset files are moved to tf3d.
from tf3d.datasets.specs import waymo_frames
from tf3d.utils import projections
class ObjectDifficulty(enum.IntEnum):
  """Object difficulty labels; a higher value means an easier object."""
  SUPER_HARD = 0
  HARD = 1
  MODERATE = 2
  EASY = 3
def _random_string_generator(num_numbers=5, max_number_value=100000):
  """Return a string tensor built from `num_numbers` random integers.

  Args:
    num_numbers: How many random integers to concatenate.
    max_number_value: Exclusive upper bound for each random integer.

  Returns:
    A scalar tf.string tensor containing the concatenated decimal digits.
  """
  pieces = [
      tf.strings.as_string(
          tf.random.uniform([],
                            minval=0,
                            maxval=max_number_value,
                            dtype=tf.int32)) for _ in range(num_numbers)
  ]
  return tf.strings.join(pieces)
@gin.configurable
def prepare_scannet_scene_dataset(inputs, valid_object_classes=None):
  """Maps the fields of a loaded ScanNet scene example to standard fields.

  Args:
    inputs: A dictionary of input tensors.
    valid_object_classes: List of valid object classes. if None, it is ignored.

  Returns:
    A dictionary of input tensors with standard field names.
  """
  prepared_inputs = {}
  if 'mesh/vertices/positions' in inputs:
    prepared_inputs[standard_fields.InputDataFields
                    .point_positions] = inputs['mesh/vertices/positions']
  if 'mesh/vertices/normals' in inputs:
    prepared_inputs[standard_fields.InputDataFields
                    .point_normals] = inputs['mesh/vertices/normals']
    # Some meshes contain NaN normals; replace them with zeros so downstream
    # arithmetic stays finite.
    prepared_inputs[standard_fields.InputDataFields.point_normals] = tf.where(
        tf.math.is_nan(
            prepared_inputs[standard_fields.InputDataFields.point_normals]),
        tf.zeros_like(
            prepared_inputs[standard_fields.InputDataFields.point_normals]),
        prepared_inputs[standard_fields.InputDataFields.point_normals])
  if 'mesh/vertices/colors' in inputs:
    # Drop any alpha channel and normalize colors from [0, 255] to [-1, 1].
    prepared_inputs[standard_fields.InputDataFields
                    .point_colors] = inputs['mesh/vertices/colors'][:, 0:3]
    prepared_inputs[standard_fields.InputDataFields.point_colors] = tf.cast(
        prepared_inputs[standard_fields.InputDataFields.point_colors],
        dtype=tf.float32)
    prepared_inputs[standard_fields.InputDataFields.point_colors] *= (2.0 /
                                                                      255.0)
    prepared_inputs[standard_fields.InputDataFields.point_colors] -= 1.0
  if 'scene_name' in inputs:
    prepared_inputs[standard_fields.InputDataFields
                    .camera_image_name] = inputs['scene_name']
  if 'mesh/vertices/semantic_labels' in inputs:
    prepared_inputs[
        standard_fields.InputDataFields
        .object_class_points] = inputs['mesh/vertices/semantic_labels']
  if 'mesh/vertices/instance_labels' in inputs:
    prepared_inputs[
        standard_fields.InputDataFields.object_instance_id_points] = tf.reshape(
            inputs['mesh/vertices/instance_labels'], [-1])
  if valid_object_classes is not None:
    # Build a boolean mask of the points whose semantic label is in
    # valid_object_classes, then zero out the labels of every other point
    # (label 0 presumably means background/invalid — TODO confirm).
    valid_objects_mask = tf.cast(
        tf.zeros_like(
            prepared_inputs[
                standard_fields.InputDataFields.object_class_points],
            dtype=tf.int32),
        dtype=tf.bool)
    for object_class in valid_object_classes:
      valid_objects_mask = tf.logical_or(
          valid_objects_mask,
          tf.equal(
              prepared_inputs[
                  standard_fields.InputDataFields.object_class_points],
              object_class))
    valid_objects_mask = tf.cast(
        valid_objects_mask,
        dtype=prepared_inputs[
            standard_fields.InputDataFields.object_class_points].dtype)
    prepared_inputs[standard_fields.InputDataFields
                    .object_class_points] *= valid_objects_mask
  return prepared_inputs
@gin.configurable
def prepare_scannet_frame_dataset(inputs,
                                  min_pixel_depth=0.3,
                                  max_pixel_depth=6.0,
                                  valid_object_classes=None):
  """Maps the fields of a loaded ScanNet RGB-D frame to standard fields.

  Unprojects every depth pixel to a 3D point in the world frame, and prunes
  pixels whose depth is outside [min_pixel_depth, max_pixel_depth].

  Args:
    inputs: A dictionary of input tensors.
    min_pixel_depth: Pixels with depth values less than this are pruned.
    max_pixel_depth: Pixels with depth values more than this are pruned.
    valid_object_classes: List of valid object classes. if None, it is ignored.

  Returns:
    A dictionary of input tensors with standard field names.

  Raises:
    ValueError: If any required camera intrinsics/extrinsics or image entry
      is missing from `inputs`.
  """
  prepared_inputs = {}
  if 'cameras/rgbd_camera/intrinsics/K' not in inputs:
    raise ValueError('Intrinsic matrix is missing.')
  if 'cameras/rgbd_camera/extrinsics/R' not in inputs:
    raise ValueError('Extrinsic rotation matrix is missing.')
  if 'cameras/rgbd_camera/extrinsics/t' not in inputs:
    raise ValueError('Extrinsics translation is missing.')
  if 'cameras/rgbd_camera/depth_image' not in inputs:
    raise ValueError('Depth image is missing.')
  if 'cameras/rgbd_camera/color_image' not in inputs:
    raise ValueError('Color image is missing.')
  if 'frame_name' in inputs:
    prepared_inputs[standard_fields.InputDataFields
                    .camera_image_name] = inputs['frame_name']
  camera_intrinsics = inputs['cameras/rgbd_camera/intrinsics/K']
  depth_image = inputs['cameras/rgbd_camera/depth_image']
  image_height = tf.shape(depth_image)[0]
  image_width = tf.shape(depth_image)[1]
  # Pixel-center coordinates (+0.5) for every pixel, flattened to (N, 1).
  x, y = tf.meshgrid(
      tf.range(image_width), tf.range(image_height), indexing='xy')
  x = tf.reshape(tf.cast(x, dtype=tf.float32) + 0.5, [-1, 1])
  y = tf.reshape(tf.cast(y, dtype=tf.float32) + 0.5, [-1, 1])
  # Unproject image pixels to camera frame, then transform to world frame.
  point_positions = projections.image_frame_to_camera_frame(
      image_frame=tf.concat([x, y], axis=1),
      camera_intrinsics=camera_intrinsics)
  rotate_world_to_camera = inputs['cameras/rgbd_camera/extrinsics/R']
  translate_world_to_camera = inputs['cameras/rgbd_camera/extrinsics/t']
  point_positions = projections.to_world_frame(
      camera_frame_points=point_positions,
      rotate_world_to_camera=rotate_world_to_camera,
      translate_world_to_camera=translate_world_to_camera)
  # Scale the unit-depth rays by the per-pixel depth values.
  prepared_inputs[standard_fields.InputDataFields
                  .point_positions] = point_positions * tf.reshape(
                      depth_image, [-1, 1])
  depth_values = tf.reshape(depth_image, [-1])
  valid_depth_mask = tf.logical_and(
      tf.greater_equal(depth_values, min_pixel_depth),
      tf.less_equal(depth_values, max_pixel_depth))
  # Normalize colors from [0, 255] to [-1, 1].
  prepared_inputs[standard_fields.InputDataFields.point_colors] = tf.reshape(
      tf.cast(inputs['cameras/rgbd_camera/color_image'], dtype=tf.float32),
      [-1, 3])
  prepared_inputs[standard_fields.InputDataFields.point_colors] *= (2.0 / 255.0)
  prepared_inputs[standard_fields.InputDataFields.point_colors] -= 1.0
  # Prune points with out-of-range depth from every per-point tensor.
  prepared_inputs[
      standard_fields.InputDataFields.point_positions] = tf.boolean_mask(
          prepared_inputs[standard_fields.InputDataFields.point_positions],
          valid_depth_mask)
  prepared_inputs[
      standard_fields.InputDataFields.point_colors] = tf.boolean_mask(
          prepared_inputs[standard_fields.InputDataFields.point_colors],
          valid_depth_mask)
  if 'cameras/rgbd_camera/semantic_image' in inputs:
    prepared_inputs[
        standard_fields.InputDataFields.object_class_points] = tf.cast(
            tf.reshape(inputs['cameras/rgbd_camera/semantic_image'], [-1, 1]),
            dtype=tf.int32)
    prepared_inputs[
        standard_fields.InputDataFields.object_class_points] = tf.boolean_mask(
            prepared_inputs[
                standard_fields.InputDataFields.object_class_points],
            valid_depth_mask)
  if 'cameras/rgbd_camera/instance_image' in inputs:
    prepared_inputs[
        standard_fields.InputDataFields.object_instance_id_points] = tf.cast(
            tf.reshape(inputs['cameras/rgbd_camera/instance_image'], [-1]),
            dtype=tf.int32)
    prepared_inputs[standard_fields.InputDataFields
                    .object_instance_id_points] = tf.boolean_mask(
                        prepared_inputs[standard_fields.InputDataFields
                                        .object_instance_id_points],
                        valid_depth_mask)
  if valid_object_classes is not None:
    # Zero out the semantic labels of points whose class is not in
    # valid_object_classes (same scheme as prepare_scannet_scene_dataset).
    valid_objects_mask = tf.cast(
        tf.zeros_like(
            prepared_inputs[
                standard_fields.InputDataFields.object_class_points],
            dtype=tf.int32),
        dtype=tf.bool)
    for object_class in valid_object_classes:
      valid_objects_mask = tf.logical_or(
          valid_objects_mask,
          tf.equal(
              prepared_inputs[
                  standard_fields.InputDataFields.object_class_points],
              object_class))
    valid_objects_mask = tf.cast(
        valid_objects_mask,
        dtype=prepared_inputs[
            standard_fields.InputDataFields.object_class_points].dtype)
    prepared_inputs[standard_fields.InputDataFields
                    .object_class_points] *= valid_objects_mask
  return prepared_inputs
@gin.configurable
def prepare_waymo_open_dataset(inputs,
                               valid_object_classes=None,
                               max_object_distance_from_source=74.88):
  """Maps the fields of a loaded Waymo Open Dataset frame to standard fields.

  Args:
    inputs: A dictionary of input tensors.
    valid_object_classes: List of valid object classes. if None, it is ignored.
    max_object_distance_from_source: Maximum distance of objects from source. It
      will be ignored if None.

  Returns:
    A dictionary of input tensors with standard field names.
  """
  prepared_inputs = {}
  # Point-cloud features pass through unchanged when present.
  if standard_fields.InputDataFields.point_positions in inputs:
    prepared_inputs[standard_fields.InputDataFields.point_positions] = inputs[
        standard_fields.InputDataFields.point_positions]
  if standard_fields.InputDataFields.point_intensities in inputs:
    prepared_inputs[standard_fields.InputDataFields.point_intensities] = inputs[
        standard_fields.InputDataFields.point_intensities]
  if standard_fields.InputDataFields.point_elongations in inputs:
    prepared_inputs[standard_fields.InputDataFields.point_elongations] = inputs[
        standard_fields.InputDataFields.point_elongations]
  if standard_fields.InputDataFields.point_normals in inputs:
    prepared_inputs[standard_fields.InputDataFields.point_normals] = inputs[
        standard_fields.InputDataFields.point_normals]
  # Only the front camera is mapped.
  if 'cameras/front/intrinsics/K' in inputs:
    prepared_inputs[standard_fields.InputDataFields
                    .camera_intrinsics] = inputs['cameras/front/intrinsics/K']
  if 'cameras/front/extrinsics/R' in inputs:
    prepared_inputs[
        standard_fields.InputDataFields
        .camera_rotation_matrix] = inputs['cameras/front/extrinsics/R']
  if 'cameras/front/extrinsics/t' in inputs:
    prepared_inputs[standard_fields.InputDataFields
                    .camera_translation] = inputs['cameras/front/extrinsics/t']
  if 'cameras/front/image' in inputs:
    prepared_inputs[standard_fields.InputDataFields
                    .camera_image] = inputs['cameras/front/image']
    prepared_inputs[standard_fields.InputDataFields
                    .camera_raw_image] = inputs['cameras/front/image']
    prepared_inputs[standard_fields.InputDataFields
                    .camera_original_image] = inputs['cameras/front/image']
  if 'scene_name' in inputs and 'frame_name' in inputs:
    prepared_inputs[
        standard_fields.InputDataFields.camera_image_name] = tf.strings.join(
            [inputs['scene_name'], inputs['frame_name']], separator='_')
  if 'objects/pose/R' in inputs:
    prepared_inputs[standard_fields.InputDataFields
                    .objects_rotation_matrix] = inputs['objects/pose/R']
  if 'objects/pose/t' in inputs:
    prepared_inputs[standard_fields.InputDataFields
                    .objects_center] = inputs['objects/pose/t']
  if 'objects/shape/dimension' in inputs:
    # Dimension columns are ordered (length, width, height).
    prepared_inputs[
        standard_fields.InputDataFields.objects_length] = tf.reshape(
            inputs['objects/shape/dimension'][:, 0], [-1, 1])
    prepared_inputs[standard_fields.InputDataFields.objects_width] = tf.reshape(
        inputs['objects/shape/dimension'][:, 1], [-1, 1])
    prepared_inputs[
        standard_fields.InputDataFields.objects_height] = tf.reshape(
            inputs['objects/shape/dimension'][:, 2], [-1, 1])
  if 'objects/category/label' in inputs:
    prepared_inputs[standard_fields.InputDataFields.objects_class] = tf.reshape(
        inputs['objects/category/label'], [-1, 1])
  if valid_object_classes is not None:
    # Keep only the objects (not points) whose class is valid; all object
    # fields are filtered with the same boolean mask.
    valid_objects_mask = tf.cast(
        tf.zeros_like(
            prepared_inputs[standard_fields.InputDataFields.objects_class],
            dtype=tf.int32),
        dtype=tf.bool)
    for object_class in valid_object_classes:
      valid_objects_mask = tf.logical_or(
          valid_objects_mask,
          tf.equal(
              prepared_inputs[standard_fields.InputDataFields.objects_class],
              object_class))
    valid_objects_mask = tf.reshape(valid_objects_mask, [-1])
    for key in standard_fields.get_input_object_fields():
      if key in prepared_inputs:
        prepared_inputs[key] = tf.boolean_mask(prepared_inputs[key],
                                               valid_objects_mask)
  if max_object_distance_from_source is not None:
    if standard_fields.InputDataFields.objects_center in prepared_inputs:
      # Distance is measured in the x-y plane only (columns 0:2).
      object_distances = tf.norm(
          prepared_inputs[standard_fields.InputDataFields.objects_center][:,
                                                                          0:2],
          axis=1)
      valid_mask = tf.less(object_distances, max_object_distance_from_source)
      for key in standard_fields.get_input_object_fields():
        if key in prepared_inputs:
          prepared_inputs[key] = tf.boolean_mask(prepared_inputs[key],
                                                 valid_mask)
  return prepared_inputs
@gin.configurable
def prepare_kitti_dataset(inputs, valid_object_classes=None):
  """Maps the fields of a loaded KITTI frame to standard fields.

  Args:
    inputs: A dictionary of input tensors.
    valid_object_classes: List of valid object classes. if None, it is ignored.

  Returns:
    A dictionary of input tensors with standard field names.
  """
  prepared_inputs = {}
  prepared_inputs[standard_fields.InputDataFields.point_positions] = inputs[
      standard_fields.InputDataFields.point_positions]
  prepared_inputs[standard_fields.InputDataFields.point_intensities] = inputs[
      standard_fields.InputDataFields.point_intensities]
  # Only cam02 (the left color camera) is mapped.
  prepared_inputs[standard_fields.InputDataFields
                  .camera_intrinsics] = inputs['cameras/cam02/intrinsics/K']
  prepared_inputs[standard_fields.InputDataFields.
                  camera_rotation_matrix] = inputs['cameras/cam02/extrinsics/R']
  prepared_inputs[standard_fields.InputDataFields
                  .camera_translation] = inputs['cameras/cam02/extrinsics/t']
  prepared_inputs[standard_fields.InputDataFields
                  .camera_image] = inputs['cameras/cam02/image']
  prepared_inputs[standard_fields.InputDataFields
                  .camera_raw_image] = inputs['cameras/cam02/image']
  prepared_inputs[standard_fields.InputDataFields
                  .camera_original_image] = inputs['cameras/cam02/image']
  if 'scene_name' in inputs and 'frame_name' in inputs:
    prepared_inputs[
        standard_fields.InputDataFields.camera_image_name] = tf.strings.join(
            [inputs['scene_name'], inputs['frame_name']], separator='_')
  if 'objects/pose/R' in inputs:
    prepared_inputs[standard_fields.InputDataFields
                    .objects_rotation_matrix] = inputs['objects/pose/R']
  if 'objects/pose/t' in inputs:
    prepared_inputs[standard_fields.InputDataFields
                    .objects_center] = inputs['objects/pose/t']
  if 'objects/shape/dimension' in inputs:
    # Dimension columns are ordered (length, width, height).
    prepared_inputs[
        standard_fields.InputDataFields.objects_length] = tf.reshape(
            inputs['objects/shape/dimension'][:, 0], [-1, 1])
    prepared_inputs[standard_fields.InputDataFields.objects_width] = tf.reshape(
        inputs['objects/shape/dimension'][:, 1], [-1, 1])
    prepared_inputs[
        standard_fields.InputDataFields.objects_height] = tf.reshape(
            inputs['objects/shape/dimension'][:, 2], [-1, 1])
  if 'objects/category/label' in inputs:
    prepared_inputs[standard_fields.InputDataFields.objects_class] = tf.reshape(
        inputs['objects/category/label'], [-1, 1])
  if valid_object_classes is not None:
    # Filter all object fields down to objects with a valid class (same
    # scheme as prepare_waymo_open_dataset).
    valid_objects_mask = tf.cast(
        tf.zeros_like(
            prepared_inputs[standard_fields.InputDataFields.objects_class],
            dtype=tf.int32),
        dtype=tf.bool)
    for object_class in valid_object_classes:
      valid_objects_mask = tf.logical_or(
          valid_objects_mask,
          tf.equal(
              prepared_inputs[standard_fields.InputDataFields.objects_class],
              object_class))
    valid_objects_mask = tf.reshape(valid_objects_mask, [-1])
    for key in standard_fields.get_input_object_fields():
      if key in prepared_inputs:
        prepared_inputs[key] = tf.boolean_mask(prepared_inputs[key],
                                               valid_objects_mask)
  return prepared_inputs
@gin.configurable
def prepare_proxy_dataset(inputs):
  """Maps the fields from loaded input to standard fields.

  Args:
    inputs: A dictionary of input tensors.

  Returns:
    A dictionary of input tensors with standard field names.
  """
  fields = standard_fields.InputDataFields
  outputs = {}
  # Point-cloud features.
  outputs[fields.point_positions] = inputs[fields.point_positions]
  outputs[fields.point_intensities] = inputs[fields.point_intensities]
  # Camera parameters and images.
  outputs[fields.camera_intrinsics] = tf.reshape(inputs['camera_intrinsics'],
                                                 [3, 3])
  outputs[fields.camera_rotation_matrix] = tf.reshape(
      inputs['camera_rotation_matrix'], [3, 3])
  outputs[fields.camera_translation] = tf.reshape(inputs['camera_translation'],
                                                  [3])
  outputs[fields.camera_image] = inputs['image']
  outputs[fields.camera_raw_image] = inputs['image']
  outputs[fields.camera_original_image] = inputs['image']
  outputs[fields.camera_image_name] = _random_string_generator()
  # Object poses.
  outputs[fields.objects_rotation_matrix] = tf.reshape(
      inputs['objects_rotation'], [-1, 3, 3])
  outputs[fields.objects_center] = tf.reshape(inputs['objects_center'], [-1, 3])
  # Object sizes.
  outputs[fields.objects_length] = tf.reshape(inputs['objects_length'], [-1, 1])
  outputs[fields.objects_width] = tf.reshape(inputs['objects_width'], [-1, 1])
  outputs[fields.objects_height] = tf.reshape(inputs['objects_height'], [-1, 1])
  # Object labels.
  outputs[fields.objects_class] = tf.reshape(inputs['objects_class'], [-1, 1])
  return outputs
def compute_kitti_difficulty(boxes, occlusions, truncations, image_height):
  """Computes box difficulty as Hard(1), Moderate(2), Easy(3) or 0 (Super hard).

  Easy: height >=40 Px, occlusion <= 0, truncation <= 0.15
  Moderate: height >=25 Px, occlusion <= 1, truncation <= 0.30
  Hard: height >=25 Px, occlusion <= 2, truncation <= 0.50

  Note that a 'Hard' box is also 'Moderate' and 'Easy'; the highest (easiest)
  matching level wins.

  TODO(abhijitkundu): Since difficulty level is very specific to kitti, this
  function should be in kitti evaluation rather than detection preprocessor.

  Args:
    boxes: (N, 4) tensor of 2d boxes with [ymin, xmin, ymax, xmax] each row.
    occlusions: (N, 1) tensor containing box occlusion level
    truncations: (N, 1) tensor containing box truncation level
    image_height: Image height.

  Returns:
    A (N, 1) int32 tensor containing per box difficulty labels with
    0 (SuperHard), 1 (Hard), 2 (Moderate) and 3 (Easy).
  """
  # Box heights in normalized coordinates, scaled by the image height to get
  # pixels.
  pixel_heights = tf.reshape(boxes[:, 2] - boxes[:, 0], [-1, 1]) * tf.cast(
      image_height, dtype=tf.float32)

  def _level_mask(min_height, max_occlusion, max_truncation):
    # Boolean mask of boxes satisfying one difficulty level's thresholds.
    return ((pixel_heights >= min_height) & (occlusions <= max_occlusion) &
            (truncations <= max_truncation))

  easy = tf.cast(
      _level_mask(40.0, 0, 0.15), dtype=tf.int32) * ObjectDifficulty.EASY
  moderate = tf.cast(
      _level_mask(25.0, 1, 0.30), dtype=tf.int32) * ObjectDifficulty.MODERATE
  hard = tf.cast(
      _level_mask(25.0, 2, 0.50), dtype=tf.int32) * ObjectDifficulty.HARD
  # Element-wise maximum picks the easiest level each box qualifies for;
  # boxes matching none stay 0 (SuperHard).
  return tf.maximum(tf.maximum(hard, moderate), easy)
def get_waymo_per_frame_with_prediction_feature_spec(
    num_object_classes,
    encoded_features_dimension,
    include_encoded_features=True):
  """Returns a tfds feature spec with regular per frame entries and predictions.

  Extends the standard Waymo frame spec with a nested 'predictions' dict
  holding per-point and per-object detection outputs.

  Args:
    num_object_classes: Number of object classes.
    encoded_features_dimension: Encoded features dimension.
    include_encoded_features: If True, it will include encoded features.
      Otherwise, it will not include them.

  Returns:
    A tfds feature spec.
  """
  # Per-point fields are suffixed '_points'; per-object fields use the plural
  # 'objects_' prefix. The leading None dimension is the variable point/object
  # count.
  prediction_feature_dict = {
      standard_fields.DetectionResultFields.object_rotation_matrix_points:
          tfds.features.Tensor(shape=(None, 3, 3), dtype=tf.float32),
      standard_fields.DetectionResultFields.object_length_points:
          tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
      standard_fields.DetectionResultFields.object_height_points:
          tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
      standard_fields.DetectionResultFields.object_width_points:
          tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
      standard_fields.DetectionResultFields.object_center_points:
          tfds.features.Tensor(shape=(None, 3), dtype=tf.float32),
      standard_fields.DetectionResultFields.object_semantic_points:
          tfds.features.Tensor(
              shape=(None, num_object_classes), dtype=tf.float32),
      standard_fields.DetectionResultFields.objects_rotation_matrix:
          tfds.features.Tensor(shape=(None, 3, 3), dtype=tf.float32),
      standard_fields.DetectionResultFields.objects_length:
          tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
      standard_fields.DetectionResultFields.objects_height:
          tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
      standard_fields.DetectionResultFields.objects_width:
          tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
      standard_fields.DetectionResultFields.objects_center:
          tfds.features.Tensor(shape=(None, 3), dtype=tf.float32),
      standard_fields.DetectionResultFields.objects_class:
          tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
      standard_fields.DetectionResultFields.objects_score:
          tfds.features.Tensor(shape=(None, 1), dtype=tf.float32),
  }
  if include_encoded_features:
    prediction_feature_dict[standard_fields.DetectionResultFields
                            .encoded_features_points] = tfds.features.Tensor(
                                shape=(None, encoded_features_dimension),
                                dtype=tf.float32)
    prediction_feature_dict[standard_fields.DetectionResultFields
                            .objects_encoded_features] = tfds.features.Tensor(
                                shape=(None, encoded_features_dimension),
                                dtype=tf.float32)
  prediction_feature_spec = tfds.features.FeaturesDict(prediction_feature_dict)
  # Copy the base frame spec and graft the prediction spec onto it.
  output_feature_spec_dict = {
      k: v for k, v in waymo_frames.FRAME_FEATURE_SPEC.items()
  }
  output_feature_spec_dict['predictions'] = prediction_feature_spec
  return tfds.features.FeaturesDict(output_feature_spec_dict)
|
Efficient Diary Pro is a personal journal software program for Windows. This powerful, versatile and easy-to-use software is the right tool for writing and managing your journal and diary entries. Keeping everything in order lets your mind work without distraction.
"Just what I was looking for! Pros: I just downloaded this program and already entered my first entry. It was so simple and easy to do and I love all the different fonts and the cute little emoticons. So far, so good! Cons: The interface styles are kind of limited and you can't create your own that I can tell but other than that I can't find anything wrong with it yet. Summary: I adore the fact that you can put a mood (or the weather) and a title on each entry and even make multiple entries for the same day if you want too. ..."
|
from django.shortcuts import render_to_response, get_object_or_404
from django.template.context import RequestContext
from ticker.models import Entry
from tagging.models import Tag, TaggedItem
def overview(request, num_latest=10, template_name='ticker/overview.html',
             extra_context=None):
    """Show the latest public entries (``num_latest`` of them, 10 by default).

    Args:
        request: The current HttpRequest.
        num_latest: How many entries to display.
        template_name: Template used to render the page.
        extra_context: Optional dict merged into the template context.
    """
    # Note: ``extra_context`` previously defaulted to a mutable ``{}``, which
    # is shared between calls; ``None`` plus a guard is the safe equivalent.
    entry_list = Entry.objects.public()[:num_latest]
    template_context = {
        'entry_list': entry_list,
    }
    if extra_context:
        template_context.update(extra_context)
    return render_to_response(template_name, template_context,
                              RequestContext(request))
def archive(request, template_name='ticker/archive.html', extra_context=None):
    """Show an archive page with all public entries and a tag cloud.

    Args:
        request: The current HttpRequest.
        template_name: Template used to render the page.
        extra_context: Optional dict merged into the template context.
    """
    # ``extra_context`` previously defaulted to a mutable ``{}`` (shared
    # between calls); ``None`` plus a guard is the safe equivalent.
    entry_list = Entry.objects.public()
    # Tag cloud restricted to open entries, bucketed into 9 size steps.
    tag_list = Tag.objects.cloud_for_model(Entry, steps=9,
        filters={'status': Entry.STATUS_OPEN })
    template_context = {
        'entry_list': entry_list,
        'tag_list': tag_list,
    }
    if extra_context:
        template_context.update(extra_context)
    return render_to_response(template_name, template_context,
                              RequestContext(request))
def archive_by_tag(request, tag, template_name='ticker/archive_by_tag.html',
                   extra_context=None):
    """Show all public entries related to a specific ``tag``.

    Args:
        request: The current HttpRequest.
        tag: Tag name to filter by; a 404 is raised if it does not exist.
        template_name: Template used to render the page.
        extra_context: Optional dict merged into the template context.
    """
    # ``extra_context`` previously defaulted to a mutable ``{}`` (shared
    # between calls); ``None`` plus a guard is the safe equivalent.
    get_object_or_404(Tag, name=tag)
    entry_list = TaggedItem.objects.get_by_model(Entry.objects.public(), [tag])
    related_tags = Tag.objects.related_for_model([tag], Entry)
    template_context = {
        'the_tag': tag,
        'related_tags': related_tags,
        'entry_list': entry_list,
    }
    if extra_context:
        template_context.update(extra_context)
    # Pass the context positionally for consistency with the sibling views.
    return render_to_response(template_name, template_context,
                              RequestContext(request))
def details(request, slug, template_name='ticker/details.html',
            extra_context=None):
    """Show the details page for the public entry identified by ``slug``.

    Args:
        request: The current HttpRequest.
        slug: Slug of the entry; a 404 is raised if no public entry matches.
        template_name: Template used to render the page.
        extra_context: Optional dict merged into the template context.
    """
    # ``extra_context`` previously defaulted to a mutable ``{}`` (shared
    # between calls); ``None`` plus a guard is the safe equivalent.
    entry = get_object_or_404(Entry.objects.public(), slug=slug)
    template_context = {
        'entry': entry,
    }
    if extra_context:
        template_context.update(extra_context)
    return render_to_response(template_name, template_context,
                              RequestContext(request))
|
China is currently reported to have the highest afforestation rate of any country in the world, increasing its forest cover from 12 percent thirty years ago to more than 21 percent in 2013. The country is continuing to implement policy measures to increase the quality and quantity of its forests and aims to bring forest coverage to 23 percent, or 223 million hectares, by 2020. The endorsement of the China Forest Certification Scheme (CFCS) by PEFC was re-affirmed by its leaders in Paris in November 2014.
In February 2014, the endorsement of the China Forest Certification Scheme by PEFC was announced, representing a significant milestone for safeguarding global forests given the importance of the country in the forest products value chain and its substantial forest area.
The CFCS was endorsed by the PEFC General Assembly after demonstrating compliance with its globally recognised sustainability benchmarks, a suite of requirements for covering a range of issues including standards development, sustainable forest management, chain of custody certification as well as certification and accreditation requirements.
“This endorsement does not only have the potential to substantially increase the demand for, as well as the supply of, certified material, but we hope that it will also inspire other countries in the region supplying China to follow our example and promote sustainable forest management through this certification,” he added.
China is the second Asian country after Malaysia to successfully achieve PEFC endorsement for a national certification system, and the Indonesian Forestry Certification Cooperation (IFCC) submitted its scheme for assessment in November 2013.
A range of other countries in the region, including India, Japan, Myanmar, Nepal, Philippines, South Korea and Thailand are advancing in national system development and exploring options for eventual international recognition by PEFC.
There are already reportedly about two million hectares of forests in China CFCS-certified, and more than 200 professionals have participated in the CFCC auditor training over the past years to be able to respond to the expected increase in demand for certification services following the endorsement.
Marking their first PEFC General Assembly participation as an endorsed national forest certification system in November 2014 in Paris, the CFCC presented William Street, chairman of the board of PEFC International, with a traditional Chinese gift known as a DING.
The DING can trace its history back to ancient China when it was an important sacrificial utensil made from bronze used to express the wish of prosperity, stability and longevity. Since then, the gift has been given to demonstrate wishes of good cooperation, trust and reliability—most famously given by the Chinese central government to the UN in 1995.
“It is an honour to receive a gift that carries so much meaning, on behalf of everyone who makes up the PEFC alliance,” commented Mr Street, following the presentation. “The endorsement of the Chinese national system represented a significant milestone for PEFC, with China not only one of the world’s largest manufacturers of forest products, but also among the countries with the largest forest area in the world."
The endorsement was the centrepiece of the recent International Symposium on Forest Certification, held in Beijing. Hosted by the Chinese Academy of Forestry (CAF), in collaboration with PEFC International, the PEFC China Initiative and Asia Pulp and Paper, the event brought together over 100 representatives to celebrate the launch of the Chinese National Standard for Sustainable Forest Management.
With so many participants coming from the world of sustainable forest management, including NGOs, universities, companies and certification bodies, the symposium was the perfect setting for exchanging experiences and discussing a wide variety of topics.
The two-day event, which took place in September, was split into a series of sessions covering a range of topics, including a global and Asia-specific overview of forest certification, government procurement policies, market perspectives and due diligence. PEFC members also played an important role during the symposium, with several speaking at the event, providing their experience of creating and managing a national forest certification system in their respective countries.
“The presence of several speakers from countries around the world reflects the internationalism, importance and significance of the development and implementation of the CFCC system,” highlighted Sheam Satkuru-Granzella, PEFC International Vice-Chair, during her keynote remarks.
“This is a system which has been in development since 2001 and we were proud as the world’s leading forest certification scheme to have endorsed the CFCC earlier this year,” she said.
The PEFC China program was built on an existing presence in Japan, and extended PEFC's reach into Asian markets. "Governments and companies around the world recognise PEFC certification as a means of guaranteeing that products come from legal and sustainable sources," said Mr Gunneberg at the time.
PEFC China's director, Benson Yu, said "By increasing the number of PEFC Chain of Custody certifications in China, Chinese producers will gain an effective and third-party means to provide buyers with an assurance that their products come from sustainably managed forests." Due to the fact that China is the world's third largest importer of wood and one of the biggest exporters of wood-based products, it was hoped the new China Office would help benefit PEFC certified companies, both within Asia and globally.
Maharaj K Muthoo, president of the Roman Forum, recently stated that forest certification is a soft policy instrument that seeks to use assessments of forest management, the verification of legality, chains of custody, eco-labelling and trademarks to promote the sustainable management, conservation and development of forests in a holistic manner without compromising the rights, resources or requirements of present and future generations.
It aims to encourage ethical trade and commerce and improve market access through the economically viable, environmentally appropriate and socially beneficial management of trees, forests and related renewable resources. Forest certification, therefore, can be a pragmatic instrument for harnessing market forces, public opinion and civil society in support of sustainable forest management (SFM).
These systems, supported by forest certification, conform with the green economy paradigm because they appropriately balance the social, economic and environmental dimensions of development. Forest certification (and associated chain-of-custody certification) is developing into a prerequisite for public procurement and market access, and has become associated with ethical trade and social responsibility.
In many countries where the certified percentage of the existing forest is small, as in China, there will surely be people internationally who will remain sceptical as to the benefits and overall impact of the CFCC. But the adoption of a national certification scheme and SFM regimes by any nation for its natural forest resource must be regarded as a step in the right direction.
In Malaysia, for example, where both certification and SFM have been established for many years, the trees are bearing fruit—literally. In Indonesia, despite its poor past record of forest husbandry, there is no reason to condemn the country to a boycott of its wood products from its remaining natural forest products if they are sustainably managed.
For the greatest danger remains conversion—to agriculture, palm oil and even rubber trees. ‘Use it or lose it’ was never truer than with today’s demands on natural forests for booming biofuel and fibre markets. Plantations can never be a better option than sustainably managed natural forests. As monocultures, generally lacking in biodiversity and often treated with chemicals, plantations are only second best.
The Paris event therefore should have given some comfort and optimism that China is committed to a system of national certification that respects national sovereignty, based on the inclusion of all stakeholders from the bottom up, which will eventually command the respect not only of government but also of civil society in China and internationally.
Thinking globally, there seems to be another almost insurmountable, unspoken challenge remaining for the forest industry and wood products sector. If one believes that the message of wood’s positive environmental credentials (carbon sequestration, low embodied energy and infinite renewability) is fast being realised and accepted, then there may be too many cooks in the kitchen, causing confusion.
By that it may be suggested that there are now so many governments, NGOs, institutions, consultants and lobby groups, each with differing agenda, and on so many levels, that markets are becoming confused. Worse than that is the risk that the complexities of legislation, regulations, administration and protocols surrounding wood as a material might just cause users and specifiers to switch to other less environmentally acceptable materials as an easier option.
Concrete, steel, aluminium, glass and plastic industries have all invested so much in ‘greenwash marketing’ that consumers could be forgiven for continuing to believe that trees should not be cut and for manufacturers to avoid the material altogether, to the detriment of sustainable resources on the planet. While that was not a specific issue on the agenda in Paris, it gave food for afterthought.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: envbase.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
from abc import abstractmethod, ABCMeta
from collections import defaultdict
import six
from ..utils import get_rng
__all__ = ['RLEnvironment', 'ProxyPlayer',
'DiscreteActionSpace']
@six.add_metaclass(ABCMeta)
class RLEnvironment(object):
    """Abstract base class for a reinforcement-learning environment.

    Subclasses must implement :meth:`current_state` and :meth:`action`;
    episode/statistics hooks have default implementations below.
    """

    def __init__(self):
        self.reset_stat()

    @abstractmethod
    def current_state(self):
        """Observe the environment and return a state representation."""

    @abstractmethod
    def action(self, act):
        """Perform an action; implementations automatically start a new
        episode once ``isOver`` becomes True.

        Args:
            act: the action to perform.

        Returns:
            tuple: (reward, isOver)
        """

    def restart_episode(self):
        """Start a new episode, even if the current one hasn't ended."""
        raise NotImplementedError()

    def finish_episode(self):
        """Hook called when an episode finishes."""
        pass

    def get_action_space(self):
        """Returns:
            :class:`ActionSpace`"""
        raise NotImplementedError()

    def reset_stat(self):
        """Reset all statistics counters."""
        # Each stat key maps to a list of recorded values.
        self.stats = defaultdict(list)

    def play_one_episode(self, func, stat='score'):
        """Play one episode for evaluation.

        Args:
            func: the policy function. Takes a state and returns an action.
            stat: a key or list of keys in ``self.stats`` to return.

        Returns:
            the stat(s) collected over this episode.
        """
        keys = stat if isinstance(stat, list) else [stat]
        while True:
            observation = self.current_state()
            chosen_act = func(observation)
            _, episode_over = self.action(chosen_act)
            if episode_over:
                collected = [self.stats[k] for k in keys]
                # Counters are cleared so the next episode starts fresh.
                self.reset_stat()
                return collected if len(collected) > 1 else collected[0]
class ActionSpace(object):
    """Describes the set of actions an environment accepts."""

    def __init__(self):
        # rng comes from the project helper ``get_rng``; presumably seeded
        # per-instance for reproducible sampling — confirm against ..utils.
        self.rng = get_rng(self)

    @abstractmethod
    def sample(self):
        """Draw a random action from this space."""
        pass

    def num_actions(self):
        """Number of distinct actions, if the space is discrete."""
        raise NotImplementedError()
class DiscreteActionSpace(ActionSpace):
    """An action space of ``num`` discrete actions, numbered 0 .. num-1."""

    def __init__(self, num):
        super(DiscreteActionSpace, self).__init__()
        self.num = num

    def sample(self):
        # Uniformly pick an integer action in [0, self.num).
        return self.rng.randint(self.num)

    def num_actions(self):
        return self.num

    def _describe(self):
        # Single source of truth for both repr() and str().
        return "DiscreteActionSpace({})".format(self.num)

    def __repr__(self):
        return self._describe()

    def __str__(self):
        return self._describe()
class NaiveRLEnvironment(RLEnvironment):
    """For testing only: the state is a counter that the action overwrites."""
    def __init__(self):
        # Bug fix: the original skipped RLEnvironment.__init__, leaving
        # ``self.stats`` uninitialized and breaking play_one_episode().
        super(NaiveRLEnvironment, self).__init__()
        self.k = 0
    def current_state(self):
        # Each observation increments the counter.
        self.k += 1
        return self.k
    def action(self, act):
        # Reward equals the action; the episode ends once it exceeds 10.
        self.k = act
        return (self.k, self.k > 10)
class ProxyPlayer(RLEnvironment):
    """Serve as a proxy to another player: every RLEnvironment call is
    forwarded unchanged to the wrapped ``player`` instance."""
    def __init__(self, player):
        # Deliberately does NOT call RLEnvironment.__init__ (which would
        # create local stats); statistics are exposed via the wrapped
        # player's own ``stats`` through the property below.
        self.player = player
    def reset_stat(self):
        """Reset the wrapped player's statistics counters."""
        self.player.reset_stat()
    def current_state(self):
        """Return the wrapped player's current state representation."""
        return self.player.current_state()
    def action(self, act):
        """Forward the action; returns the wrapped player's (reward, isOver)."""
        return self.player.action(act)
    @property
    def stats(self):
        # Read-only view of the wrapped player's statistics.
        return self.player.stats
    def restart_episode(self):
        """Restart the wrapped player's episode."""
        self.player.restart_episode()
    def finish_episode(self):
        """Notify the wrapped player that an episode finished."""
        self.player.finish_episode()
    def get_action_space(self):
        """Return the wrapped player's :class:`ActionSpace`."""
        return self.player.get_action_space()
|
Hewland has substantial pedigree in 4WD solutions, having supplied some of the most successful vehicles in rally, rally-raid and circuit competition history.
With our team of highly-skilled design engineers and our world championship-winning provenance, we are confident that we can supply a system to meet every demand of your 4WD project.
In addition to offering bespoke 4WD design services, our high performance differentials are the perfect complement to any 4WD competition vehicle.
Please speak to our sales team to discuss your options, and find out how we can help you achieve the winning-performance you are looking for.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.