import numpy as np
import re
import itertools
from collections import Counter
"""
Original taken from https://github.com/dennybritz/cnn-text-classification-tf
"""


def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
    """
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    string = re.sub(r"\'s", " \'s", string)
    string = re.sub(r"\'ve", " \'ve", string)
    string = re.sub(r"n\'t", " n\'t", string)
    string = re.sub(r"\'re", " \'re", string)
    string = re.sub(r"\'d", " \'d", string)
    string = re.sub(r"\'ll", " \'ll", string)
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    string = re.sub(r"\(", " \( ", string)
    string = re.sub(r"\)", " \) ", string)
    string = re.sub(r"\?", " \? ", string)
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower()


def load_data_and_labels():
    """
    Loads MR polarity data from files, splits the data into words and generates labels.
    Returns split sentences and labels.
    """
    # Load data from files
    positive_examples = list(open("./data/rt-polarity.pos").readlines())
    positive_examples = [s.strip() for s in positive_examples]
    negative_examples = list(open("./data/rt-polarity.neg").readlines())
    negative_examples = [s.strip() for s in negative_examples]
    # Split by words
    x_text = positive_examples + negative_examples
    x_text = [clean_str(sent) for sent in x_text]
    x_text = [s.split(" ") for s in x_text]
    # Generate labels
    positive_labels = [[0, 1] for _ in positive_examples]
    negative_labels = [[1, 0] for _ in negative_examples]
    y = np.concatenate([positive_labels, negative_labels], 0)
    return [x_text, y]


def pad_sentences(sentences, padding_word="<PAD/>"):
    """
    Pads all sentences to the same length. The length is defined by the longest sentence.
    Returns padded sentences.
    """
    sequence_length = max(len(x) for x in sentences)
    padded_sentences = []
    for i in range(len(sentences)):
        sentence = sentences[i]
        num_padding = sequence_length - len(sentence)
        new_sentence = sentence + [padding_word] * num_padding
        padded_sentences.append(new_sentence)
    return padded_sentences


def build_vocab(sentences):
    """
    Builds a vocabulary mapping from word to index based on the sentences.
    Returns vocabulary mapping and inverse vocabulary mapping.
    """
    # Build vocabulary
    word_counts = Counter(itertools.chain(*sentences))
    # Mapping from index to word
    vocabulary_inv = [x[0] for x in word_counts.most_common()]
    # Mapping from word to index
    vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
    return [vocabulary, vocabulary_inv]


def build_input_data(sentences, labels, vocabulary):
    """
    Maps sentences and labels to vectors based on a vocabulary.
    """
    x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])
    y = np.array(labels)
    return [x, y]


def load_data():
    """
    Loads and preprocesses data for the MR dataset.
    Returns input vectors, labels, vocabulary, and inverse vocabulary.
    """
    # Load and preprocess data
    sentences, labels = load_data_and_labels()
    sentences_padded = pad_sentences(sentences)
    vocabulary, vocabulary_inv = build_vocab(sentences_padded)
    x, y = build_input_data(sentences_padded, labels, vocabulary)
    return [x, y, vocabulary, vocabulary_inv]


def batch_iter(data, batch_size, num_epochs):
    """
    Generates a batch iterator for a dataset.
    """
    data = np.array(data)
    data_size = len(data)
    # Ceiling division so an exactly divisible dataset does not produce an empty extra batch
    num_batches_per_epoch = int((data_size - 1) / batch_size) + 1
    for epoch in range(num_epochs):
        # Shuffle the data at each epoch
        shuffle_indices = np.random.permutation(np.arange(data_size))
        shuffled_data = data[shuffle_indices]
        for batch_num in range(num_batches_per_epoch):
            start_index = batch_num * batch_size
            end_index = min((batch_num + 1) * batch_size, data_size)
            yield shuffled_data[start_index:end_index]
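

# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; it assumes the
# ./data/rt-polarity.pos and ./data/rt-polarity.neg files are present).
# It loads the padded, index-encoded dataset and iterates over shuffled
# batches of row indices, which avoids building ragged (sentence, label)
# object arrays with newer NumPy versions.
if __name__ == "__main__":
    x, y, vocabulary, vocabulary_inv = load_data()
    print("x shape: %s, y shape: %s, vocabulary size: %d"
          % (x.shape, y.shape, len(vocabulary)))
    for batch_indices in batch_iter(np.arange(len(x)), batch_size=64, num_epochs=1):
        x_batch, y_batch = x[batch_indices], y[batch_indices]
        # feed x_batch / y_batch to a model here
        break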
|
We stock a wide range of new and refurbished safes in our showroom in Sheffield; our safe selection covers residential, business and banking, with cash ratings from 1,000 to 100,000. We fit ERA British Standard high-security locks to almost any PVC door already in your home. It may also have a secondary benefit of reminding you of the value of all your tools and equipment, which may help you to remember to take sensible security precautions. Our locksmiths in Leicester are on call 24 hours a day, 365 days per year. If you need to pay us via internet banking or directly through your bank, please use the following details. Are your lockers up to date for the new one pound coin which is due in 2017? Lockers with older one pound coin return locks will not operate with the new ones when they are released; the Treasury has advised businesses to get ready for the introduction of the new 12-sided coin. There are currently no thanks for this post. Locksmith provides a bunch of handy enums for configuring your requests, so you can say. We have been trading in Grimsby since 1995 and offer a professional, reliable service to the commercial and private sector. We are members of the prestigious Master Locksmiths Association and are vetted by Trading Standards: a local, independent, family-run business with a reputation for being punctual and offering polite, honest advice.
When you order our locksmith service in Hackney, London, you not only secure your property but also gain peace of mind, because our security experts are expertly trained to tackle all your lock and key problems. Due to the nature of the services we offer, our workshop is open by appointment only. A uPVC window lock mechanism is the gearing part inside a window that's driven by a. Lock Solid auto locksmiths in Carlisle, Cumbria, can replace transponders and programme them back to the vehicle in these situations, getting you up and running again.
Locksolid are specialists in car/van vehicle entry and replacement car & motorbike keys in Norwich, Norfolk and the surrounding areas [read more]. Lock Stock & Barrell is a local family-run business offering locksmith & security services to business and residential customers throughout our local areas. The company began trading in 1986 and is experienced in the security business. We are recognized as an approved company by the MLA Master Locksmiths Association and have been a member since 1989; we are also an SSAIB approved company, a certification for security service providers. Cranbourne Drive, Hoddesdon, EN11 0QQ. We understand, first hand, what people want: real professionalism and reliability in a locksmith. Using Worrall's you can expect a high-quality and professional service at a competitive price throughout London; we are always on hand to meet the requirements of all of our customers, big and small. We use only the very best locks available to guarantee quality work every time. At Portadown Locksmiths we don't believe in just keeping our clients happy! We believe in. Take a look to find out more about our. Replacement car key Newark, replacement car keys Newark, replacement van key Newark, replacement van keys Newark, replacement remote keys Newark. Ask about "restricted duplication" keys and our lock replacement service. Has your lock come away from the door? Is your safe not locking or opening? Broken or jammed locks? Keys247 can provide the help you need, as soon as you need it. Whether you are locked out of your home or need a new lock fitted, our highly skilled master locksmiths can help you gain entry and fit new locks. As specialists in locksmithing we can supply, install, maintain and repair locks and replace keys. Our professional and convenient solution is to provide a quick response for Sheffield home and property owners.
|
import numpy as np
from numpy.linalg.linalg import LinAlgError
import sys
import traceback
import gd_local_MapReduce as local_MapReduce
def print_out(len_maxiters, display, fnow, current_grad, beta, iteration):
if display:
print '\r',
print '{0:>0{mi}g} {1:> 12e} {2:> 12e} {3:> 12e}'.format(iteration, float(fnow), float(beta), float(current_grad), mi=len_maxiters), # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r',
sys.stdout.flush()
_fail_count = 0
_allowed_failures = 100
def safe_f_and_grad_f(f_and_gradf, x, iteration=0, step_size=0, *optargs):
'''
Calls f and gradf and returns inf for f in case of warnings / assertion errors and so on.
The returned gradf in that case is 0, which screws up SCG's momentum, so a re-start should be done
'''
global _fail_count, _allowed_failures
try:
[f, gradf] = f_and_gradf(x, iteration, step_size, *optargs)
_fail_count = 0
except (LinAlgError, ZeroDivisionError, ValueError, Warning, AssertionError) as e:
if _fail_count >= _allowed_failures:
print 'Too many errors...'
raise e
_fail_count += 1
print
_,_,tb = sys.exc_info()
tbInfo = traceback.extract_tb(tb)
filename,line,func,text = tbInfo[-1]
print ('An error occurred on line ' + str(line) + ' in filename ' + filename)
print 'Increasing failed count (' + str(_fail_count) + ') and returning nlml inf'
f = np.inf
gradf = np.ones(x.shape)
return f, gradf
def GD(f_and_gradf, x, tmp_folder, fixed_embeddings=False, optargs=(), maxiters=500, max_f_eval=500, display=True, xtol=None, ftol=None, gtol=None):
"""
Optimisation through Gradient Descent
f: the objective function
gradf : the gradient function (should return a 1D np.ndarray)
x : the initial condition
Returns
x the optimal value for x
flog : a list of all the objective values
function_eval number of fn evaluations
status: string describing convergence status
"""
if xtol is None:
xtol = 1e-16
if ftol is None:
ftol = 1e-6
if gtol is None:
gtol = 1e-6
len_maxiters = len(str(maxiters))
step_size = 0.01
mom_size = 0.0
f_gradf = safe_f_and_grad_f(f_and_gradf, x, iteration=0, step_size=0, *optargs)
fnow = f_gradf[0]
flog = [fnow]
gradnow = f_gradf[1]
direction = - gradnow
if not fixed_embeddings:
local_MapReduce.embeddings_set_grads(tmp_folder)
iteration = 0
while iteration < maxiters:
xprop = x + step_size * direction
f_gradf = safe_f_and_grad_f(f_and_gradf, xprop, iteration=iteration, step_size=step_size, *optargs)
fproposed = f_gradf[0]
if (np.abs(fnow - fproposed) < ftol):
print 'converged due to ftol'
break
if (np.abs(step_size) < xtol):
print 'converged due to xtol'
break
if (fproposed <= fnow):
fnow = fproposed
flog += [fnow]
gradnow = f_gradf[1]
if not fixed_embeddings:
local_MapReduce.embeddings_set_grads_update_grad_now(tmp_folder)
x = xprop
if not fixed_embeddings:
local_MapReduce.embeddings_set_grads_update_X(tmp_folder, step_size)
direction = - (gradnow + mom_size * step_size * direction)
#direction = - (gradnow - mom_size * step_size * direction)
if not fixed_embeddings:
local_MapReduce.embeddings_set_grads_update_d(tmp_folder, mom_size * step_size)
step_size *= 2.0
iteration += 1
max_abs_gradnow = np.max(np.abs(gradnow))
if not fixed_embeddings:
max_abs_gradnow = max(max_abs_gradnow, local_MapReduce.embeddings_get_grads_max_gradnow(tmp_folder))
if (max_abs_gradnow < gtol):
print 'converged due to grad'
break
else:
step_size /= 2.0
if display:
print ' {0:{mi}s} {1:11s} {2:11s} {3:11s}'.format("I", "F", "Scale", "|g|", mi=len_maxiters)
current_grad = np.sum(np.abs(gradnow))
if not fixed_embeddings:
current_grad += local_MapReduce.embeddings_get_grads_current_grad(tmp_folder)
print_out(len_maxiters, display, fnow, current_grad, step_size, iteration)
if display:
current_grad = np.sum(np.abs(gradnow))
if not fixed_embeddings:
current_grad += local_MapReduce.embeddings_get_grads_current_grad(tmp_folder)
print_out(len_maxiters, display, fnow, current_grad, step_size, iteration)
print ""
return x, flog, None, 'converged... NOT'
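

# ---------------------------------------------------------------------------
# Minimal usage sketch (an illustration, not part of the original project):
# minimise a simple quadratic with the GD routine above. fixed_embeddings=True
# keeps the local_MapReduce hooks disabled, so tmp_folder is never touched,
# although the gd_local_MapReduce module must still be importable for this
# file to load (the code itself is Python 2).
if __name__ == '__main__':
    def quadratic_f_and_gradf(x, iteration, step_size):
        # Objective f(x) = sum(x**2) and its gradient, in the [f, gradf] form
        # that safe_f_and_grad_f expects.
        return [float(np.sum(x ** 2)), 2.0 * x]

    x0 = np.array([3.0, -2.0])
    x_opt, flog, _, status = GD(quadratic_f_and_gradf, x0, tmp_folder=None,
                                fixed_embeddings=True, maxiters=200, display=False)
    print(x_opt)
    print(status)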
|
One other really wonderful thing about these massages is that they can really … Read More..
Many couples want to lead a happy marriage. You will realize that marriage demands a lot of patience with one another, and the process can lead to issues arising, which may force you to look for a marriage counselor. The wisdom you gain from the counseling process will help you continue with the marriage. You will benefit in many ways if you go for professional marriage counseling services, and you will need advice about marriage counseling professionals in your area. The following are benefits you will get from marriage counseling services.
Your communication skills will be built through marriage counseling services. Because marriage counselors have deep knowledge in the field of counseling, they will enable you to understand ways to relate to one another. As a couple you will be able to develop a good understanding, because you will be dealing with a neutral party, and you will be able to develop compassion, which will enable you to maneuver through difficult times.
There will be an opportunity for you to properly address issues. A platform will be provided where you will be able to express the feelings you have towards each other. It is important that you resolve the issues here, before they reach the point of causing divorce. By taking the step to engage a marriage counselor, you will be able to resolve disagreements you may have had. There is a need to address marriage issues, because doing so has the potential to promote a healthier and happier marriage. This is the supporting reason why you need to go for the marriage counseling exercise.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-

import random, pygame, sys, thread, time
from pygame.locals import *
from mpu6050 import *

y_offset = 0
rotation = 0


def mpu6050_read():
    global y_offset
    global rotation

    mpu = MPU6050()
    mpu.initialize()

    # Set calibration data
    mpu.gyro_offs = {'x': -178, 'y': 259, 'z': -104}
    mpu.accel_offs = {'y': -354, 'x': 389, 'z': -1482}

    accel_data = mpu.get_accel()
    x_rotation = mpu.get_x_rotation(accel_data)
    y_rotation = mpu.get_y_rotation(accel_data)

    while True:
        accel_data = mpu.get_accel()
        x_rotation = mpu.get_x_rotation(accel_data)
        y_rotation = mpu.get_y_rotation(accel_data)
        y_offset = y_rotation * 2
        rotation = x_rotation
        time.sleep(0.001)


FPS = 100
WINDOWWIDTH = 640
WINDOWHEIGHT = 480
CELLSIZE = 20
CELLWIDTH = int(WINDOWWIDTH / CELLSIZE)
CELLHEIGHT = int(WINDOWHEIGHT / CELLSIZE)

#             R    G    B
WHITE     = (255, 255, 255)
BLACK     = (  0,   0,   0)
RED       = (255,   0,   0)
GREEN     = (  0, 255,   0)
DARKGREEN = (  0, 155,   0)
DARKGRAY  = ( 40,  40,  40)
BGCOLOR = BLACK


def main():
    global FPSCLOCK, DISPLAYSURF, BASICFONT
    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
    BASICFONT = pygame.font.Font('freesansbold.ttf', 18)
    pygame.display.set_caption('MPU-6050')
    thread.start_new_thread(mpu6050_read, ())
    while True:
        runGame()


def runGame():
    global y_offset
    global rotation
    titleFont = pygame.font.Font('freesansbold.ttf', 50)
    titleSurf1 = titleFont.render('MPU-6050', True, WHITE)
    while True:  # main game loop
        for event in pygame.event.get():  # event handling loop
            if event.type == QUIT:
                terminate()
            elif event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    terminate()
        DISPLAYSURF.fill(BGCOLOR)
        drawGrid()
        rotatedSurf1 = pygame.transform.rotate(titleSurf1, rotation)
        rotatedRect1 = rotatedSurf1.get_rect()
        rotatedRect1.center = (WINDOWWIDTH/2, WINDOWHEIGHT/2 + y_offset)
        DISPLAYSURF.blit(rotatedSurf1, rotatedRect1)
        pygame.display.update()
        FPSCLOCK.tick(FPS)


def terminate():
    pygame.quit()
    sys.exit()


def drawGrid():
    for x in range(0, WINDOWWIDTH, CELLSIZE):  # draw vertical lines
        pygame.draw.line(DISPLAYSURF, DARKGRAY, (x, 0), (x, WINDOWHEIGHT))
    for y in range(0, WINDOWHEIGHT, CELLSIZE):  # draw horizontal lines
        pygame.draw.line(DISPLAYSURF, DARKGRAY, (0, y), (WINDOWWIDTH, y))


if __name__ == '__main__':
    main()
|
I installed the latest version of Dolphin (5.0-8253) and it works perfectly with New Super Mario Bros. Wii, with Wiimotes and 60 fps at 100% speed, but some things are missing (blocks, coins, etc.). Why? I also tried Mario Kart Wii and it's too slow, and Paper Mario: TTYD doesn't even open. I also tried these games on Windows 10 and they worked perfectly, so why not on Windows 8.1? I even downloaded Dolphin 4.0 (because 3.0 didn't work) but it doesn't recognize Wiimotes.
Post screenshots of your graphics settings.
Your adapter does not seem to support the version of GL that Dolphin requires. Use D3D instead.
(06-29-2018, 11:44 PM)DJBarry004 Wrote: Your adapter does not seem to support the version of GL that Dolphin requires. Use D3D instead.
I set the graphics backend as you said and now it works perfectly for New Super Mario Bros. Wii, Paper Mario: TTYD and Mario Kart Wii! A little less so for Mario Party 9; maybe there's a specific setting for that game?
And I also wanted to ask how I can set up my Wii Classic Controller for GameCube games?
Mario Party 9 needs EFB Copies to Texture + RAM, so that might be hitting performance.
About the Wii Classic Controller, I'm not sure. AFAIK that is an add-on for the Wiimote, and only works with Wii games that support it.
You can use other controllers (XB360, DS3 and DS4) though.
(06-30-2018, 09:37 AM)DJBarry004 Wrote: Mario Party 9 needs EFB Copies to Texture + RAM, so that might be hitting performance.
Ok, I wanted to use the Wii Classic Controller for a GameCube game, but I'll choose another one.
|
# -*- coding: utf-8 -*-
import numpy as nm
from sfepy.homogenization.utils import define_box_regions
import sfepy.homogenization.coefs_base as cb
import sfepy.discrete.fem.periodic as per
from sfepy.base.base import Struct
from sfepy.terms.terms_hyperelastic_ul import\
HyperElasticULFamilyData, NeoHookeanULTerm, BulkPenaltyULTerm
from sfepy.terms.extmods.terms import sym2nonsym
from sfepy.discrete.functions import ConstantFunctionByRegion
from sfepy import data_dir
import sfepy.linalg as la
def recovery_hook(pb, ncoors, region, ts,
naming_scheme='step_iel', recovery_file_tag=''):
from sfepy.base.ioutils import get_print_info
from sfepy.homogenization.recovery import get_output_suffix
import os.path as op
for ii, icell in enumerate(region.cells):
out = {}
pb.set_mesh_coors(ncoors[ii], update_fields=True,
clear_all=False, actual=True)
stress = pb.evaluate('ev_integrate_mat.3.Y(mat_he.S, u)',
mode='el_avg')
out['cauchy_stress'] = Struct(name='output_data',
mode='cell',
data=stress,
dofs=None)
strain = pb.evaluate('ev_integrate_mat.3.Y(mat_he.E, u)',
mode='el_avg')
out['green_strain'] = Struct(name='output_data',
mode='cell',
data=strain,
dofs=None)
out['displacement'] = Struct(name='output_data',
mode='vertex',
data=ncoors[ii] - pb.get_mesh_coors(),
dofs=None)
output_dir = pb.conf.options.get('output_dir', '.')
format = get_print_info(pb.domain.mesh.n_el, fill='0')[1]
suffix = get_output_suffix(icell, ts, naming_scheme, format,
pb.output_format)
micro_name = pb.get_output_name(extra='recovered_'
+ recovery_file_tag + suffix)
filename = op.join(output_dir, op.basename(micro_name))
fpv = pb.conf.options.get('file_per_var', False)
pb.save_state(filename, out=out, file_per_var=fpv)
def def_mat(ts, mode, coors, term, pb):
if not (mode == 'qp'):
return
if not hasattr(pb, 'family_data'):
pb.family_data = HyperElasticULFamilyData()
update_var = pb.conf.options.mesh_update_variable
if pb.equations is None:
state_u = pb.create_variables([update_var])[update_var]
else:
state_u = pb.get_variables()[update_var]
if state_u.data[0] is None:
state_u.init_data()
state_u.set_data(
pb.domain.get_mesh_coors(actual=True) - pb.domain.get_mesh_coors())
state_u.field.clear_mappings()
family_data = pb.family_data(state_u, term.region,
term.integral, term.integration)
if len(state_u.field.mappings0) == 0:
state_u.field.save_mappings()
n_el, n_qp, dim, n_en, n_c = state_u.get_data_shape(term.integral,
term.integration,
term.region.name)
conf_mat = pb.conf.materials
solid_key = [key for key in conf_mat.keys() if 'solid' in key][0]
solid_mat = conf_mat[solid_key].values
mat = {}
for mat_key in ['mu', 'K']:
if isinstance(solid_mat[mat_key], dict):
mat_fun = ConstantFunctionByRegion({mat_key: solid_mat[mat_key]})
mat[mat_key] = mat_fun.function(ts=ts, coors=coors, mode='qp',
term=term, problem=pb)[mat_key].reshape((n_el, n_qp, 1, 1))
else:
mat[mat_key] = nm.ones((n_el, n_qp, 1, 1)) * solid_mat[mat_key]
shape = family_data.green_strain.shape[:2]
sym = family_data.green_strain.shape[-2]
dim2 = dim**2
fargs = [family_data.get(name)
for name in NeoHookeanULTerm.family_data_names]
stress = nm.empty(shape + (sym, 1), dtype=nm.float64)
tanmod = nm.empty(shape + (sym, sym), dtype=nm.float64)
NeoHookeanULTerm.stress_function(stress, mat['mu'], *fargs)
NeoHookeanULTerm.tan_mod_function(tanmod, mat['mu'], *fargs)
fargs = [family_data.get(name)
for name in BulkPenaltyULTerm.family_data_names]
stress_p = nm.empty(shape + (sym, 1), dtype=nm.float64)
tanmod_p = nm.empty(shape + (sym, sym), dtype=nm.float64)
BulkPenaltyULTerm.stress_function(stress_p, mat['K'], *fargs)
BulkPenaltyULTerm.tan_mod_function(tanmod_p, mat['K'], *fargs)
stress_ns = nm.zeros(shape + (dim2, dim2), dtype=nm.float64)
tanmod_ns = nm.zeros(shape + (dim2, dim2), dtype=nm.float64)
sym2nonsym(stress_ns, stress + stress_p)
sym2nonsym(tanmod_ns, tanmod + tanmod_p)
npts = nm.prod(shape)
J = family_data.det_f
mtx_f = family_data.mtx_f.reshape((npts, dim, dim))
out = {
'E': 0.5 * (la.dot_sequences(mtx_f, mtx_f, 'ATB') - nm.eye(dim)),
'A': ((tanmod_ns + stress_ns) / J).reshape((npts, dim2, dim2)),
'S': ((stress + stress_p) / J).reshape((npts, sym, 1)),
}
return out
filename_mesh = data_dir + '/meshes/2d/special/circle_in_square_small.mesh'
dim = 2
options = {
'coefs': 'coefs',
'requirements': 'requirements',
'volume': {'expression': 'd_volume.5.Y(u)'},
'output_dir': './output',
'coefs_filename': 'coefs_hyper_homog',
'multiprocessing': True,
'chunks_per_worker': 2,
'mesh_update_variable': 'u',
'mesh_update_corrector': 'corrs_rs',
'recovery_hook': 'recovery_hook',
'store_micro_idxs': [49, 81],
}
fields = {
'displacement': ('real', 'vector', 'Y', 1),
}
functions = {
'match_x_plane': (per.match_x_plane,),
'match_y_plane': (per.match_y_plane,),
'mat_fce': (lambda ts, coors, mode=None, term=None, problem=None, **kwargs:
def_mat(ts, mode, coors, term, problem),),
}
materials = {
'mat_he': 'mat_fce',
'solid': ({'K': 1000,
'mu': {'Ym': 100, 'Yc': 10},
},),
}
variables = {
'u': ('unknown field', 'displacement'),
'v': ('test field', 'displacement', 'u'),
'Pi': ('parameter field', 'displacement', 'u'),
'Pi1u': ('parameter field', 'displacement', '(set-to-None)'),
'Pi2u': ('parameter field', 'displacement', '(set-to-None)'),
}
regions = {
'Y': 'all',
'Ym': 'cells of group 1',
'Yc': 'cells of group 2',
}
regions.update(define_box_regions(dim, (0., 0.), (1., 1.)))
ebcs = {
'fixed_u': ('Corners', {'u.all': 0.0}),
}
epbcs = {
'periodic_ux': (['Left', 'Right'], {'u.all': 'u.all'}, 'match_x_plane'),
'periodic_uy': (['Bottom', 'Top'], {'u.all': 'u.all'}, 'match_y_plane'),
}
coefs = {
'A': {
'requires': ['pis', 'corrs_rs'],
'expression': 'dw_nonsym_elastic.3.Y(mat_he.A, Pi1u, Pi2u)',
'set_variables': [('Pi1u', ('pis', 'corrs_rs'), 'u'),
('Pi2u', ('pis', 'corrs_rs'), 'u')],
'class': cb.CoefNonSymNonSym,
},
'S': {
'expression': 'ev_integrate_mat.3.Y(mat_he.S, u)',
'set_variables': [],
'class': cb.CoefOne,
}
}
requirements = {
'pis': {
'variables': ['u'],
'class': cb.ShapeDimDim,
},
'corrs_rs': {
'requires': ['pis'],
'ebcs': ['fixed_u'],
'epbcs': ['periodic_ux', 'periodic_uy'],
'equations': {
'balance_of_forces':
"""dw_nonsym_elastic.3.Y(mat_he.A, v, u)
= - dw_nonsym_elastic.3.Y(mat_he.A, v, Pi)"""
},
'set_variables': [('Pi', 'pis', 'u')],
'class': cb.CorrDimDim,
'save_name': 'corrs_hyper_homog',
'dump_variables': ['u'],
},
}
solvers = {
'ls': ('ls.scipy_direct', {}),
'newton': ('nls.newton', {
'i_max': 1,
'eps_a': 1e-4,
'problem': 'nonlinear',
}),
}
|
New Jersey Siding & Windows, Inc. is a premier exterior home remodeling contractor proudly serving clients across northern New Jersey from the New York to Pennsylvania borders. For more than 15 years, the company has helped homeowners renovate with top-quality windows, siding, entry doors, decking, roofing, gutters, stonework, and porch enclosures. New Jersey Siding & Windows is owned by James Anastasio and Neil Sciacca to install a wide range of energy efficient products from leading national manufacturers. From the free estimate through the final reveal, their mission is to add beauty, functionality, and value to every home for 100 percent customer satisfaction.
With a long-standing tradition for excellence, New Jersey Siding & Windows has received several notable recognitions. Every year from 2007 to 2014, they were voted #1 with the Daily Record Readers Choice Award. They’ve won the coveted Pulse of the City Award for excellence in customer satisfaction. Backed by an A+ BBB rating and accreditation, this reputable home improvement contractor was granted the Angie’s List Super Service Award. New Jersey Siding & Windows was even inducted into the Big 50 in Remodeling Magazine. This small business of just 18 staff members maintains a yearly volume of $4.4 million.
As a member of the elite National Association of the Remodeling Industry (NARI), they’re equipped to tackle any outdoor update with attention to detail, superior workmanship, and affordable pricing. No-interest financing options are available year-round and special coupons are hosted periodically to save clients bundles of money. New Jersey Siding & Windows is also a proud provider of GreenSpec listed products for environmental sustainability. These factory-trained, licensed contractors are so passionate about their work that they give back with Habitat for Humanity and the Roofs for Troops program.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from .core import PostSynapticMech_Exp2SynNMDA_Base
from morphforge.simulation.neuron.simulationdatacontainers.mhocfile import MHocFileData
from morphforge.simulation.neuron.simulationdatacontainers.mhocfile import MHOCSections
from morphforge.simulation.neuron.core.neuronsimulationenvironment import NEURONEnvironment
from morphforge.simulation.neuron.biophysics.modfile import ModFile
from morphforge.simulation.neuron.objects.neuronrecordable import NEURONRecordable
from morphforge.simulation.neuron.hocmodbuilders.hocmodutils import HocModUtils
from Cheetah.Template import Template
from morphforge.simulation.neuron.networks import NEURONPostSynapticMechTemplateForwardToTemplate
from morphforge.stdimports import MFRandom
from morphforge.units import parse_unit_str
from morphforge.stdimports import StandardTags
from morphforgecontrib.simulation.synapse_templates.exponential_form.neuron_records import Neuron_PSM_Std_CurrentRecord
from morphforgecontrib.simulation.synapse_templates.exponential_form.neuron_records import Neuron_PSM_Std_ConductanceRecord
from morphforge.simulation.neuron.networks import NEURONSynapse
class Neuron_PSM_Std_NMDAVoltageDependanceRecord(NEURONRecordable):
def __init__(self, neuron_syn_post, **kwargs):
super(Neuron_PSM_Std_NMDAVoltageDependanceRecord, self).__init__(**kwargs)
self.neuron_syn_post = neuron_syn_post
self._description="MyDesc!!"
def get_unit(self):
return parse_unit_str('')
def get_std_tags(self):
return [StandardTags.NMDAVoltageDependancy]
def build_hoc(self, hocfile_obj):
assert len(self.neuron_syn_post.synapses) == 1
obj_name_hoc = hocfile_obj[MHocFileData.Synapses][self.neuron_syn_post]["synnamepost"]
HocModUtils.create_record_from_object(hocfile_obj=hocfile_obj, vecname="RecVec%s" % self.name, objname=obj_name_hoc, objvar="voltage_dependancy", recordobj=self)
def build_mod(self, modfile_set):
pass
class Neuron_PSM_Std_NMDAConductanceWithVoltageDependanceRecord(NEURONRecordable):
def __init__(self, neuron_syn_post, **kwargs):
super(Neuron_PSM_Std_NMDAConductanceWithVoltageDependanceRecord, self).__init__(**kwargs)
self.neuron_syn_post = neuron_syn_post
def get_unit(self):
return parse_unit_str('uS')
def get_std_tags(self):
return [StandardTags.NMDAConductanceWithVDep]
def build_hoc(self, hocfile_obj):
assert len(self.neuron_syn_post.synapses) == 1
obj_name_hoc = hocfile_obj[MHocFileData.Synapses][self.neuron_syn_post]["synnamepost"]
HocModUtils.create_record_from_object(hocfile_obj=hocfile_obj, vecname="RecVec%s" % self.name, objname=obj_name_hoc, objvar="gtot", recordobj=self)
def build_mod(self, modfile_set):
pass
exp2HOCTmpl = """
// Post-Synapse [$synnamepost]
objref $synnamepost
${cellname}.internalsections[$sectionindex] $synnamepost = new Exp2SynNMDAMorphforge ($sectionpos)
${synnamepost}.tau1 = $tau_open.rescale("ms").magnitude
${synnamepost}.tau2 = $tau_close.rescale("ms").magnitude
${synnamepost}.e = $e_rev.rescale("mV").magnitude
${synnamepost}.popening = $pOpening
${synnamepost}.is_vdep_on = $is_vdep_on
${synnamepost}.peak_conductance = $peak_conductance.rescale('uS').magnitude
${synnamepost}.is_conductance_limited_on = $is_conductance_limited_on
${synnamepost}.conductance_limit = $conductance_limit
${synnamepost}.gamma = $gamma.rescale('per_mV').magnitude
${synnamepost}.eta = $eta.rescale('per_mM').magnitude
${synnamepost}.mg2conc = $mg2conc.rescale('mM').magnitude
"""
class NEURONPostSynapticMechTemplate_Exp2SynNMDA(PostSynapticMech_Exp2SynNMDA_Base, NEURONPostSynapticMechTemplateForwardToTemplate):
def __init__(self, **kwargs):
super(NEURONPostSynapticMechTemplate_Exp2SynNMDA, self).__init__(**kwargs)
def build_hoc_for_instance(self, instance, hocfile_obj):
params = instance.get_resolved_parameters()
tau_open = params['tau_open']
tau_close = params['tau_close']
e_rev = params['e_rev']
popening = params['popening']
vdep = params['vdep']
limit_conductance = params['limit_conductance']
peak_conductance = params['peak_conductance']
gamma = params['gamma']
eta = params['eta']
mg2conc = params['mg2conc']
cell = instance.cell_location.cell
section = instance.cell_location.morphlocation.section
syn_name_post = instance.name + 'Post'
hoc_data_cell = hocfile_obj[MHocFileData.Cells][cell]
data = {
'synnamepost': syn_name_post,
'cell': cell,
'cellname': hoc_data_cell['cell_name'],
'sectionindex': hoc_data_cell['section_indexer'][section],
'sectionpos': instance.cell_location.morphlocation.sectionpos,
'tau_open': tau_open,
'tau_close': tau_close,
'e_rev': e_rev,
'pOpening': popening,
'random_seed': MFRandom.get_seed(),
'is_vdep_on': (1.0 if vdep else 0.0),
'is_conductance_limited_on': (1.0 if limit_conductance not in [None,False] else 0.0),
'conductance_limit': (limit_conductance if limit_conductance not in [None,False] else -1.0),
'peak_conductance': peak_conductance,
'gamma':gamma,
'eta':eta,
'mg2conc':mg2conc,
}
hocfile_obj.add_to_section(MHOCSections.InitSynapsesChemPost,
Template(exp2HOCTmpl, data).respond())
assert not instance in hocfile_obj[MHocFileData.Synapses]
hocfile_obj[MHocFileData.Synapses][instance] = data
def template_build_mod(self, modfile_set):
import postsynaptic_mechanisms_exp2syn_nmda_modfile_new
modfile = ModFile(modtxt=postsynaptic_mechanisms_exp2syn_nmda_modfile_new.get_exp2_syn_nmda_modfile(), name='UnusedParameterXXXExpSyn2', strict_modlunit=True)
modfile_set.append(modfile)
def get_record_for_instance(self, instance, what, **kwargs):
if what == NEURONSynapse.Recordables.SynapticCurrent:
return Neuron_PSM_Std_CurrentRecord(neuron_syn_post=instance, **kwargs)
if what == NEURONSynapse.Recordables.SynapticConductance:
return Neuron_PSM_Std_ConductanceRecord(neuron_syn_post=instance, **kwargs)
if what == StandardTags.NMDAVoltageDependancy:
return Neuron_PSM_Std_NMDAVoltageDependanceRecord(neuron_syn_post=instance, **kwargs)
if what == StandardTags.NMDAConductanceWithVDep:
return Neuron_PSM_Std_NMDAConductanceWithVoltageDependanceRecord(neuron_syn_post=instance, **kwargs)
assert False
NEURONEnvironment.synapse_psm_template_type.register_plugin(PostSynapticMech_Exp2SynNMDA_Base, NEURONPostSynapticMechTemplate_Exp2SynNMDA)
|
The company is offering private investors interest of between 6.0% and 8.5% on sums loaned. The duration of the loans ranges from one to three years. It is company policy to acquaint investors in detail with each project, with site visits before and during construction, so they can be confident about where their money is.
It is hoped that a long term relationship will develop and that investors will enjoy being part of local projects within Deal and the surrounding area.
|
import urllib
import sched
import time
from threading import Thread

from token import Token
from ..utils.http import do_basic_secure_post
from ..exceptions.exceptions import BasicAuthenticationFailedException


class DefaultSequencingOAuth2Client(object):
    # Attribute for value of redirect url
    ATTR_REDIRECT_URL = "redirect_uri"
    # Attribute for value of response type
    ATTR_RESPONSE_TYPE = "response_type"
    # Attribute for value state
    ATTR_STATE = "state"
    # Attribute for value client id
    ATTR_CLIENT_ID = "client_id"
    # Attribute for value scope
    ATTR_SCOPE = "scope"
    # Attribute for value code
    ATTR_CODE = "code"
    # Attribute for value refresh token
    ATTR_REFRESH_TOKEN = "refresh_token"
    # Attribute for access token
    ATTR_ACCESS_TOKEN = "access_token"
    # Attribute for value grant type
    ATTR_GRANT_TYPE = "grant_type"
    # Attribute for value expires in
    ATTR_EXPIRES_IN = "expires_in"

    def __init__(self, auth_parameters):
        self.auth_parameters = auth_parameters
        self.token = None
        self._token_refresher = None

    def http_redirect_parameters(self):
        attributes = {
            self.ATTR_REDIRECT_URL: self.auth_parameters.redirect_uri,
            self.ATTR_RESPONSE_TYPE: self.auth_parameters.response_type,
            self.ATTR_STATE: self.auth_parameters.state,
            self.ATTR_CLIENT_ID: self.auth_parameters.client_id,
            self.ATTR_SCOPE: self.auth_parameters.scope
        }
        return attributes

    def login_redirect_url(self):
        params = urllib.urlencode(self.http_redirect_parameters())
        return '%s?%s' % (self.auth_parameters.oauth_authorization_uri, params)

    def authorize(self, response_code, response_state):
        if response_state != self.auth_parameters.state:
            raise ValueError("Invalid state parameter")
        uri = self.auth_parameters.oauth_token_uri
        params = {
            self.ATTR_GRANT_TYPE: self.auth_parameters.grant_type,
            self.ATTR_CODE: response_code,
            self.ATTR_REDIRECT_URL: self.auth_parameters.redirect_uri
        }
        result = do_basic_secure_post(uri, self.auth_parameters, params)
        if result is None:
            raise BasicAuthenticationFailedException("Failure authentication.")
        access_token = result[self.ATTR_ACCESS_TOKEN]
        refresh_token = result[self.ATTR_REFRESH_TOKEN]
        timelife = int(result[self.ATTR_EXPIRES_IN])
        self.token = Token(access_token, refresh_token, timelife)
        self._token_refresher = self.__TokenRefresher(self, timelife - 60)
        self._token_refresher.start()
        return self.token

    def is_authorized(self):
        return (self.token is not None) and (self.token.lifetime != 0)

    def _refresh_token(self):
        uri = self.auth_parameters.oauth_token_refresh_uri
        params = {
            self.ATTR_GRANT_TYPE: self.auth_parameters.grant_type_refresh_token,
            self.ATTR_REFRESH_TOKEN: self.token.refresh_token
        }
        result = do_basic_secure_post(uri, self.auth_parameters, params)
        if result is None:
            # Concatenating None into the message would raise a TypeError here,
            # so report the failure without the (absent) server reply.
            raise BasicAuthenticationFailedException(
                "Authentication against backend failed: no response from server.")
        access_token = result[self.ATTR_ACCESS_TOKEN]
        refresh_token = self.token.refresh_token
        timelife = int(result[self.ATTR_EXPIRES_IN])
        self.token = Token(access_token, refresh_token, timelife)

    class __TokenRefresher(Thread):
        def __init__(self, outer, frequency):
            Thread.__init__(self)
            self.outer = outer
            self.frequency = frequency
            self.scheduler = sched.scheduler(time.time, time.sleep)

        def run(self):
            self.scheduler.enter(self.frequency, 1, self.__run_refresh_token, ())
            self.scheduler.run()

        def __run_refresh_token(self):
            self.outer._refresh_token()
            self.scheduler.enter(self.frequency, 1, self.__run_refresh_token, ())
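

# -----------------------------------------------------------------------------
# Minimal, hypothetical usage sketch (every name and URL below is a placeholder,
# not part of the original package): build an object carrying the attributes this
# client reads from auth_parameters, send the user to the login URL, then exchange
# the code returned by the provider for a token. The do_basic_secure_post helper
# may read further attributes (e.g. a client secret) not shown here.
class ExampleAuthParameters(object):
    redirect_uri = "https://example.com/callback"
    response_type = "code"
    state = "random-state-string"
    client_id = "my-client-id"
    scope = "demo"
    grant_type = "authorization_code"
    grant_type_refresh_token = "refresh_token"
    oauth_authorization_uri = "https://oauth.example.com/authorize"
    oauth_token_uri = "https://oauth.example.com/token"
    oauth_token_refresh_uri = "https://oauth.example.com/token"


def example_flow(response_code, response_state):
    client = DefaultSequencingOAuth2Client(ExampleAuthParameters())
    # 1. Redirect the user's browser to this URL and let them log in.
    print(client.login_redirect_url())
    # 2. The provider redirects back with ?code=...&state=...; exchange them.
    token = client.authorize(response_code, response_state)
    return token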
|
Tags: bar, metal frame, rustic, slatina, Wood top.
Unique live edge bar crafted by Horizon Home’s local artisans.
If you enjoy entertaining guests, you might want to add a wet bar to your home. It should be installed where you’ll host most of the time. Sometimes, that’s next to the kitchen. Other times, your wet bar is located in a rec room, great room, library, office or even in your outdoor space. Whichever location works best for you, Horizon Homes ensures that you will find a one of a kind bar to meet your most frequent entertaining needs.
|
#
# Copyright © 2012 - 2021 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""Tests for unitdata models."""
from django.urls import reverse
from weblate.checks.models import Check
from weblate.checks.tasks import batch_update_checks
from weblate.trans.models import Unit
from weblate.trans.tasks import auto_translate
from weblate.trans.tests.test_views import FixtureTestCase, ViewTestCase
class CheckModelTestCase(FixtureTestCase):
def create_check(self, name):
return Check.objects.create(unit=self.get_unit(), check=name)
def test_check(self):
check = self.create_check("same")
self.assertEqual(
str(check.get_description()), "Source and translation are identical"
)
self.assertTrue(check.get_doc_url().endswith("user/checks.html#check-same"))
self.assertEqual(str(check), "Unchanged translation")
def test_check_nonexisting(self):
check = self.create_check("-invalid-")
self.assertEqual(check.get_description(), "-invalid-")
self.assertEqual(check.get_doc_url(), "")
def test_check_render(self):
unit = self.get_unit()
unit.source_unit.extra_flags = "max-size:1:1"
unit.source_unit.save()
check = self.create_check("max-size")
url = reverse(
"render-check", kwargs={"check_id": check.check, "unit_id": unit.id}
)
self.assertEqual(
str(check.get_description()),
'<a href="{0}?pos=0" class="thumbnail">'
'<img class="img-responsive" src="{0}?pos=0" /></a>'.format(url),
)
self.assert_png(self.client.get(url))
class BatchUpdateTest(ViewTestCase):
"""Test for complex manipulating translation."""
def setUp(self):
super().setUp()
self.translation = self.get_translation()
def do_base(self):
# Single unit should have no consistency check
self.edit_unit("Hello, world!\n", "Nazdar svete!\n")
unit = self.get_unit()
self.assertEqual(unit.all_checks_names, set())
# Add linked project
other = self.create_link_existing()
# Now the inconsistent check should be there
unit = self.get_unit()
self.assertEqual(unit.all_checks_names, {"inconsistent"})
return other
def test_autotranslate(self):
other = self.do_base()
auto_translate(
None,
other.translation_set.get(language_code="cs").pk,
"translate",
"todo",
"others",
self.component.pk,
[],
99,
)
unit = self.get_unit()
self.assertEqual(unit.all_checks_names, set())
def test_noop(self):
other = self.do_base()
# The batch update should not remove it
batch_update_checks(self.component.id, ["inconsistent"])
batch_update_checks(other.id, ["inconsistent"])
unit = self.get_unit()
self.assertEqual(unit.all_checks_names, {"inconsistent"})
def test_toggle(self):
other = self.do_base()
one_unit = self.get_unit()
other_unit = Unit.objects.get(
translation__language_code=one_unit.translation.language_code,
translation__component=other,
id_hash=one_unit.id_hash,
)
translated = one_unit.target
combinations = (
(translated, "", {"inconsistent"}),
("", translated, {"inconsistent"}),
("", "", set()),
(translated, translated, set()),
("", translated, {"inconsistent"}),
)
for update_one, update_other, expected in combinations:
Unit.objects.filter(pk=one_unit.pk).update(target=update_one)
Unit.objects.filter(pk=other_unit.pk).update(target=update_other)
batch_update_checks(self.component.id, ["inconsistent"])
unit = self.get_unit()
self.assertEqual(unit.all_checks_names, expected)
for update_one, update_other, expected in combinations:
Unit.objects.filter(pk=one_unit.pk).update(target=update_one)
Unit.objects.filter(pk=other_unit.pk).update(target=update_other)
batch_update_checks(other.id, ["inconsistent"])
unit = self.get_unit()
self.assertEqual(unit.all_checks_names, expected)
|
The Hairstyles For Thick Curly Hair Simple Short Haircut Styles Short Haircuts For Thick Curly Hair The Great picture is in a category that can be used for individual and noncommercial purposes, because all trademarks referenced herein are the properties of their respective owners. You can browse other pictures of Hairstyles For Thick Curly Hair Simple Short Haircut Styles Short Haircuts For Thick Curly Hair The Great in our galleries below. If you want to see another picture, you can browse our other categories.
Hairstyles For Thick Curly Hair Simple Short Haircut Styles Short Haircuts For Thick Curly Hair The Great was posted on July 27, 2018 at 3:31 am and has been seen by 19 users. If you want to view the image in full size, just click the image in the gallery or click "View Image" at the bottom of the image.
|
import os
from django.conf import settings
import yaafelib as yf
import wave
import contextlib
from celery import task
from sepal.datasets.models import *
from sepal.datasets.utils import filter_by_key, find_dict_by_item
@task()
def handle_uploaded_file(f):
'''Saves an uploaded data source to MEDIA_ROOT/data_sources
'''
with open(os.path.join(settings.MEDIA_ROOT, 'data_sources', f.name), 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
return destination
@task()
def extract_features(dataset_id, instance_id, audiofile_path):
dataset = Dataset.objects.get(pk=dataset_id)
inst = Instance.objects.get(pk=instance_id)
n_frames, sample_rate, duration = 0, 0, 0
# Calculate the sample rate and duration
with contextlib.closing(wave.open(audiofile_path, 'r')) as audiofile:
n_frames = audiofile.getnframes()
sample_rate = audiofile.getframerate()
duration = n_frames / float(sample_rate)
# Format - {'Display name': 'name: Definition'}
FEATURES = [
{'display_name': 'Spectral Shape Characteristics',
'yaafe_name': 'sss',
'yaafe_definition': 'SpectralShapeStatistics',
'subfeatures': ['Spectral centroid', 'Spectral spread', 'Spectral kurtosis', 'Spectral skewness']
},
{'display_name': 'Temporal Shape Characteristics',
'yaafe_name': 'tss',
'yaafe_definition': 'TemporalShapeStatistics',
'subfeatures': ['Temporal centroid', 'Temporal spread', 'Temporal kurtosis', 'Temporal skewness']
},
{'display_name': 'ZCR',
'yaafe_name': 'zcr',
'yaafe_definition': 'ZCR',
'unit': 'Hz'
},
{'display_name': 'Energy',
'yaafe_name': 'energy',
'yaafe_definition': 'Energy',
},
{'display_name': 'Loudness',
'yaafe_name': 'loudness',
'yaafe_definition': 'Loudness',
},
{'display_name': 'Spectral rolloff',
'yaafe_name': 'spectral_rolloff',
'yaafe_definition': 'SpectralRolloff',
},
{'display_name': 'Perceptual sharpness',
'yaafe_name': 'perceptual_sharpness',
'yaafe_definition': 'PerceptualSharpness',
},
{'display_name': 'Perceptual spread',
'yaafe_name': 'perceptual_spread',
'yaafe_definition': 'PerceptualSpread',
},
{'display_name': 'Duration',
'unit': 's',
},
{'display_name': 'Sample rate',
'unit': 'Hz',
},
{'display_name': 'Spectral decrease',
'yaafe_name': 'spectral_decrease',
'yaafe_definition': 'SpectralDecrease',
},
{'display_name': "Spectral flatness",
'yaafe_name': 'spectral_flatness',
'yaafe_definition': 'SpectralFlatness',
},
# {'display_name': "Spectral flux",
# 'yaafe_name': 'spectral_flux',
# 'yaafe_definition': 'SpectralFlux',
# },
{'display_name': "Spectral slope",
'yaafe_name': 'spectral_slope',
'yaafe_definition': 'SpectralSlope',
},
# {'display_name': "Spectral variation",
# 'yaafe_name': 'spectral_variation',
# 'yaafe_definition': 'SpectralVariation',
# }
]
# Add features to extract
feature_plan = yf.FeaturePlan(sample_rate=sample_rate, resample=False)
for feature in FEATURES:
if 'yaafe_definition' in feature:
# YAAFE feature plans take definitions of the form: 'zcr: ZCR'
full_definition = feature['yaafe_name'] + ': ' + feature['yaafe_definition']
# Add the feature to the feature plan to be extracted
feature_plan.addFeature(full_definition)
# Configure an Engine
engine = yf.Engine()
engine.load(feature_plan.getDataFlow())
# Extract features
afp = yf.AudioFileProcessor()
afp.processFile(engine, audiofile_path)
# outputs dict format - {'Spectral centroid': [[2.33], [4.34],...[2.55]]}
outputs = {}
# Read and store output arrays to outputs dict
for feature in FEATURES:
if 'yaafe_definition' in feature: # Exclude duration and sample rate
output_name = feature['yaafe_name']
# If the feature has subfeatures, e.g. Spec shape stats
if 'subfeatures' in feature:
full_output = engine.readOutput(output_name)
for i, subfeature_display_name in enumerate(feature['subfeatures']):
outputs[subfeature_display_name] = full_output[:, i]
# If the feature has only 1 dimension(1 X T array)
else:
display_name = feature['display_name']
a = engine.readOutput(output_name) # 2D array
# Transpose data to make it a 1D array
outputs[display_name] = a.transpose()[0]
# Create YAAFE feature objects
feature_obj_list = []
for display_name in outputs.keys():
feature = find_dict_by_item(('display_name', display_name), FEATURES)
f, created = Feature.objects.get_or_create(
name=display_name.lower(),
display_name=display_name
)
if feature and ('unit' in feature):
f.unit = feature['unit']
f.save()
feature_obj_list.append(f)
# Create Sample rate and Duration objects
rate_obj, created = Feature.objects.get_or_create(name='sample rate')
if not rate_obj.unit:
rate_obj.unit = 'Hz'
rate_obj.save()
feature_obj_list.append(rate_obj)
duration_obj, created = Feature.objects.get_or_create(name='duration')
if not duration_obj.unit:
duration_obj.unit = 's'
duration_obj.save()
feature_obj_list.append(duration_obj)
# Associate features with instance
# for feature in feature_obj_list:
# inst.features.add(feature)
# If dataset has labels
if dataset.labels():
# NOTE: This assumes there's only one label name per dataset.
# Just indexes the first label name
label_name = dataset.labels()[0]
else:
# attach a placeholder LabelName called 'variable'
filtered = LabelName.objects.filter(name='variable')
# make sure that 'get' doesn't return an error if there are more than 1
# LabelName called 'variable'
if len(filtered) <= 1:
label_name, c = LabelName.objects.get_or_create(name='variable')
else:
label_name = filtered[0]
# Add a placeholder label value called "none" to instance
# This is necessary in order for plotting to work
filtered = LabelValue.objects.filter(value="none", label_name=label_name)
if len(filtered) <= 1:
no_label, c = LabelValue.objects.get_or_create(value="none",
label_name=label_name)
else:
no_label = filtered[0]
inst.label_values.add(no_label)
inst.save()
# Save output data and associate it with inst
for display_name, output in outputs.iteritems():
if output.size > 0: # Avoid empty data
for i in range(output[0].size):
output_mean = output[i].mean()
FeatureValue.objects.create(value=output_mean,
feature=Feature.objects.get(name__iexact=display_name.lower()),
instance=inst)
# Save sample_rate and duration data
FeatureValue.objects.create(value=sample_rate,
feature=Feature.objects.get(name='sample rate'),
instance=inst)
FeatureValue.objects.create(value=duration,
feature=Feature.objects.get(name='duration'),
instance=inst)
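

# -----------------------------------------------------------------------------
# Hypothetical usage sketch (the IDs and path below are placeholders, not part of
# the original project): from a Django shell or view, queue the Celery task for an
# uploaded WAV file. handle_uploaded_file() above writes uploads into
# MEDIA_ROOT/data_sources, so the same path can be passed straight on:
#
#   extract_features.delay(dataset.pk, instance.pk,
#                          os.path.join(settings.MEDIA_ROOT, 'data_sources', 'example.wav'))
#
# The worker then computes the YAAFE features listed in FEATURES plus sample rate
# and duration, and stores them as FeatureValue rows linked to the Instance.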
|
Rowan Moore: Housing is now higher up the British political agenda than it has been for a very long time. The government believes that an awful lot of houses have to be built very quickly, and the question this raises is: does this mean we will go back to the instruments that were used in the past, such as new towns, planning from the centre, major public investment in creating settlements at a large scale? Are we going back to the 1950s and 1960s?
Yvette Cooper: We need a very significant increase in housebuilding. Over the last 30 years of the 20th century, we saw a 30 per cent increase in the number of households and yet a 50 per cent drop in the level of housebuilding, and that’s clearly unsustainable. We are building more new homes than at any time since 1990—last year 185,000 were added to existing stock—but it is clearly not enough to keep up with rising demand, which is why we have set a target of 240,000 new homes a year by 2016, or 3m additional homes by 2020. But we need to make sure that we build those homes in a sustainable way, not just in terms of carbon emissions but also in terms of local communities. Part of that is recognising that every community has to build more homes, right across the country, and it should be for local councils and communities to decide where these homes should go within their area.
RM: So without exception all communities everywhere have to build more homes?
YC: One of the things that has changed compared to even five or six years ago is the fact that we are seeing serious pressures on affordability, as well as household growth, in the north as well as the south. As recently as the end of the 1990s, a lot of the northern regions and cities were experiencing population decline.
RM: So does that mean demolition of housing stock in the north is not going to happen?
YC: Some areas still suffer from low demand, like Hull and east Lancashire, but other areas have seen the problem change. Look at the Yorkshire region as a whole—the gap between the number of new households and the level of new housebuilding is bigger than it is in the southeast. Little wonder you have rising pressure on house prices.
The new planning rules that came in last April allow every community to plan for more housing according to local needs —what kinds of homes and where they should go. For example, the brownfield site target will be very different in different areas. So we’re giving councils more flexibility about how to build while recognising that everyone has to take responsibility about the fact that we do need more.
RM: So this is about changing planning policy, and effectively applying more pressure from the centre, albeit in a friendly way?
YC: Well, it’s saying that every area has to take responsibility. This is not something the government can do; it has to be done by partnerships between local councils, the private sector and housing associations. Government has to provide the framework and support, but we can’t take individual decisions for councils.
RM: But councils will say: we need roads, we need schools, we need infrastructure—where’s the funding coming from for that?
YC: There is £1.7bn of infrastructure funding for the next three years coming simply from my department alone. In addition, there’s major transport infrastructure investment—things like Crossrail—and also investment in schools, healthcare facilities and so on. But we also want councils to be able to raise revenue themselves from developers and from their “planning gain”—the increase in the value of land where planning permission is granted, particularly greenfield land. And we will be introducing a new local infrastructure levy as part of the planning charge—allowing councils to raise more revenue to support infrastructure. Finally, where we have new developments like eco-towns, we will contribute additional investment as well.
RM: The private sector tends to say that if you ask them for too much, they will not build. If the whole purpose of this policy is to increase supply so that prices fall over a period of time, aren’t you asking turkeys to vote for Christmas? Builders don’t have a great interest in increasing supply to the point at which the value of their product declines. How do you get around that?
YC: I think there is clear evidence that many private developers will respond strongly to short-term market pressures rather than the longer term rise in demand, but we’re waiting for John Callcutt’s report on housebuilding delivery in late November before coming to final conclusions. Let me mention one other area. At the moment, if a local authority owns a plot of land it thinks is appropriate for housing development, it will often sell it off to a developer and then try to influence through the planning system things like what the level of affordable housing should be. That, however, makes it very difficult for the council to influence the build-out rate—whereas if the council put the land instead into a local housing company and specified as part of the contract what the build-out rate should be, it would have more say over the pace of development. Government has to push harder, but can’t do it all.
RM: Historically, the decline in housebuilding over the last 30-40 years is almost entirely due to the decline in council housebuilding.
YC: That’s partly because demand has changed—most people want to be homeowners now. What is significant is not what has happened in the public sector, but what has failed to happen in the private sector. The private sector has not responded to rising demand for private housing—we effectively have a market failure. Part of that failure is the result of the planning system, and part of it is due to the short-term cyclical response of the housebuilding industry. The argument made by Kate Barker in her review of housing supply was that that can become self-perpetuating, because constraining supply over a long period of time can make the housing market more volatile.
RM: So your solutions are to free up the supply through planning process and, where the public sector has a role to play, to push harder. Is there anything else?
YC: I think you need more land coming through the planning system, and councils can do more here. And you need more active public sector engagement—whether through local housing companies or using public sector land, like old MoD sites. The other thing we’re doing is creating more incentives for local authorities and communities who are doing their bit—so as well as infrastructure support, we are providing revenue support for those areas through the housing planning delivery grant.
RM: In general, you’re taking about 3m houses by 2020, a colossal number. This implies a level of positive planning we haven’t seen for a long time, as most recent activity has been market-led within the constraints of an essentially negative planning system. So are we going to see some kind of more positive idea coming from the public sector about how to make communities and towns?
YC: We are talking about more positive planning, but that sort of place-shaping role is for local councils, not central government. It is central government’s role to look at overall national housing need, but also to make sure the system doesn’t encourage councils to free-ride on each other—to say: we’re not building more houses because they should go somewhere else. So yes, that does mean more positive planning than many councils have done in the past, but it needs to be planning in a way that involves local communities. Rather than a planner’s vision it needs to be the local community’s vision.
Take eco-towns; the first time for 40 years that government will be supporting new towns. We’ve had 50 expressions of interest. Some of those will not be runners, but it demonstrates a strong interest. We have said new towns are justifiable only if they can be done at much higher environmental standards than in the past. So the zero-carbon vision applies not just to the housing but the shops, the offices, the pubs and clubs, the schools.
We are aiming for a two-phase design competition for the eco-towns. The first phase is the ideas stage, and the second will be for individual towns. We’re running the competition with the Prince’s Foundation, Riba and Cabe, but we also want to involve citizens’ juries in the first stage, so you get people involved who are not in design or architecture or urban planning, but who are simply thinking: is this a place I would want to live in? The bids are now in, and we would expect to say more about the locations in the new year.
Matthew Lockwood: You’re planning to build 3m houses by 2020, and eco-towns will provide less than 10 per cent of these. So how do you see the relationship between zero-carbon homes and the mass of houses out there, especially when some housebuilders see eco-towns as niche and too expensive for the mass market? What can be done to link the ambitions of eco-towns to wider housing policy?
YC: We’ve set a timetable for all new housing to be zero carbon by 2016, and that will be underpinned in building regulations. It’s a more ambitious goal than any other country has aimed for. By 2010, all new homes will need to meet code level 3, by 2013 code level 4 and by 2016 code level 6, which is zero carbon: this means no net carbon emissions over the course of a year—you might draw down from the grid one month, but then you’d need to contribute back into the grid another month. We have worked on this with the Local Government Association, the World Wildlife Fund and the Housebuilders’ Federation. About 170 organisations have now signed up to our code for sustainable homes, ranging from major housebuilders and the HBF to environmental organisations, local councils, the LGA, employers and unions. It’s about trying to build a big consensus behind the proposition that we have to deliver more housing and cut emissions from housing too. The ten-year timetable gives the market an incentive to invest and to plan now. People need to plan for a complete revolution in the way they design and build homes.
ML: What’s happening on enforcement and inspectors?
YC: There was a problem with the enforcement of, I think, the 2002 building regulations. When the 2006 regulations were introduced, a lot of time and investment was put into enforcement, but we still need to go further. The department is undertaking a review of enforcement, and Ian Wright has been looking at a wider review of building regulations and overall enforcement.
ML: What’s going on with the Merton rule [the London borough of Merton introduced a rule in 2004 requiring 10 per cent of energy use in new developments to come from renewables]?
YC: Councils should be able to set out policies on renewables, but it should be part of an overall plan and tested through the planning process, as with Merton. Some councils have added such policies later as part of supplementary guidance, but we think it needs to be part of the overall development framework.
ML: So you’re setting out rules for the application of the Merton framework.
YC: Yes, that’s why we are setting out a whole planning policy statement on climate change. People should see prevention of carbon emissions not as an add-on but as a fundamental part of the planning process. The Merton rule may be the right approach in terms of getting a base position, but on particular sites councils should go further. An example is the Barking development, where they’re using a combined heat and power (CHP) station alongside a power station. So first you make sure your policy is compatible with building more homes; then you make sure you are not simply looking across the board but also at individual site opportunities; and third you should start to be flexible about on- and off-site provision. If you’ve got a particular development of, say, 50 houses that you could connect up to some renewable energy or a local CHP plant 100 yards down the road, you should be able to do that sort of thing, because it might give you a lower-carbon opportunity than saying it’s all got to be on that particular site. You should have the flexibility to look at different kinds of technologies.
RM: You’re asking for a lot of quality and a lot of quantity at the same time. What do you do if the private sector doesn’t deliver in the way you hope?
YC: One thing is to recognise that we need to do more with public sector assets. We are aiming to use these assets strategically, when we can, to support additional development. Second, as I said, we will look at the John Callcutt review and the structure of the private market. If there is a market failure here, if the private sector is not responding properly to rising demand, what does that tell us about the industry and planning sector?
RM: This has been government policy for quite some time now, under John Prescott and others—what are the models for the future from the activity that has already happened?
YC: One important thing has been the redevelopment of town centres; we’ve seen a big increase in the use of brownfield land. Attitudes are changing among local councils, which we can see through the number of councils that have come forward to be growth areas in the past few months. There were the original 2003 growth areas—the Thames gateway, Milton Keynes, Ashford and the M11 corridor—then there was the first wave of smaller growth areas. Now we’ve announced that 78 local authorities have come forward as part of a second phase of growth points; in total that means that more than half of all local authorities have come forward to work with us on sustaining significantly higher levels of housing growth. That’s a big change from two years ago.
RM: And has that happened because of the government’s initiatives?
YC: I think it’s partly the growing recognition of the problems of affordability—throughout the country we’ve got people on council waiting lists, we’ve got first-time buyers who can’t get on the ladder, we’ve got people’s sons and daughters who can’t afford a home in the place they grew up. Alongside this, the work on cutting carbon emissions has been important; it has changed attitudes about the sustainability of development. It’s the prospect of being able to do new development which cuts carbon emissions and potentially develops new technologies that you can then use to retrofit existing areas—which is the real prize on the environmental side. The new homes we build will end up being about a third of the stock by 2050, so it’s significant, but the big area that we need to do more with is existing stock.
ML: The hope will be that prices of on-site renewables and renewable electricity will come down, but reducing emissions from heating and retrofitting old stock is a different kind of problem. If you’re building a new house you can insulate it to very high standards, but if you’ve got a Victorian dwelling, you can’t put in cavity wall insulation. Maybe renewable heat technologies can help, but I would press you on where the gains are going to be.
YC: We need to develop radical environmental technologies that solve problems like that. The way I describe it is that you need “magic wallpaper.” The officials think I’m stupid when I say that, but it is what you need—well-insulating, inexpensive stuff that you can put on to solid walls easily and that doesn’t add an extra couple of inches to the walls. This is the kind of technology that you need. On one of our visits to Sweden, we met Skanska, who are looking at ways to use their technology to retrofit, and they were far further ahead than a lot of our housebuilders in terms of energy efficiency. You can set an incentive framework for new housing that you hope will give the economies of scale that will then drive the technological spinoffs, and you then need to look at different mechanisms to apply these to existing homes.
RM: There’s huge inflation in construction prices at the moment; the industry is already stretched building existing homes. How will it have the capacity to build the extra 3m homes on top of the things they’re already building?
YC: One finding of John Callcutt’s preliminary assessment was that the industry does have the capacity to expand substantially and could support a big increase in housebuilding, but obviously that’s something we need to monitor carefully. One way of improving competition in the market is to have companies come in from other countries; having housing associations do more is another. We have also been working with John Denham’s department of skills on expanding construction apprenticeships and the skills base for longer-term expansion.
RM: If the property market cools down, as it seems to be doing, does that reverse the urgency of what you’re doing?
YC: Well given how long it takes to build homes and to get from deciding on a site to actual construction, I think the long-term demand is clearly growing. There’s a lot of pent-up demand—a lot of first-time buyers who would like to get on to the property ladder. The national housing and planning advice unit’s assessment was that the long-term factors in the market will remain very strong regardless of what happens over the next 12 months, so I think the need for building more homes for future generations is extremely clear.
RM: As well as housebuilders, the other people you’ve got to get to perform are local authorities. It’s clear that a lot of this new development is going to have to be on greenfield and green belt sites as well as on brownfield—something that can be unpopular with local authorities.
YC: The proportion of new homes being built on brownfield land is 74 per cent, up from 56 per cent in 1997. At the same time we have seen an increase in housebuilding.
RM: But even at that figure it means that 750,000 of the 3m houses are going to be built on greenfield.
YC: What it means is that we’ve had a reduction in the proportion of homes being built on greenfield land. And bear in mind the distinction between greenfield and green belt: we’ve said that the green belt policy—which prevents urban sprawl—needs to continue. In the end, however, it has to be for local councils to decide what’s right in their area. Remember, too, that more brownfield land comes on stream all the time because land use changes. In my constituency we have a major programme for new housing and jobs on a pit site that only closed about four years ago. We’ve also got two proposals for development on two former chemical factory sites that only closed in the last two or three years. In the public sector, you’ve got disused British Railways Board land—often in sustainable locations because it is close to stations and good to build on—that has just been sitting there. But you also have to bear in mind that councils decide not only where homes need to be located but also what kind of homes they need. Family homes with gardens will have different kinds of land and locational requirements to city-centre flats.
RM: What happens if a local authority decides what is really needed is another 30 eight-bedroom executive homes with triple garages?
YC: Well, they’re going to have to deliver mixed communities.
RM: But what if they don’t want to play ball? There will be local authorities who say we don’t want this, and we can guess what kind of local authorities they will be.
YC: There are very substantial differences between the political parties on this. The greatest opposition has come from Conservative-controlled authorities—the Conservative-controlled southeast regional assembly has argued for cuts in the level of housebuilding, not even lower increases, which I think is completely bonkers given the level of demand. Some of this comes down to the need to make the political argument in every community across the country that if councils just back small amounts of executive housing, that is deeply unjust, and lets down local first-time buyers and council tenants. It is in the council’s interest, though, to work with the planning framework, because otherwise they can end up losing out on additional investment in terms of the housing planning delivery grant, and having proposals they turn down overturned on appeal. That is not a sensible way to build communities. Also, if the plan they set out does not meet local needs, they won’t be able to get it adopted in the first place if it doesn’t get through the independent assessment, and that can cause them all kinds of problems in their ability to get proper sustainable growth and jobs in their area.
ML: When you say things like local authorities shouldn’t use the Merton rule to prevent housebuilding, I suppose the fear is that in the rush to meet the housebuilding targets, environmental goals will be watered down.
YC: Well, be clear, we’ve set the 2016 timetable that all homes have got to be zero carbon—a really ambitious national framework—and we will not reach that unless we get a substantial increase in renewables, and that means local renewables—we’re not talking about offsetting with a wind turbine in Cornwall. We’re talking about the policies that local councils adopt, so it’s ultimately for them to decide, but our approach is that there should be sufficient flexibility to be able, for example, to connect to a local CHP, if you’ve got one in the area, rather than ending up effectively choosing technologies because they have to be on rather than off site. This is about innovation at the local level, on the way in which you deliver local renewables that will have the biggest impact on cutting carbon emissions but also deliver affordability. The local issue is about innovation and flexibility, while the wider question about overall government credibility in backing renewables is demonstrated by the 2016 commitment—we just won’t get there unless we get a very substantial increase in renewables.
ML: This raises questions about the fact that power like CHP tends to be demand-led rather than just lying around unexploited.
YC: Our approach as central government should not be to pick technologies—the frameworks ought to encourage the cutting of carbon emissions and the use of renewable energy as a way to cut emissions, but councils must make sure that they have sufficient flexibility on technology. You need to be technology-neutral on both the renewable side and the energy-efficiency side if you’re going to get sufficient innovation to get the cost reductions in the delivery.
RM: Is the ultimate aim of this to bring house prices down?
YC: The problem we’ve got is that house prices have gone up significantly faster than earnings, not that house prices have gone up. What we’re trying to do effectively is prevent that huge gap between price increases and earnings increases. What we’re not trying to do is to have a long-term project that’s about making house prices go down.
|
#!/usr/bin/python
"""
Make single page versions of the documentation for release and
conversion into man pages etc.
"""
import os
import re
from datetime import datetime
docpath = "docs/content"
outfile = "MANUAL.md"
# Order to add docs segments to make outfile
docs = [
"about.md",
"install.md",
"docs.md",
"overview.md",
"drive.md",
"s3.md",
"swift.md",
"dropbox.md",
"googlecloudstorage.md",
"amazonclouddrive.md",
"local.md",
"changelog.md",
"bugs.md",
"faq.md",
"licence.md",
"authors.md",
"contact.md",
]
# Docs which aren't made into outfile
ignore_docs = [
"downloads.md",
"privacy.md",
"donate.md",
]
def read_doc(doc):
"""Read file as a string"""
path = os.path.join(docpath, doc)
with open(path) as fd:
contents = fd.read()
parts = contents.split("---\n", 2)
if len(parts) != 3:
raise ValueError("Couldn't find --- markers: found %d parts" % len(parts))
contents = parts[2].strip()+"\n\n"
# Remove icons
contents = re.sub(r'<i class="fa.*?</i>\s*', "", contents)
# Make [...](/links/) absolute
contents = re.sub(r'\((\/.*?\/)\)', r"(http://rclone.org\1)", contents)
return contents
def check_docs(docpath):
"""Check all the docs are in docpath"""
files = set(f for f in os.listdir(docpath) if f.endswith(".md"))
files -= set(ignore_docs)
docs_set = set(docs)
if files == docs_set:
return
print "Files on disk but not in docs variable: %s" % ", ".join(files - docs_set)
print "Files in docs variable but not on disk: %s" % ", ".join(docs_set - files)
raise ValueError("Missing files")
def main():
check_docs(docpath)
with open(outfile, "w") as out:
out.write("""\
%% rclone(1) User Manual
%% Nick Craig-Wood
%% %s
""" % datetime.now().strftime("%b %d, %Y"))
for doc in docs:
out.write(read_doc(doc))
print "Written '%s'" % outfile
if __name__ == "__main__":
main()
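# Illustrative sketch (not called anywhere): what read_doc() effectively does to a
# single docs page, shown on a made-up front-matter snippet rather than a real file.
def _example_read_doc_transform():
    sample = ("---\n"
              "title: About rclone\n"
              "---\n"
              '<i class="fa fa-cloud"></i> Rclone syncs files to [storage](/overview/).\n')
    body = sample.split("---\n", 2)[2].strip() + "\n\n"
    # strip the icon markup, as read_doc() does
    body = re.sub(r'<i class="fa.*?</i>\s*', "", body)
    # rewrite site-relative links to absolute rclone.org links
    body = re.sub(r'\((\/.*?\/)\)', r"(http://rclone.org\1)", body)
    return body  # 'Rclone syncs files to [storage](http://rclone.org/overview/).\n\n'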
|
Printed key rings will take your brand places, as your target audience drives about with your logo hanging on the chain.
We have a massive range of promotional key rings. Most are warehoused locally and can be printed on demand with your company logo. Each key ring has different branding options including pad printing, screen printing and even engraving for that more sophisticated look. Some of these options can also receive a resin coating which makes for a smart finish.
In addition to these key chains, we also have relationships with manufacturers that can create a bespoke key ring, according to your custom specifications. So, speak to our team about your printing requirements and we would be happy to discuss the options with you.
|
'''
Utility classes and functions.
Copyright 2009-2016 GoodCrypto
Last modified: 2016-05-18
This file is open source, licensed under GPLv3 <http://www.gnu.org/licenses/>.
'''
from __future__ import unicode_literals
import os
import sys
from datetime import datetime
from traceback import format_exc
from django.conf import settings
from django.utils.encoding import force_text, smart_text, DjangoUnicodeDecodeError
import syr
from syr.log import get_log
log = get_log()
def to_unicode(s):
    ''' Converts a string to unicode. If it can't convert, returns u''.
    See django.utils.encoding.smart_str() and smart_unicode() for a better implementation.
    http://www.saltycrane.com/blog/2008/11/python-unicodeencodeerror-ascii-codec-cant-encode-character/
    http://wiki.python.org/moin/UnicodeEncodeError'''
try:
unicode_s = force_text(s)
str(unicode_s)
except Exception as e:
try:
unicode_s = force_text(s, encoding=syr.prefered_encoding)
except:
try:
# \u0000 through \u00FF, inclusive
unicode_s = force_text(s, encoding='iso-8859-1')
except Exception as e:
log('Unable to convert %r to unicode: %r' % (s, e))
unicode_s = force_text('')
return unicode_s
def is_secure_connection(request):
''' Check if connection is secure. '''
secure = False
try:
if 'HTTP_X_SCHEME' in request.META:
secure = 'https' == request.META['HTTP_X_SCHEME']
elif 'wsgi.url_scheme' in request.META:
secure = 'https' == request.META['wsgi.url_scheme']
except:
log(format_exc())
return secure
def django_error_page_response(request, error=None):
''' Return a response with Django's error page.
If settings.DEBUG is True, Django automatically shows a useful
error page for exceptions in views. But sometimes an exception
    isn't propagated out of the view, such as when the exception
occurs in a separate thread. This shows the Django error page
for any exception.
If error is not present or is None, returns an error page for the
last exception.
Example:
error = None
...
# in separate thread
error = sys.exc_info()
...
# in parent thread
        django_error_page_response(request, error)
'''
from django.views.debug import technical_500_response
# error should be sys.exc_info() from an earlier except block
if not error:
error = sys.exc_info()
exc_type, exc_value, tb = error
    response = technical_500_response(request, exc_type, exc_value, tb)
    return response
def is_django_error_page(html):
''' Returns True if this html contains a Django error page,
else returns False.'''
django_error_1 = "You're seeing this error because you have"
django_error_2 = 'display a standard 500 page'
try:
smart_html = smart_text(html)
except DjangoUnicodeDecodeError:
# definitely not a django html error page
result = False
else:
result = (django_error_1 in smart_html) and (django_error_2 in smart_html)
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
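# Illustrative, Django-free sketch of the fallback chain used in to_unicode() above.
# The function name and the encoding list are made up for the example, and it assumes
# Python 3 semantics (str vs. bytes); it is not part of this module's API.
def _example_fallback_decode(value, encodings=('utf-8', 'iso-8859-1')):
    ''' Return text from value, trying each encoding in turn; return '' if none works.
        Bytes that are valid utf-8 decode on the first try; bytes like b'caf\\xe9'
        fail utf-8 and fall back to iso-8859-1. '''
    if isinstance(value, str):
        return value
    for encoding in encodings:
        try:
            return value.decode(encoding)
        except (UnicodeDecodeError, AttributeError):
            continue
    return ''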
|
Hello, I am selling unconditional love for $0.
Infinite square feet lot. Includes refreshing air, sunshine, the Earth, beautiful mountains and the ocean. A stunning ‘classic’ heritage made 4.5 billion years ago.
Address: Earth, Solar System, Milky Way.
Available now to serious buyers. Perfect for the first-time buyer and experienced investors feeling empty and trapped with stress and pressure to maximize Return on Investment because of family and stakeholder expectations.
I received it a long time ago when I was born. I received the gift of life from my parents, the sun, the water, the Earth, the air, a smile, a hug, a helping hand, without being asked anything in return. I didn’t earn any of them. They belong to the public domain, the commons, “Mother Earth” or God, if you’re religious.
At some point, my ancestors were afraid and decided to own land that once belonged to everyone, sell services that were once freely given, and even own humans through debt. Since it was claimed or stolen by those who made the laws themselves, through a series of ‘legitimate’ transfers, any law that enforces private ownership is perpetuating a crime. Not all laws are morally just. Not so long ago, slavery was legal and women weren’t allowed to vote. Why pay for something that was stolen from you? They made a mistake. I’m sorry. Let’s start fresh with this new sale.
Although I am only one human and my time and resources are limited, my soul and the intention of love is eternal, and so is yours… and I would like us to share that experience.
I don’t accept cheques or mortgage payments, only cash payments of $0. You are also welcome to re-sell it for $0. But I must warn you… it will multiply once you do.
PPS: If you are disappointed and missed out on the $51 million Point Grey mansion recently sold in Vancouver, then you are in luck. What I am offering is priceless.
Unconditional love is in high demand in today’s Vancouver housing market. I will give you a 100% commission of $0.00 on the sale. Please inquire for details!
12 March: Two prospective buyers contacted me.
13: A real estate agent is interested in representing this sale. One new prospect.
15: An organization called Sustainable Human shared this with their 1 million fans… many new buyers… Thank you!
17: More social media shares… many emails… people tell me they heard of this on Films for Action (http://www.filmsforaction.org/articles/the-craigslist-property-ad-that-got-more-than-a-million-views-in-a-couple-of-days/), Gawker, nature and environmental websites, and more… Some people have re-posted this ad in their city, like this one in Burlington (http://burlington.craigslist.org/reo/). Very beautiful stories. So much love.
“Greetings. I am interested in investing ALL of my wealth, totalling $0.00, and would ask that you offer any real estate of my surplus $0.00 to those less fortunate.
I’m simply correcting the housing bubble. The Earth’s asking price for its abundant gifts to us has always been $0.00.
No. The Sun rises on both the evil and the good, and the Earth’s rain falls on both the righteous and the wrongdoers. That’s the Sun’s and the Earth’s unconditional love to all of us. We didn’t earn any of it.
Everything. Here’s a beautiful poem that may answer your question: “Even after all this time, the Sun never says to the Earth, ‘You owe me.’ Look what happens with a love like that: it lights the whole sky.” – Hafiz, a Persian poet of the 1300s.
Since I’m returning the Earth, which rightfully belongs to everyone, all previous legal title claims over the Earth are invalid. Love is rebellious and revolutionary.
Please, live and eat freely on this Earth wherever you choose as long as it does not cause harm to others. If your government enforces an eviction based on outdated and immoral private property laws, which ignored the formal laws of the commons and of indigenous people, then you can simply show them this “new” title deed of the Earth’s unconditional love which I have sold to you for $0.00. If they refuse to accept it, you may wish to take them to their own Supreme Court to change this law, and who knows, you may eventually win because what I stated above is based on historical facts.
You’re not alone. For example, there is the Law of the Rights of Mother Earth passed by the President of Bolivia. Indigenous cultures and ancient cultures lived this way. If you’re Christian, perhaps you could use your Canadian rights of religious freedom to live on the Earth based on Acts 2 and 4. “Now the whole group of those who believed were of one heart and soul, and no one claimed private ownership of any possessions, but everything they owned was held in common.” This is not legal advice. Please consult your lawyer. Whatever path you decide is up to you!
The truth of the matter is simple. Love, the Earth, the Sun and the stars are not for sale and they cannot be owned for any other price than $0.00.
Can I repost this ad in my city? Can I translate it in my language? Can I share, copy, re-sale, edit, etc on my blog, news website, etc?
Yes. My public content is un-copyrighted, no rights reserved, and free to use, mix, etc, by all life forms including cats… I claim no ownership of my thoughts or pictures because they’re not ‘mine’ to own. I’m not a possessive guy, unless it’s chocolate cookies… Use your own name if you wish, no need to credit me. There’s an infinite amount of unconditional love, so feel free to look for buyers and re-sell it. The Earth’s unconditional love is yours. Why not post it in New York, London, Tokyo, Sydney, etc? Go for it.
Yes, I accept all foreign currency as long as it’s 0.00.
There is no price tag on love. Love is the most precious gift we can give to the ones we love. However, I think sometimes people do feel hurt when they don't get back any love from the one they love. Love is beautiful but also very tricky. Sometimes I think I know what love is, which is to make the one I love to be happy, but sometimes, I wonder what it is really about. But whatever it is, I choose to follow my heart. Be bright.
Love is to make one's self happy. When you achieve that, those around you will feel it, respond and be lifted to your vibration of Love.
|
'''Post service, dealing with outgoing post'''
import threading
import time
import socks
from murmeli.system import System, Component
from murmeli.signals import Timer
from murmeli.message import StatusNotifyMessage, Message, RelayMessage
from murmeli import dbutils
from murmeli import imageutils
from murmeli import guinotification
class DefaultMessageTransport:
'''Class which the outgoing postman usually uses to send messages.
May be substituted by another object for use in unit tests.'''
@staticmethod
def send_message(msg_bytes, whoto):
'''Try to send the given message over the default mechanism'''
try:
sock = socks.socksocket()
sock.setproxy(socks.PROXY_TYPE_SOCKS4, "localhost", 11109)
sock.connect((whoto + ".onion", 11009))
num_sent = sock.send(msg_bytes)
sock.close()
if num_sent != len(msg_bytes):
print("Num bytes sent:", num_sent, "but message has length:", len(msg_bytes))
else:
return PostService.RC_MESSAGE_SENT
except Exception as exc:
print("Socks send threw something:", exc)
return PostService.RC_MESSAGE_FAILED
class PostService(Component):
'''System component for managing the outgoing post'''
# Return codes
RC_MESSAGE_SENT = 1
RC_MESSAGE_IGNORED = 2
RC_MESSAGE_FAILED = 3
RC_MESSAGE_INVALID = 4
def __init__(self, parent, transport=None):
Component.__init__(self, parent, System.COMPNAME_POSTSERVICE)
self.work_lock = threading.Lock()
self.flush_timer = None
self.need_to_flush = True
self.step_counter = -1
self.running = False
self.flush_interval = 30 # By default, flush every 30 seconds
self.transport = transport or DefaultMessageTransport()
self.should_broadcast = True
def set_timer_interval(self, timer_secs):
'''Set the interval to a non-default value (especially for tests)'''
self.flush_interval = timer_secs
self.step_counter = 0
def checked_start(self):
'''Start the separate threads'''
self.running = True
if self.flush_interval:
self.flush_timer = Timer(self.flush_interval, self._flush)
return True
def stop(self):
'''Stop this component'''
self.running = False
if self.flush_timer:
self.flush_timer.stop()
def request_broadcast(self):
'''Request a broadcast in a separate thread'''
self.step_counter = -1
self.request_flush()
def request_flush(self):
'''Request a flush in a separate thread'''
self.need_to_flush = True
if self.flush_interval == 0:
Timer(1, self._flush, repeated=False)
def _flush(self):
'''Flush the outbox'''
self.step_counter = (self.step_counter + 1) % 10
if not self.step_counter:
self._broadcast()
if not self.need_to_flush:
return
if self.work_lock.acquire(timeout=2):
print("Flush")
self.need_to_flush = False
self.call_component(System.COMPNAME_GUI, "notify_gui",
notify_type=guinotification.NOTIFY_OUTBOX_FLUSHING)
# Look in the outbox for messages
database = self.get_component(System.COMPNAME_DATABASE)
messages_found = 0
messages_sent = 0
failed_recpts = set()
# Loop twice over all messages, firstly dealing with priority messages
for flush_iter in range(2):
print("Flush iter %d" % flush_iter)
for msg in database.get_outbox():
if not msg:
continue # message already deleted
if not self.running:
break # flushing stopped from outside
if flush_iter == 0:
messages_found += 1
recipient = msg.get('recipient')
if not self.call_component(System.COMPNAME_CONTACTS,
"is_online", tor_id=recipient):
continue # not a priority for the first iter
msg_sent, should_delete = self.deal_with_outbox_msg(msg, failed_recpts)
if msg_sent:
messages_sent += 1
self.call_component(System.COMPNAME_GUI, "notify_gui",
notify_type=guinotification.NOTIFY_MSG_SENT)
if should_delete \
and not database.delete_from_outbox(index=msg.get("_id")):
print("Failed to delete from outbox:", msg)
                    # Wait in between sends to avoid overloading the network
time.sleep(3)
print("From %d messages, I managed to send %d" % (messages_found, messages_sent))
# We tried to send a message to these recipients but failed - set them to be offline
for recpt in failed_recpts:
self.call_component(System.COMPNAME_CONTACTS, "gone_offline",
tor_id=recpt)
print("Finished flush, releasing lock")
self.work_lock.release()
def deal_with_outbox_msg(self, msg, failed_recpts):
'''Deal with a message in the outbox, trying to send if possible'''
# send_timestamp = msg.get('timestamp', None) # not used yet
# TODO: if timestamp is too old, either delete the message or move to inbox
# Some messages have a single recipient, others only have a recipientList
recipient = msg.get('recipient')
if recipient:
return self.deal_with_single_recipient(msg, recipient, failed_recpts)
if msg.get('recipientList'):
return self.deal_with_relayed_message(msg, failed_recpts)
print("msg in outbox had neither recipient nor recipientList?", msg)
msg_sent = False
should_delete = False
return (msg_sent, should_delete)
def deal_with_single_recipient(self, msg, recipient, failed_recpts):
'''Try to send the given message to the specified recipient'''
print("Dealing with single recipient:", recipient)
msg_bytes = None
msg_sent = False
should_delete = False
send_result = self.RC_MESSAGE_IGNORED
database = self.get_component(System.COMPNAME_DATABASE)
# Check recipient status, if it's deleted then delete message also
if dbutils.get_status(database, recipient) in [None, 'deleted']:
send_result = self.RC_MESSAGE_IGNORED
elif recipient in failed_recpts:
print("Not even bothering to try to send to '%s', previously failed" % recipient)
send_result = self.RC_MESSAGE_FAILED
else:
msg_bytes = imageutils.string_to_bytes(msg['message'])
send_result = self._send_message(msg_bytes, msg.get('encType'), recipient)
msg_sent = (send_result == self.RC_MESSAGE_SENT)
if msg_sent:
# The recipient and I are both online
self.call_component(System.COMPNAME_CONTACTS, "come_online", tor_id=recipient)
own_tor_id = dbutils.get_own_tor_id(database)
self.call_component(System.COMPNAME_CONTACTS, "come_online", tor_id=own_tor_id)
self.call_component(System.COMPNAME_LOGGING, "log",
logstr="Sent '%s' to '%s'" % (msg.get('msgType'), recipient))
if send_result in [self.RC_MESSAGE_IGNORED, self.RC_MESSAGE_SENT, self.RC_MESSAGE_INVALID]:
# either recipient was blocked or message was sent, either way delete it
should_delete = True
else:
failed_recpts.add(recipient)
if not msg.get('queue'):
print("Failed to send a message but it shouldn't be queued, deleting it")
should_delete = True
elif msg.get('relays'):
print("Failed to send but I can try to relay it")
signed_blob = self._get_blob_to_relay(msg, database)
# Loop over each relay in the list and try to send to each one
failed_relays = set()
for relay in msg.get('relays'):
if relay not in failed_recpts and \
self._send_message(signed_blob, Message.ENCTYPE_RELAY,
relay) == self.RC_MESSAGE_SENT:
print("Sent message to relay '%s'" % relay)
self.call_component(System.COMPNAME_LOGGING, "log",
logstr="Relayed '%s'" % msg.get('msgType'))
else:
# Send failed, so add this relay to the list of failed ones
failed_relays.add(relay)
failed_recpts.add(relay)
# here we update the list even if it hasn't changed
database.update_outbox_message(index=msg["_id"],
props={"relays":list(failed_relays)})
return (msg_sent, should_delete)
def _get_blob_to_relay(self, msg, database):
'''Get a signed blob so the message can be relayed'''
if msg.get('relayMessage'):
return bytes(msg.get('relayMessage'))
print("No signed blob in message, need to create one")
msg_bytes = imageutils.string_to_bytes(msg['message'])
signed_blob = RelayMessage.wrap_outgoing_message(self._sign_message(msg_bytes))
database.update_outbox_message(index=msg["_id"],
props={"relayMessage":list(signed_blob)})
return signed_blob
def _sign_message(self, msg_bytes):
'''Sign the given bytes with our own key id'''
database = self.get_component(System.COMPNAME_DATABASE)
own_key_id = dbutils.get_own_key_id(database)
crypto = self.get_component(System.COMPNAME_CRYPTO)
if not own_key_id or not crypto:
print("Failed to sign message using own key '%s'" % own_key_id)
return None
return crypto.sign_data(msg_bytes, own_key_id)
def deal_with_relayed_message(self, msg, failed_recpts):
'''Try to send the given relay message to a recipient list'''
msg_sent = False
should_delete = False
msg_bytes = imageutils.string_to_bytes(msg['message'])
failed_recpts_for_message = set()
database = self.get_component(System.COMPNAME_DATABASE)
own_tor_id = dbutils.get_own_tor_id(database)
for recpt in msg.get('recipientList'):
if recpt in failed_recpts:
failed_recpts_for_message.add(recpt)
else:
send_result = self._send_message(msg_bytes, msg.get('encType'), recpt)
if send_result == self.RC_MESSAGE_SENT:
msg_sent = True
self.call_component(System.COMPNAME_CONTACTS, "come_online", tor_id=recpt)
self.call_component(System.COMPNAME_CONTACTS, "come_online", tor_id=own_tor_id)
elif send_result == self.RC_MESSAGE_FAILED:
# Couldn't send to this relay recipient
failed_recpts_for_message.add(recpt)
failed_recpts.add(recpt)
if failed_recpts_for_message:
# update msg with the new recipientList
relays = list(failed_recpts_for_message)
database.update_outbox_message(index=msg["_id"],
props={"recipientList":relays})
print("Failed to send a relay to:", failed_recpts_for_message)
else:
print("Relayed everything, now deleting relay message")
should_delete = True
return (msg_sent, should_delete)
def _send_message(self, msg_bytes, enctype, whoto):
'''Send the given message to the specified recipient'''
if not msg_bytes:
return self.RC_MESSAGE_INVALID
print("Send_message (%d bytes) to '%s'" % (len(msg_bytes), whoto))
if not whoto or not isinstance(whoto, str) or len(whoto) < 16:
print("whoto no good, returning invalid")
return self.RC_MESSAGE_INVALID
database = self.get_component(System.COMPNAME_DATABASE)
profile = database.get_profile(torid=whoto)
status = profile.get('status') if profile else None
if enctype == Message.ENCTYPE_NONE:
status = 'allowed'
if not status or status in ['deleted', 'blocked']:
# recipient not found or unsuitable status
print("status no good, returning ignored")
return self.RC_MESSAGE_IGNORED
# Use configured transport object to send
if self.transport:
print("passing on to self.transport")
return self.transport.send_message(msg_bytes, whoto)
print("no transport available, so failed")
return self.RC_MESSAGE_FAILED
def _broadcast(self):
'''Broadcast our online status by adding to the outbox'''
database = self.get_component(System.COMPNAME_DATABASE)
if not database or not self.should_broadcast:
return
if self.work_lock.acquire(timeout=2):
print("Broadcast")
profile_list = database.get_profiles_with_status(["trusted", "robot"])
if profile_list:
crypto = self.get_component(System.COMPNAME_CRYPTO)
msg = StatusNotifyMessage()
msg.recipients = [c['torid'] for c in profile_list]
dbutils.add_message_to_outbox(msg, crypto, database)
self.work_lock.release()
self.need_to_flush = True
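# Illustrative test stand-in (not part of murmeli): the transport argument of PostService
# is designed to be swapped out in unit tests, e.g. with a recorder like this one.
class RecordingTransport:
    '''Records outgoing messages instead of sending them over the network.'''
    def __init__(self):
        self.sent = []
    def send_message(self, msg_bytes, whoto):
        self.sent.append((whoto, bytes(msg_bytes)))
        return PostService.RC_MESSAGE_SENT  # pretend every send succeeded
# Usage sketch (parent_system is hypothetical): PostService(parent_system, transport=RecordingTransport())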
|
Who Are You Looking For In Wesson, Mississippi?
Run a search by name for anyone in Wesson, Mississippi & get free white pages information instantly. Wesson, Mississippi white page directory listings include full name, phone number and address.
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Flink(Package):
"""
Apache Flink is an open source stream processing framework with
powerful stream- and batch-processing capabilities.
"""
homepage = "https://flink.apache.org/"
url = "http://archive.apache.org/dist/flink/flink-1.9.1/flink-1.9.1-bin-scala_2.11.tgz"
version('1.9.1', sha256='f69de344cd593e92f8261e19ae8a47b3910e9a70a7cd1ccfb1ecd1ff000b93ea')
version('1.9.0', sha256='a2245f68309e94ed54d86a680232a518aed9c5ea030bcc0b298bc8f27165eeb7')
version('1.8.3', sha256='1ba90e99f70ad7e2583d48d1404d1c09e327e8fb8fa716b1823e427464cc8dc0')
version('1.8.2', sha256='1a315f4f1fab9d651702d177b1741439ac98e6d06e9e13f9d410b34441eeda1c')
version('1.8.1', sha256='4fc0d0f163174ec43e160fdf21a91674979b978793e60361e2fce5dddba4ddfa')
depends_on('java@8:', type='run')
def url_for_version(self, version):
url = "http://archive.apache.org/dist/flink/flink-{0}/flink-{0}-bin-scala_2.11.tgz"
return url.format(version)
def install(self, spec, prefix):
install_tree('.', prefix)
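# Usage sketch (assuming this recipe is registered in a Spack repository):
#   $ spack install flink@1.9.1
#   $ spack load flink
# url_for_version() above expands, for example for 1.8.3, to:
#   http://archive.apache.org/dist/flink/flink-1.8.3/flink-1.8.3-bin-scala_2.11.tgz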
|
Solar power in America is nothing new, but the opportunity for residents to bring it into their own homes is a growing sustainable trend. Residential solar providers are expanding across the country, including in Pearl. While going through the selection process might seem a bit daunting at first, there's no need to worry. See.Solar can help you find the right solar installer in Pearl with ease.
If you're thinking about powering your Pearl home through solar energy, you're at the right place to make the initial steps. See.Solar is here to answer any questions you have, as well as guide you down the road toward environmental protection and savings.
You might hear a lot of myths about what installing a solar energy system can mean for your finances or your ability to sell your Pearl home. Getting the right information is essential in helping you make a sound decision about solar energy in Pearl.
Installing a solar energy system might make residents nervous about their investment when it comes time to move to another place. If you need to make your way out of Pearl, residential solar installers such as SunRun will work with you to transfer your system to your next location.
Even if just 5% of homes within the United States went solar, it could offset more than 89 million pounds of carbon dioxide emissions. Your home in Pearl would be a significant part of the solution toward a better future for generations to come.
Start saving on your energy bills in Pearl today!
As energy usage continues to rise across the country, you can expect your utility bills in Pearl to keep on rising. According to the US Energy Information Administration, the nationwide electricity price is forecast, on average, to be 2.3% higher in 2017 than in 2016.
While savings might range from Pearl to other cities, you could save anywhere from $80 to $150 per month when you go solar. Over the course of 20 years, your decision to bring a solar energy system to Pearl could mean more than $20,000 in savings. Don't forget that tax incentives of some form exist in each state. Reach out to local Pearl officials to learn more about what tax incentives might be available to you.
It's never too late to make the switch to solar energy. Reach out to See.Solar today to get the most out of energy savings in Pearl.
|
from fe65p2.scan_base import ScanBase
import fe65p2.plotting as plotting
import fe65p2.analysis as analysis
import time
import numpy as np
import bitarray
import tables as tb
from bokeh.charts import output_file, save, show
from bokeh.models.layouts import Column, Row
import yaml
from basil.dut import Dut
import logging
import os
import itertools
logging.basicConfig(level=logging.INFO,
format="%(asctime)s - %(name)s - [%(levelname)-8s] (%(threadName)-10s) %(message)s")
row = range(1,65,1)
all_pixels = []
for (r,c) in itertools.product(row,row):
all_pixels.append((r,c))
local_configuration = {
"mask_steps": 1,
"repeat_command": 101,
"scan_range": [0.005, 0.2, 0.005],#[0.05, 0.55, 0.01], #[0.005, 0.30, 0.01], # [0.01, 0.2, 0.01],# [0.01, 0.20, 0.01], #[0.1, 0.4, 0.05],
"columns": [True] * 2 + [False] * 14,
"mask_filename": '/media/topcoup/TB/Prmp36_vthA255_vthB0_PreCmp110/col1/output_data/20170119_163931_tu_threshold_scan.h5',
"pix_list": [(2,6),(3,3)],
#DAC parameters
"PrmpVbpDac": 36,
"vthin1Dac": 255,
"vthin2Dac": 0,
"vffDac" : 24,
"PrmpVbnFolDac" : 51,
"vbnLccDac" : 1,
"compVbnDac":25,
"preCompVbnDac" : 110
}
class TimewalkScan(ScanBase):
scan_id = "timewalk_scan"
def scan(self, mask_steps=4, repeat_command=101, columns=[True] * 16, pix_list=[], scan_range=[], mask_filename='', **kwargs):
        '''Scan loop.

        This scan measures time walk. The charge injection can be driven by the GPAC or by an
        external device; in the latter case the device is an Agilent 33250A connected through a
        serial port. The time walk and TOT are measured by a TDC module in the FPGA.
        The output is an .h5 file (data) and an .html file with plots.
        To perform a proper timewalk scan, a mask_filename (i.e. the output of a tuned threshold
        scan) has to be provided.
        '''
def load_vthin1Dac(mask):
if os.path.exists(mask):
in_file = tb.open_file(mask, 'r')
dac_status = yaml.load(in_file.root.meta_data.attrs.dac_status)
vthrs1 = dac_status['vthin1Dac']
logging.info("Loaded vth1 from noise scan: %s", str(vthrs1))
return int(vthrs1)
else: return 29
vth1 = load_vthin1Dac(mask_filename)
inj_factor = 1.0
INJ_LO = 0.0
try:
dut = Dut(ScanBase.get_basil_dir(self) + '/examples/lab_devices/agilent33250a_pyserial.yaml')
dut.init()
logging.info('Connected to ' + str(dut['Pulser'].get_info()))
except RuntimeError:
INJ_LO = 0.2
inj_factor = 2.0
logging.info('External injector not connected. Switch to internal one')
self.dut['INJ_LO'].set_voltage(INJ_LO, unit='V')
self.dut['global_conf']['PrmpVbpDac'] = int(kwargs.get('PrmpVbpDac', 36))
#self.dut['global_conf']['vthin1Dac'] = int(vth1)
self.dut['global_conf']['vthin2Dac'] = int(kwargs.get('vthin2Dac', 0))
self.dut['global_conf']['preCompVbnDac'] = int(kwargs.get('preCompVbnDac', 110))
self.dut['global_conf']['vffDac'] = int(kwargs.get('vffDac', 42))
self.dut['global_conf']['PrmpVbnFolDac'] = int(kwargs.get('PrmpVbnFolDac', 51))
self.dut['global_conf']['vbnLccDac'] = int(kwargs.get('vbnLccDac',1))
self.dut['global_conf']['compVbnDac'] = int(kwargs.get('compVbnDac',25))
self.dut.write_global()
self.dut['control']['RESET'] = 0b01
self.dut['control']['DISABLE_LD'] = 0
self.dut['control']['PIX_D_CONF'] = 0
self.dut['control'].write()
self.dut['control']['CLK_OUT_GATE'] = 1
self.dut['control']['CLK_BX_GATE'] = 1
self.dut['control'].write()
time.sleep(0.1)
self.dut['control']['RESET'] = 0b11
self.dut['control'].write()
self.dut['global_conf']['OneSr'] = 1
self.dut['global_conf']['TestHit'] = 0
self.dut['global_conf']['SignLd'] = 0
self.dut['global_conf']['InjEnLd'] = 0
self.dut['global_conf']['TDacLd'] = 0
self.dut['global_conf']['PixConfLd'] = 0
self.dut.write_global()
self.dut['global_conf']['ColEn'][:] = bitarray.bitarray([True] * 16) # (columns)
self.dut['global_conf']['ColSrEn'][:] = bitarray.bitarray([True] * 16)
self.dut.write_global()
self.dut['pixel_conf'].setall(False)
self.dut.write_pixel()
self.dut['global_conf']['InjEnLd'] = 1
self.dut.write_global()
self.dut['global_conf']['InjEnLd'] = 0
mask_en = np.full([64, 64], False, dtype=np.bool)
mask_tdac = np.full([64, 64], 16, dtype=np.uint8)
for inx, col in enumerate(columns):
if col:
mask_en[inx * 4:(inx + 1) * 4, :] = True
if mask_filename:
logging.info('Using pixel mask from file: %s', mask_filename)
with tb.open_file(mask_filename, 'r') as in_file_h5:
mask_tdac = in_file_h5.root.scan_results.tdac_mask[:]
mask_en = in_file_h5.root.scan_results.en_mask[:]
self.dut.write_en_mask(mask_en)
self.dut.write_tune_mask(mask_tdac)
self.dut['global_conf']['OneSr'] = 1
self.dut.write_global()
self.dut['inj'].set_delay(50000) # 1 zero more
self.dut['inj'].set_width(1000)
self.dut['inj'].set_repeat(repeat_command)
self.dut['inj'].set_en(False)
self.dut['trigger'].set_delay(400-4)
self.dut['trigger'].set_width(16)
self.dut['trigger'].set_repeat(1)
self.dut['trigger'].set_en(False)
logging.debug('Enable TDC')
self.dut['tdc']['RESET'] = True
self.dut['tdc']['EN_TRIGGER_DIST'] = True
self.dut['tdc']['ENABLE_EXTERN'] = False
self.dut['tdc']['EN_ARMING'] = False
self.dut['tdc']['EN_INVERT_TRIGGER'] = False
self.dut['tdc']['EN_INVERT_TDC'] = False
self.dut['tdc']['EN_WRITE_TIMESTAMP'] = True
scan_range = np.arange(scan_range[0], scan_range[1], scan_range[2]) / inj_factor
scan_range = np.append(scan_range, 0.3 / inj_factor)
scan_range = np.append(scan_range, 0.5 / inj_factor)
#scan_range = np.append(scan_range, 0.7 / inj_factor)
self.pixel_list = pix_list
p_counter = 0
for pix in pix_list:
mask_en = np.full([64, 64], False, dtype=np.bool)
mask_en[pix[0], pix[1]] = True
self.dut.write_en_mask(mask_en)
self.dut.write_inj_mask(mask_en)
self.inj_charge = []
for idx, k in enumerate(scan_range):
dut['Pulser'].set_voltage(INJ_LO, float(INJ_LO + k), unit='V')
self.dut['INJ_HI'].set_voltage(float(INJ_LO + k), unit='V')
self.inj_charge.append(float(k) * 1000.0 * analysis.cap_fac())
time.sleep(0.5)
with self.readout(scan_param_id=idx + p_counter * len(scan_range)):
logging.info('Scan Parameter: %f (%d of %d)', k, idx + 1, len(scan_range))
self.dut['tdc']['ENABLE'] = True
self.dut['global_conf']['vthin1Dac'] = int(vth1)
self.dut['global_conf']['vthin2Dac'] = int(kwargs.get('vthin2Dac', 0))
self.dut['global_conf']['PrmpVbpDac'] = int(kwargs.get('PrmpVbpDac', 36))
self.dut['global_conf']['preCompVbnDac'] = int(kwargs.get('preCompVbnDac', 110))
self.dut.write_global()
time.sleep(0.1)
self.dut['global_conf']['vthin1Dac'] = int(vth1)
self.dut['global_conf']['vthin2Dac'] = int(kwargs.get('vthin2Dac', 0))
self.dut['global_conf']['PrmpVbpDac'] = int(kwargs.get('PrmpVbpDac', 36))
self.dut['global_conf']['preCompVbnDac'] = int(kwargs.get('preCompVbnDac', 110))
self.dut.write_global()
time.sleep(0.1)
#self.dut['global_conf']['PrmpVbnFolDac'] = kwargs['PrmpVbnFolDac']
#self.dut['global_conf']['vbnLccDac'] = kwargs['vbnLccDac']
#self.dut['global_conf']['compVbnDac'] = kwargs['compVbnDac']
#self.dut['global_conf']['preCompVbnDac'] = kwargs['preCompVbnDac']
#self.dut.write_global()
#time.sleep(0.1)
#self.dut.write_global()
#time.sleep(0.1)
self.dut['inj'].start()
while not self.dut['inj'].is_done():
#time.sleep(0.05)
pass
while not self.dut['trigger'].is_done():
#time.sleep(0.05)
pass
self.dut['tdc'].ENABLE = 0
p_counter += 1
def tdc_table(self, scanrange):
h5_filename = self.output_filename + '.h5'
with tb.open_file(h5_filename, 'r+') as in_file_h5:
raw_data = in_file_h5.root.raw_data[:]
meta_data = in_file_h5.root.meta_data[:]
if (meta_data.shape[0] == 0):
print 'empty output'
return
repeat_command = in_file_h5.root.meta_data.attrs.kwargs
a = repeat_command.rfind("repeat_command: ")
repeat_command = repeat_command[a + len("repeat_command: "):a + len("repeat_command: ") + 7]
a = repeat_command.rfind("\n")
repeat_command = int(repeat_command[0:a])
param, index = np.unique(meta_data['scan_param_id'], return_index=True)
pxl_list = []
for p in param:
pix_no = int(p) / int(len(self.inj_charge))
pxl_list.append(self.pixel_list[pix_no][0] * 64 + self.pixel_list[pix_no][1])
index = index[1:]
index = np.append(index, meta_data.shape[0])
index = index - 1
stops = meta_data['index_stop'][index]
split = np.split(raw_data, stops)
avg_tdc = []
avg_tdc_err = []
avg_del = []
avg_del_err = []
hits = []
deletelist = ()
for i in range(len(split[:-1])): # loop on pulses
rwa_data_param = split[i]
tdc_data = rwa_data_param & 0xFFF # take last 12 bit
tdc_delay = (rwa_data_param & 0x0FF00000) >> 20
counter = 0.0
TOT_sum = 0.0
DEL_sum = 0.0
if (tdc_data.shape[0] == 0 or tdc_data.shape[0] == 1):
counter = 1.0
for j in range(tdc_data.shape[0]): # loop on repeats
if (j > 0):
counter += 1
TOT_sum += tdc_data[j]
DEL_sum += tdc_delay[j]
if (counter > 1):
hits.append(counter)
avg_tdc.append((float(TOT_sum) / float(counter)) * 1.5625)
avg_tdc_err.append(1.5625 / (np.sqrt(12.0 * counter)))
avg_del.append((float(DEL_sum) / float(counter)) * 1.5625)
avg_del_err.append(1.5625 / (np.sqrt(12.0 * counter)))
else:
deletelist = np.append(deletelist, i)
pxl_list = np.delete(pxl_list, deletelist)
newpix = [0]
pix_no_old = pxl_list[0]
runparam = 0
for p in pxl_list:
if p != pix_no_old:
newpix = np.append(newpix, runparam)
pix_no_old = p
runparam = runparam + 1
addedvalues = 0
for pixels in range(len(newpix)):
missingvalues = 0
if newpix[pixels] == newpix[-1]:
missingvalues = scanrange - abs(newpix[pixels] + addedvalues - len(hits))
else:
if abs(newpix[pixels] - newpix[pixels + 1]) < scanrange:
missingvalues = scanrange - abs(newpix[pixels] - newpix[pixels + 1])
if missingvalues != 0:
hits = np.insert(hits, newpix[pixels] + addedvalues, np.zeros(missingvalues))
avg_tdc = np.insert(avg_tdc, newpix[pixels] + addedvalues, np.zeros(missingvalues))
avg_tdc_err = np.insert(avg_tdc_err, newpix[pixels] + addedvalues, np.zeros(missingvalues))
avg_del = np.insert(avg_del, newpix[pixels] + addedvalues, np.zeros(missingvalues))
avg_del_err = np.insert(avg_del_err, newpix[pixels] + addedvalues, np.zeros(missingvalues))
pxl_list = np.insert(pxl_list, newpix[pixels] + addedvalues,
(pxl_list[newpix[pixels] + addedvalues]) * np.ones(missingvalues))
addedvalues = addedvalues + missingvalues
injections = []
for pixels in range(int(len(pxl_list) / len(self.inj_charge))):
for i in range(len(self.inj_charge)):
injections = np.append(injections, self.inj_charge[i])
pix, stop = np.unique(pxl_list, return_index=True)
stop = np.sort(stop)
stop = list(stop)
stop.append(len(avg_tdc))
repeat_command_dic={}
repeat_command_dic['repeat_command']=repeat_command
avg_tab = np.rec.fromarrays([injections, pxl_list, hits, avg_tdc, avg_tdc_err, avg_del, avg_del_err],
dtype=[('charge', float), ('pixel_no', int), ('hits', int),
('tot_ns', float), ('err_tot_ns', float), ('delay_ns', float),
('err_delay_ns', float)])
tdc_table=in_file_h5.create_table(in_file_h5.root, 'tdc_data', avg_tab, filters=self.filter_tables)
tdc_table.attrs.repeat_command = repeat_command_dic
thresholds = ()
expfit0 = ()
expfit1 = ()
expfit2 = ()
expfit3 = ()
pixels = ()
for i in range(len(stop) - 1):
s1 = int(stop[i])
s2 = int(stop[i + 1])
A, mu, sigma = analysis.fit_scurve(hits[s1:s2], injections[s1:s2],repeat_command)
if np.max(hits[s1:s2]) > (repeat_command + 200): # or mu > 3000:
thresholds = np.append(thresholds, 0)
expfit0 = np.append(expfit0, 0)
expfit1 = np.append(expfit1, 0)
expfit2 = np.append(expfit2, 0)
expfit3 = np.append(expfit3, 0)
pixels = np.append(pixels, pxl_list[s1])
continue
for values in range(s1, s2):
                    if injections[values] >= 5.0 / 4 * mu:  # 1.25 * mu; float division needed under Python 2
s1 = values
break
numberer = 0
hitvaluesold = hits[-1]
for hitvalues in hits[s1:s2]:
if abs(hitvalues - hitvaluesold) <= 1 and hitvalues != 0:
break
numberer = numberer + 1
hitvaluesold = hitvalues
if numberer == len(avg_del[s1:s2]):
numberer = 0
expfit = analysis.fit_exp(injections[s1:s2], avg_del[s1:s2], mu, abs(numberer))
startexp = -expfit[0] * np.log((25.0 + np.min(avg_del[s1:s2]) - expfit[3]) / expfit[2]) - expfit[1]
if np.isnan(startexp) or startexp >= 2000:
startexp = 0
thresholds = np.append(thresholds, startexp)
expfit0 = np.append(expfit0, expfit[0])
expfit1 = np.append(expfit1, expfit[1])
expfit2 = np.append(expfit2, expfit[2])
expfit3 = np.append(expfit3, expfit[3])
pixels = np.append(pixels, pxl_list[s1])
thresh = np.rec.fromarrays([pixels, thresholds, expfit0, expfit1, expfit2, expfit3],
dtype=[('pixel_no', int), ('td_threshold', float),
('expfit0', float), ('expfit1', float), ('expfit2', float),
('expfit3', float)])
in_file_h5.create_table(in_file_h5.root, 'td_threshold', thresh, filters=self.filter_tables)
p1, p2, single_scan = plotting.plot_timewalk(h5_filename)
output_file(self.output_filename + '.html', title=self.run_name)
status = plotting.plot_status(h5_filename)
save(Row(Column(p1, p2, status), single_scan))
#show(p1)
if __name__ == "__main__":
Timescan = TimewalkScan()
Timescan.start(**local_configuration)
scanrange = local_configuration['scan_range']
Timescan.tdc_table(len(np.arange(scanrange[0], scanrange[1], scanrange[2]))+2)
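# Note on the argument above (illustrative): scan() builds the list of injection
# amplitudes as np.arange(start, stop, step) plus two extra points (0.3 and 0.5,
# each divided by inj_factor), so tdc_table() needs
#     len(np.arange(start, stop, step)) + 2
# scan parameters per pixel - hence the "+2" in the call.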
|
Join us each month for a special Shabbat morning gathering, Shabbat In the Circle.
We will begin at 9:30am with the study of Hassidic and other mystical texts, then discuss how we can apply them in our daily lives.
This will be followed at 10:15am by a collaborative musical gathering based on the Shabbat morning service incorporating melodies, poems and dance to enhance our Shabbat.
We will also explore the intersection of Jewish and other sacred spiritual techniques to see how we can enrich our connection to the divine and to one another. Joining Cantor George Mordecai will be a talented group of musicians, storytellers and others.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# clusterjob documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 27 17:35:53 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# We import clusterjob just to ensure that it is installed in the same
# environment as sphinx, so that autodoc works
import clusterjob
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
]
intersphinx_mapping = {
'python': ('https://docs.python.org/3.4', None),
'ipyparallel': ('http://ipyparallel.readthedocs.org/en/latest/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'clusterjob'
copyright = '2015, Michael Goerz'
author = 'Michael Goerz'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0'
# The full version, including alpha/beta/rc tags.
release = '2.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# autodoc settings
autoclass_content = 'both'
autodoc_member_order = 'bysource'
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = True
napoleon_use_param = True
napoleon_use_rtype = True
# -- Extensions to the Napoleon GoogleDocstring class ---------------------
from sphinx.ext.napoleon.docstring import GoogleDocstring
# first, we define new methods for any new sections and add them to the class
def parse_keys_section(self, section):
return self._format_fields('Keys', self._consume_fields())
GoogleDocstring._parse_keys_section = parse_keys_section
def parse_attributes_section(self, section):
return self._format_fields('Attributes', self._consume_fields())
GoogleDocstring._parse_attributes_section = parse_attributes_section
def parse_class_attributes_section(self, section):
return self._format_fields('Class Attributes', self._consume_fields())
GoogleDocstring._parse_class_attributes_section = parse_class_attributes_section
# we now patch the parse method to guarantee that the above methods are
# assigned to the _sections dict
def patched_parse(self):
self._sections['keys'] = self._parse_keys_section
self._sections['class attributes'] = self._parse_class_attributes_section
self._unpatched_parse()
GoogleDocstring._unpatched_parse = GoogleDocstring._parse
GoogleDocstring._parse = patched_parse
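# Purely as an illustration (not part of the original configuration): with the
# patch above in place, a docstring like the hypothetical sketch below would
# have its "Keys" and "Class Attributes" sections rendered as field lists,
# just like the built-in "Attributes" section.
#
#     class ExampleBackend(object):
#         """Hypothetical resource description.
#
#         Keys:
#             queue (str): name of the queue to submit to
#
#         Class Attributes:
#             resources (dict): default resource requirements
#         """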
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org; this line of code was grabbed
# from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'clusterjobdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'clusterjob.tex', 'clusterjob Documentation',
'Michael Goerz', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'clusterjob', 'clusterjob Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'clusterjob', 'clusterjob Documentation',
author, 'clusterjob', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
Literophilia is proud to announce that we are once again hosting a giveaway!
Larry Rodness has generously offered up his latest book to satisfy your literary addictions. Literophilia is giving away 1 copy (in ePub or PDF form) of Perverse! If you haven’t read my review on this book yet, you can check it out here. You can also check out our interview with Mr. Rodness here so that you can get better acquainted with him!
To enter, leave a comment telling us why you want to win. Don’t forget to leave your e-mail — otherwise, we won’t be able to notify you if you win! You can increase your chances of winning by earning extra entries. Do one (or more) of the following and post a comment to let us know. Just make sure to leave a separate comment for each entry or it won’t count.
Twitter this phrase: “Want a free copy of an awesome book? Literophilia is giving away Larry Rodness’s Perverse! http://is.gd/pSWy4T“. Then leave a comment here linking back to the tweet (the actual post, not just your Twitter page).
Mention this giveaway on your blog or Facebook and link back to us (you can use the shortened URL if you like: http://is.gd/pSWy4T). Then leave a comment here linking to the post.
Sign up to receive e-mail updates from Literophilia (using the link in the sidebar) and comment here with your e-mail to let me know that you’ve done so.
All entries must be submitted by June 31st, 2013 at 11:59 PM EST. 1 winner will be chosen and notified via e-mail. This contest is open to all readers as per your local state/country’s laws.
Giveaway is closed, winners will be contacted. Thanks to all who participated!
Literophilia is proud to announce that we’re hosting our very first giveaway. If this goes well, I’m strongly considering seeking out the means to do more of them in the future. So tell all your friends and enter away!
Kevin Krohn has generously offered up his latest book to satisfy your literary addictions. Literophilia is giving away 5 copies of Severed Ties (Volume 1)! If you haven’t read my review on this book yet, you can check it out here. We’ll also be posting an interview with the author later on in the week so you can get better acquainted with him.
Twitter this phrase: “Want a free copy of an awesome book? Literophilia is giving away 5 copies of *Severed Ties (Volume 1)* by Kevin Krohn! http://is.gd/LD6w“. Then leave a comment here linking back to the tweet (the actual post, not just your Twitter page).
Mention this giveaway on your blog and link back to us. Then leave a comment here linking to the post.
All entries must be submitted by June 10th, 2009 at 11:59 PM EST. 5 winners will be chosen at random and notified via e-mail. This contest is open to all readers as per your local state/country’s laws.
I’ve never much been one to play favorites with anyone or anything — my tastes are pretty varied, so it’s hard for me to just pick one thing and have it represent everything I like. But recently, it struck me: As a loud and proud Literophiliac, shouldn’t I have some sort of favorites list when it comes to books?
One of the many ‘symptoms’ of Literophilia is an over-abundance of books. Being the child of a Literophiliac, I know this symptom very well — nearly every room in my mom’s house holds a bookshelf that is literally packed to the brim with books.
Since I’m only 23 and I move to a different apartment nearly every year, I haven’t been able to get my collection looking how I’d like. Recently, I bought a couple of bookcases ($20 at Ikea!) and I’ve been happily staring at my beautifully organized bookcase for the past week or so.
Come take a look with me!
|
#!/usr/bin/env python2.7
import unittest
import os
from os import path, getenv
from os.path import expanduser
import logging # https://docs.python.org/2/library/logging.html#logging-levels
import glob
import argparse
import sys
import csv
sys.path.append( os.getcwd() )
sys.path.insert( 1, 'lib' ) #Pickup libs, rdflib etc., from shipped lib directory
sys.path.insert( 1, 'sdopythonapp' ) #Pickup sdopythonapp functionality
sys.path.insert( 1, 'sdopythonapp/lib' ) #Pickup sdopythonapp libs, rdflib etc., from shipped lib directory
sys.path.insert( 1, 'sdopythonapp/site' ) #Pickup sdopythonapp from shipped site
# Ensure that the google.appengine.* packages are available
# in tests as well as all bundled third-party packages.
sdk_path = getenv('APP_ENGINE', expanduser("~") + '/google-cloud-sdk/platform/google_appengine/')
sys.path.insert(0, sdk_path) # add AppEngine SDK to path
import rdflib
from rdflib.term import URIRef, Literal
from rdflib.parser import Parser
from rdflib.serializer import Serializer
from rdflib.plugins.sparql import prepareQuery, processUpdate
from rdflib.compare import graph_diff
from rdflib.namespace import RDFS, RDF
import pyRdfa
rdflib.plugin.register("jsonld", Parser, "rdflib_jsonld.parser", "JsonLDParser")
rdflib.plugin.register("rdfa", Parser, "pyRdfa.rdflibparsers", "RDFaParser")
rdflib.plugin.register("jsonld", Serializer, "rdflib_jsonld.serializer", "JsonLDSerializer")
OUTTYPES = {'jsonld': 'jsonld','xml':'xml','nq':'nquads','rdf':'xml','ttl':'turtle'}
parser = argparse.ArgumentParser()
parser.add_argument("-i","--input" , action='append',nargs='*', help="Input file(s)")
parser.add_argument("-o","--outputdir", help="Output directory (Default = .)")
parser.add_argument("-f","--format", default='ttl', help="Output format ['xml', 'rdf', 'nquads','nt','jsonld','ttl']")
parser.add_argument("-c","--combinefile", default=None, help="Combine outputs into file")
parser.add_argument("-d","--defaultns", help="Default output namespace")
args = parser.parse_args()
print("%s: Arguments: %s" % (sys.argv[0],args))
if args.format not in OUTTYPES:
parser.print_help()
sys.exit(1)
format = args.format
combine = args.combinefile
SPARQL1 = """
PREFIX dc: <http://purl.org/dc/terms/>
PREFIX schema: <http://schema.org/>
DELETE { ?s dc:source ?o }
INSERT { ?s schema:source ?o }
WHERE {
?s dc:source ?o .
}
"""
def out(filename):
graph.update(SPARQL1)
graph.bind('',URIRef('http://schema.org/'),override=True, replace=True)
if args.outputdir:
outfile = "%s/%s" % (args.outputdir,filename)
else:
outfile = filename
print("Writing %s triples to %s" % (len(graph),outfile))
    # use a context manager so the output file is flushed and closed
    with open(outfile,'w') as f:
        f.write(graph.serialize(format=OUTTYPES.get(format),auto_compact=True))
files = args.input[0]
graph = rdflib.ConjunctiveGraph()
for fullfilename in files:
if not combine:
graph = rdflib.ConjunctiveGraph()
if args.outputdir:
filename = os.path.basename(fullfilename)
else:
filename = fullfilename
filestub, ext = os.path.splitext(filename)
ext = ext[1:]
graph.parse(fullfilename,format = ext)
print("Loaded %s triples from %s" % (len(graph), filename))
if not combine:
out(filename="%s.%s" % (filestub,format))
if combine:
print("Outputting ")
out(filename=combine)
|
A status category for technical debt that represents the debt that is known to the development team and has been made visible for future consideration. Contrast with happened-upon technical debt, targeted technical debt. See also technical debt.
|
# BEGIN_COPYRIGHT
# END_COPYRIGHT
"""
Import study
============
A study represents a general context. It is characterized by the
following fields::
label description
ASTUDY A textual description of ASTUDY, no tabs please.
The description column is optional. The study sub-operation will read
in a tsv file with the above information and output the VIDs of the
newly created study objects.
"""
import os, csv, copy
import core
DEFAULT_DESCRIPTION = 'No description provided'
class Recorder(core.Core):
def __init__(self, out_stream=None, report_stream=None,
host=None, user=None, passwd=None, keep_tokens=1,
batch_size=1000, operator='Alfred E. Neumann', logger=None):
super(Recorder, self).__init__(host, user, passwd, keep_tokens=keep_tokens,
study_label=None, logger=logger)
self.out_stream = out_stream
if self.out_stream:
self.out_stream.writeheader()
self.report_stream = report_stream
if self.report_stream:
self.report_stream.writeheader()
self.batch_size = batch_size
self.operator = operator
def record(self, records, blocking_validation):
def records_by_chunk(batch_size, records):
offset = 0
while len(records[offset:]) > 0:
yield records[offset:offset+batch_size]
offset += batch_size
if not records:
msg = 'No records are going to be imported'
self.logger.critical(msg)
raise core.ImporterValidationError(msg)
self.preload_studies()
records, bad_records = self.do_consistency_checks(records)
for br in bad_records:
self.report_stream.writerow(br)
if blocking_validation and len(bad_records) >= 1:
raise core.ImporterValidationError('%d invalid records' % len(bad_records))
for i, c in enumerate(records_by_chunk(self.batch_size, records)):
self.logger.info('start processing chunk %d' % i)
self.process_chunk(c)
self.logger.info('done processing chunk %d' % i)
def preload_studies(self):
self.logger.info('start prefetching studies')
self.known_studies = {}
studies = self.kb.get_objects(self.kb.Study)
for s in studies:
self.known_studies[s.label] = s
self.logger.info('there are %d Study(s) in the kb'
% (len(self.known_studies)))
def do_consistency_checks(self, records):
self.logger.info('start consistency checks')
k_map = {}
good_records = []
bad_records = []
mandatory_fields = ['label']
for i, r in enumerate(records):
reject = ' Rejecting import of record %d: ' % i
if self.missing_fields(mandatory_fields, r):
f = 'missing mandatory field'
self.logger.error(reject + f)
bad_rec = copy.deepcopy(r)
bad_rec['error'] = f
bad_records.append(bad_rec)
continue
if r['label'] in self.known_studies:
f = 'there is a pre-existing study with label %s' % r['label']
self.logger.error(reject + f)
bad_rec = copy.deepcopy(r)
bad_rec['error'] = f
bad_records.append(bad_rec)
continue
if r['label'] in k_map:
f = 'there is a pre-existing study with label %s in this batch' % r['label']
self.logger.error(reject + f)
bad_rec = copy.deepcopy(r)
bad_rec['error'] = f
bad_records.append(bad_rec)
continue
            # key the map by the record's label so duplicates within this batch are caught
            k_map[r['label']] = r
good_records.append(r)
self.logger.info('done with consistency checks')
return good_records, bad_records
def process_chunk(self, chunk):
studies = []
for r in chunk:
conf = {'label': r['label'], 'description': r['description']}
studies.append(self.kb.factory.create(self.kb.Study, conf))
self.kb.save_array(studies)
for d in studies:
self.logger.info('saved %s[%s] as %s.' % (d.label, d.description, d.id))
self.out_stream.writerow({
'study': 'None',
'label': d.label,
'type': 'Study',
'vid': d.id,
})
help_doc = """
import new Study definitions into the KB.
"""
def make_parser(parser):
parser.add_argument('--label', metavar="STRING",
help="overrides the label column value")
class RecordCanonizer(core.RecordCanonizer):
def canonize(self, r):
super(RecordCanonizer, self).canonize(r)
r.setdefault('description', DEFAULT_DESCRIPTION)
def implementation(logger, host, user, passwd, args, close_handles):
f = csv.DictReader(args.ifile, delimiter='\t')
logger.info('start processing file %s' % args.ifile.name)
records = [r for r in f]
if not records:
logger.info('empty file')
return
canonizer = RecordCanonizer(['label'], args)
canonizer.canonize_list(records)
o = csv.DictWriter(args.ofile, fieldnames=['study', 'label', 'type', 'vid'],
delimiter='\t', lineterminator=os.linesep)
report_fnames = f.fieldnames
report_fnames.append('error')
report = csv.DictWriter(args.report_file, report_fnames,
delimiter='\t', lineterminator=os.linesep,
extrasaction='ignore')
recorder = Recorder(o, report, host=host, user=user, passwd=passwd,
keep_tokens=args.keep_tokens, logger=logger)
try:
recorder.record(records, args.blocking_validator)
except core.ImporterValidationError as ve:
logger.critical(ve.message)
raise
finally:
close_handles(args)
logger.info('done processing file %s' % args.ifile.name)
def do_register(registration_list):
registration_list.append(('study', help_doc, make_parser,
implementation))
|
Branding doesn’t just happen. In other words, you don’t whip up a quick logo, print some business cards, put a sign up on your brick and mortar, and let it all unfold. Branding is a science. After all, it’s a process that can require extensive market research, testing, focus groups, and more before you can even launch a brand campaign. Therefore, it also requires constant maintenance, tracking, and a potential revamp to adjust for future consumer trends.
Unfortunately, far too many SMBs neglect their branding. Have you ever found yourself wondering why customers opt for a competitor who offers an inferior product or service? By and large, branding is most often to blame or credit, depending upon which side of the spectrum you fall on.
Strategis Consulting’s design and conversion specialists work together to create landing pages that match the intent of visitors clicking the keywords, complete with a Call-to-Action that gets them to sign-up, request an estimate, or buy . . . on the spot.
Color is one of the first things our brains perceive from a brand, and is thus instrumental in pulling us in. The right color can make brand messaging more visually appealing and easier to read, improving readership by up to 40%.
It only takes us 10 seconds to form a first impression of a brand logo, but it takes five to seven impressions for us to recognize the logo. Frequent exposure is the key to success.
Our brains process images 60,000 times faster than words. Choosing the right symbol or image to accompany your brand name and tagline is essential.
73% of consumers state that they love a brand because of friendly customer service.
The statistics above are just the tip of a very big iceberg. You can’t be expected to keep up to date on them all – that’s our job, and we do it for you!
Strategis Group’s branding services consider your brand history, current goals, and forecasted market trends and tie them all together into a living, breathing, and ever-adapting organism that will build brand awareness while driving revenue today, and tomorrow.
|
'''Provides a custom 'sitecustomize' module which will be used when the
'autowrapt' wrapper script is used when launching a Python program. This
custom 'sitecustomize' module will find any existing 'sitecustomize'
module which may have been overridden and ensures that that is imported
as well. Once that is done then the monkey patches for ensuring any
bootstrapping is done for registering post import hook callback
functions after the 'usercustomize' module is loaded will be applied. If
however 'usercustomize' support is not enabled, then the registration
will be forced immediately.
'''
import os
import sys
import site
import time
_debug = os.environ.get('AUTOWRAPT_DEBUG',
'off').lower() in ('on', 'true', '1')
def log_message(text, *args):
if _debug:
text = text % args
timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
print('AUTOWRAPT: %s (%d) - %s' % (timestamp, os.getpid(), text))
log_message('autowrapt - sitecustomize (%s)', __file__)
log_message('working_directory = %r', os.getcwd())
log_message('sys.prefix = %r', os.path.normpath(sys.prefix))
try:
log_message('sys.real_prefix = %r', sys.real_prefix)
except AttributeError:
pass
log_message('sys.version_info = %r', sys.version_info)
log_message('sys.executable = %r', sys.executable)
if hasattr(sys, 'flags'):
log_message('sys.flags = %r', sys.flags)
log_message('sys.path = %r', sys.path)
# This 'sitecustomize' module will override any which may already have
# existed, be it one supplied by the user or one which has been placed
# in the 'site-packages' directory of the Python installation. We need
# to ensure that the existing 'sitecustomize' module is still loaded. To
# do that we remove the special startup directory containing this module
# from 'sys.path' and use the 'imp' module to find any original
# 'sitecustomize' module and load it.
import imp
boot_directory = os.path.dirname(__file__)
pkgs_directory = os.path.dirname(os.path.dirname(boot_directory))
log_message('pkgs_directory = %r', pkgs_directory)
log_message('boot_directory = %r', boot_directory)
path = list(sys.path)
try:
path.remove(boot_directory)
except ValueError:
pass
try:
(file, pathname, description) = imp.find_module('sitecustomize', path)
except ImportError:
pass
else:
log_message('sitecustomize = %r', (file, pathname, description))
imp.load_module('sitecustomize', file, pathname, description)
# Before we try and setup or trigger the bootstrapping for the
# registration of the post import hook callback functions, we need to
# make sure that we are still executing in the context of the same
# Python installation as the 'autowrapt' script was installed in. This
# is necessary because if it isn't and we were now running out of a
# different Python installation, then it may not have the 'autowrapt'
# package installed and so our attempts to import it will fail causing
# startup of the Python interpreter to fail in an obscure way.
expected_python_prefix = os.environ.get('AUTOWRAPT_PYTHON_PREFIX')
actual_python_prefix = os.path.realpath(os.path.normpath(sys.prefix))
expected_python_version = os.environ.get('AUTOWRAPT_PYTHON_VERSION')
actual_python_version = '.'.join(map(str, sys.version_info[:2]))
python_prefix_matches = expected_python_prefix == actual_python_prefix
python_version_matches = expected_python_version == actual_python_version
log_message('python_prefix_matches = %r', python_prefix_matches)
log_message('python_version_matches = %r', python_version_matches)
if python_prefix_matches and python_version_matches:
bootstrap_packages = os.environ.get('AUTOWRAPT_BOOTSTRAP')
log_message('bootstrap_packages = %r', bootstrap_packages)
if bootstrap_packages:
# When the 'autowrapt' script is run from out of a Python egg
# directory under 'buildout', then the path to the egg directory
# will not actually be listed in 'sys.path' as yet. This is
# because 'buildout' sets up any scripts so that 'sys.path' is
# specified only within the script. So that we can find the
# 'autowrapt' package, we need to ensure that in this case the
# egg directory for 'autowrapt' is manually added to 'sys.path'
# before we can import it.
pkgs_directory_missing = pkgs_directory not in sys.path
if pkgs_directory_missing:
sys.path.insert(0, pkgs_directory)
from autowrapt.bootstrap import bootstrap
from autowrapt.bootstrap import register_bootstrap_functions
# If we had to add the egg directory above corresponding to the
# 'autowrapt' package, now remove it to ensure the presence of
# the directory doesn't cause any later problems. It is quite
# possible that the directory will be added back in by scripts
# run under 'buildout' but that would be the normal behaviour
# and better off letting it do it how it wants to rather than
# leave the directory in place.
if pkgs_directory_missing:
try:
sys.path.remove(pkgs_directory)
except ValueError:
pass
# Trigger the application of the monkey patches to the 'site'
# module so that actual registration of the post import hook
# callback functions is only run after any 'usercustomize'
# module has been imported. If 'usercustomize' module support
# is disabled, as it will be in a Python virtual environment,
# then trigger the registration immediately.
bootstrap()
if not site.ENABLE_USER_SITE:
register_bootstrap_functions()
|
To all those who made New Year’s Resolutions, I hope they are going well. Personally, I decided not to make any resolutions this year because honestly, I needed to do something deeper and more meaningful than to say I was going to lose weight or spend less money. I needed more than a resolution, I needed a reSOULution.
I need some soul food! Have you ever had a time in your life when you felt like you were operating in survival mode? Welcome to my life last year.
I knew things had to change. My soul felt dry like a parched desert in need of a good rain to bring it back to life. After a year full of emotions that were all over the place due to life’s circumstances, including the death of my momma, I decided to make a New Year’s ReSOULution.
Over the past year my spiritual life had become a hodgepodge of half-read devotionals, unfinished blog posts, unspoken prayers, and stifled growth. I admit there were some days I didn’t even pray or read. You would think when in a crisis mode I would have prayed more, but I didn’t.
My soul was drained. I felt like a toothpaste tube that had been rolled and re-squeezed numerous times to get out every last drop. I didn’t even understand how I was surviving until a good friend of mine called to tell me she was praying. It was at that moment I knew how God was sustaining me. People were praying for me. Prayer has always been a huge part of my life, but now I was in a season of drought and others were watering my soul.
For the first time in my life I knew, felt and experienced to the full what it was like to have other people praying for me. Don’t get me wrong, I know people have prayed for me in the past, but this was different. People were doing something for me that I couldn’t do for myself. Have you ever been in a similar situation?
Isn’t it funny how no one ever wants to come across as needy? However, there are times we all are in desperate need of help. The world tries to make us view weakness as a bad thing, but if I’ve learned anything, it is that when we are weak, God shows up in a big way. Instead of fighting my weakness, I had to learn to use and accept it, so the power of Christ could work in me.
Paul begged for God to take away the thorn in his flesh three times, but God had a different answer than the one Paul originally wanted: 2 Corinthians 12:9-11 (NLT) “My grace is all you need. My power works best in weakness.” So now I am glad to boast about my weaknesses, so that the power of Christ can work through me. That’s why I take pleasure in my weaknesses, and in the insults, hardships, persecutions, and troubles that I suffer for Christ. For when I am weak, then I am strong.
I wish I could say I’m like Paul and take pleasure in my weaknesses, insults, hardships, persecutions and troubles, but I don’t. It is hard not to be defensive in our flesh when the world is flinging garbage at us, but Jesus is my best defense. This is where my New Year’s ReSOULution comes into play. This year my heart’s desire is to focus on taking my weaknesses and turning them into strengths that will help and encourage others to do the same. Do you have weaknesses to share so the power of Christ can work through you too?
Trust me, I prayed for my thorn to be taken away too, but then I read these words: “My grace is all you need. My power works best in weakness.” The world teaches us that being weak means you’ll end up covered in footprints from being walked all over, but Paul teaches us that our weakness is the perfect platform for the power of Christ to be demonstrated through us. Being weak is being powerful. Being independent from the world shows our strength and our dependence on Christ. God redeems anyone choosing eternal life. No matter the trials we face, His grace is truly enough!
We’d love to hear from you in the comments below!
Sheila Schweiger-Rhodes is an author, speaker, blogger and the founder of JesusGlitter.com where she encourages others to look for Jesus in the everyday moments of life and "Be the Sparkle!" Sheila has been described as deeply passionate, profoundly uplifting and completely authentic. Her passion is to share the message of God's redeeming love and offer the encouragement, love and hope to those who have been wounded by life. Sheila resides in Maryland with her husband Dave. Together they have formed a blended family of four adult children and three grandchildren.
What a beautiful story of seeing God’s goodness in the midst of a difficult year. What a gift to see God’s strength for you through the prayers of your friends.
God’s strength is all we need. I am thankful for the prayers of friends, grace, and mercy I am given every day to fight against the troubles of this world. God has given us so many gifts!
Beautifully said. It is in the valley that we grow and the mountain top where we can be refreshed. That you were refreshed in that valley life put you in, is up-lifting to hear. Thanks for sharing your painful, beautiful experience.
Thank You for your encouraging words. We never appreciate the mountain top until we know what it is like to be in a valley for sure! At the moment, I am appreciating the view!
I don’t make New Year’s resolutions–those are silly and never last. I do brainstorm solutions to problems and goals I’d like to accomplish–and then I create a plan of action. I can do this at any time of the year or whenever I feel the need for change–why wait for the new year!
And yes, I’ve been carried by prayer many times–most notably when my husband had cancer.
I agree! ReSOULutions can be done throughout the year whenever or wherever God leads us. The power of prayer for one another carries and comforts us through the many trials of life. Thanks for your insights!
|
# from nipype import config
# config.enable_debug_mode()
# Importing necessary packages
import os
import os.path as op
import glob
import json
import nipype
from nipype import config, logging
import matplotlib.pyplot as plt
import nipype.interfaces.fsl as fsl
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
import nipype.interfaces.io as nio
from nipype.utils.filemanip import copyfile
import nibabel as nib
from IPython.display import Image
from nipype.interfaces.utility import Function, Merge, IdentityInterface
from nipype.interfaces.io import SelectFiles, DataSink
from IPython.display import Image
from IPython import embed as shell
from workflows.preprocessing_pipeline import create_all_calcarine_reward_preprocessing_workflow
from workflows.nii_to_h5 import create_all_calcarine_reward_2_h5_workflow
from workflows.whole_brain_GLM import create_whole_brain_GLM_workflow
# we will create a workflow from a BIDS formatted input, at first for the specific use case
# of a 7T PRF experiment's preprocessing.
# a project directory that we assume has already been created.
raw_data_dir = '/home/raw_data/-2014/reward/human_reward/data/'
preprocessed_data_dir = '/home/shared/-2014/reward/new/'
FS_subject_dir = os.path.join(raw_data_dir, 'FS_SJID')
preprocess = False
GLM = True
mask = True
for si in range(1,7): #
sub_id, FS_ID = 'sub-00%i'%si, 'sub-00%i'%si
sess_id = 'ses-*'
# now we set up the folders and logging there.
opd = op.join(preprocessed_data_dir, sub_id)
try:
os.makedirs(op.join(opd, 'log'))
except OSError:
pass
config.update_config({ 'logging': {
'log_directory': op.join(opd, 'log'),
'log_to_file': True,
'workflow_level': 'INFO',
'interface_level': 'INFO'
},
'execution': {
'stop_on_first_crash': False
}
})
logging.update_logging(config)
# load the sequence parameters from json file
with open(os.path.join(raw_data_dir, 'acquisition_parameters.json')) as f:
json_s = f.read()
acquisition_parameters = json.loads(json_s)
# load the analysis parameters from json file
with open(os.path.join(raw_data_dir, 'analysis_parameters.json')) as f:
json_s = f.read()
analysis_info = json.loads(json_s)
# load the analysis/experimental parameters for this subject from json file
with open(os.path.join(raw_data_dir, sub_id ,'experimental_parameters.json')) as f:
json_s = f.read()
experimental_parameters = json.loads(json_s)
analysis_info.update(experimental_parameters)
if not op.isdir(os.path.join(preprocessed_data_dir, sub_id)):
try:
os.makedirs(os.path.join(preprocessed_data_dir, sub_id))
except OSError:
pass
# copy json files to preprocessed data folder
# this allows these parameters to be updated and synced across subjects by changing only the raw data files.
copyfile(os.path.join(raw_data_dir, 'acquisition_parameters.json'), os.path.join(preprocessed_data_dir, 'acquisition_parameters.json'), copy = True)
copyfile(os.path.join(raw_data_dir, 'analysis_parameters.json'), os.path.join(preprocessed_data_dir, 'analysis_parameters.json'), copy = True)
copyfile(os.path.join(raw_data_dir, sub_id ,'experimental_parameters.json'), os.path.join(preprocessed_data_dir, sub_id ,'experimental_parameters.json'), copy = True)
if preprocess:
# the actual workflow
all_calcarine_reward_workflow = create_all_calcarine_reward_preprocessing_workflow(analysis_info, name = 'all_calcarine_reward')
# standard in/output variables
all_calcarine_reward_workflow.inputs.inputspec.raw_directory = raw_data_dir
all_calcarine_reward_workflow.inputs.inputspec.sub_id = sub_id
all_calcarine_reward_workflow.inputs.inputspec.sess_id = sess_id
all_calcarine_reward_workflow.inputs.inputspec.output_directory = opd
all_calcarine_reward_workflow.inputs.inputspec.psc_func = analysis_info['psc_func']
# to what file do we motion correct?
all_calcarine_reward_workflow.inputs.inputspec.which_file_is_EPI_space = analysis_info['which_file_is_EPI_space']
# registration details
all_calcarine_reward_workflow.inputs.inputspec.FS_ID = FS_ID
all_calcarine_reward_workflow.inputs.inputspec.FS_subject_dir = FS_subject_dir
all_calcarine_reward_workflow.inputs.inputspec.standard_file = op.join(os.environ['FSL_DIR'], 'data/standard/MNI152_T1_1mm_brain.nii.gz')
# all the input variables for retroicor functionality
# the key 'retroicor_order_or_timing' determines whether slice timing
# or order is used for regressor creation
all_calcarine_reward_workflow.inputs.inputspec.MB_factor = acquisition_parameters['MultiBandFactor']
all_calcarine_reward_workflow.inputs.inputspec.nr_dummies = acquisition_parameters['NumberDummyScans']
all_calcarine_reward_workflow.inputs.inputspec.tr = acquisition_parameters['RepetitionTime']
all_calcarine_reward_workflow.inputs.inputspec.slice_direction = acquisition_parameters['SliceDirection']
all_calcarine_reward_workflow.inputs.inputspec.phys_sample_rate = acquisition_parameters['PhysiologySampleRate']
all_calcarine_reward_workflow.inputs.inputspec.slice_timing = acquisition_parameters['SliceTiming']
all_calcarine_reward_workflow.inputs.inputspec.slice_order = acquisition_parameters['SliceOrder']
all_calcarine_reward_workflow.inputs.inputspec.acceleration = acquisition_parameters['SenseFactor']
all_calcarine_reward_workflow.inputs.inputspec.epi_factor = acquisition_parameters['EpiFactor']
all_calcarine_reward_workflow.inputs.inputspec.wfs = acquisition_parameters['WaterFatShift']
all_calcarine_reward_workflow.inputs.inputspec.te_diff = acquisition_parameters['EchoTimeDifference']
# write out the graph and run
all_calcarine_reward_workflow.write_graph(opd + '.svg', format='svg', graph2use='colored', simple_form=False)
all_calcarine_reward_workflow.run('MultiProc', plugin_args={'n_procs': 24})
# all_calcarine_reward_workflow.run()
if GLM:
glm_wf = create_whole_brain_GLM_workflow(analysis_info)
glm_wf.inputs.inputspec.sub_id = sub_id
glm_wf.inputs.inputspec.preprocessed_directory = preprocessed_data_dir
glm_wf.write_graph(opd + '_GLM.svg', format='svg', graph2use='colored', simple_form=False)
glm_wf.run('MultiProc', plugin_args={'n_procs': 6})
if mask:
n2h = create_all_calcarine_reward_2_h5_workflow(analysis_info, name='all_calcarine_reward_nii_2_h5')
# standard in/output variables
n2h.inputs.inputspec.preprocessed_data_dir = preprocessed_data_dir
n2h.inputs.inputspec.sub_id = sub_id
n2h.write_graph(opd + '_h5.svg', format='svg', graph2use='colored', simple_form=False)
n2h.run()
|
At an Apple Event on Monday, Apple’s CEO Tim Cook revealed the different models and prices for its new Apple Watch, which will be available in stores on April 24, 2015.
The Apple Watch seems to be an extension of the iPhone. Yes, you do need to own an iPhone and have that iPhone turned on and in proximity to the watch for it to work. The Apple Watch is designed for quick interaction with apps that iPhone users are already familiar with, such as Phone, Messages, Mail, Maps, Calendar, Weather, Siri and others.
The Apple Watch itself has the same hardware/software features and functionality across models. The huge differences in pricing are due to the case and band material. This leads one to think that Apple is moving in the direction of becoming a jewelry business. Expect an announcement of an Apple Pendant at the next Keynote event!
|
'''
Created on Feb 15, 2013
@author: eric
'''
from collections import namedtuple
import random
from osgeo import gdal, ogr
BoundingBox = namedtuple('BoundingBox', ['min_x', 'min_y','max_x', 'max_y'])
def extents(database, table_name, where=None, lat_col='_db_lat', lon_col='_db_lon'):
'''Return the bounding box for a table in the database. The partition must specify
a table
'''
# Find the extents of the data and figure out the offsets for the array.
e= database.connection.execute
if where:
where = "WHERE "+where
else:
where = ''
r = e("""SELECT min({lon}) as min_x, min({lat}) as min_y,
max({lon}) as max_x, max({lat}) as max_y from {table} {where}"""
.format(lat=lat_col, lon=lon_col, table=table_name, where=where)
).first()
# Convert to a regular tuple
o = BoundingBox(r[0], r[1],r[2],r[3])
return o
#From http://danieljlewis.org/files/2010/06/Jenks.pdf
#
# !!!! Use pysal instead!
# !!!! http://pysal.geodacenter.org/1.2/library/esda/mapclassify.html#pysal.esda.mapclassify.Natural_Breaks
#
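# Hedged sketch of the pysal alternative recommended above (the call follows
# the linked 1.x documentation; treat the exact API as an assumption rather
# than a tested drop-in for jenks_breaks below):
#
#     import numpy
#     import pysal
#     nb = pysal.esda.mapclassify.Natural_Breaks(numpy.array(dataList), k=numClass)
#     upper_bounds = nb.bins  # class break values, analogous to jenks_breaks() output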
def jenks_breaks(dataList, numClass):
dataList.sort()
print "A"
mat1 = []
for i in range(0, len(dataList) + 1):
temp = []
for j in range(0, numClass + 1):
temp.append(0)
mat1.append(temp)
print "B"
mat2 = []
for i in range(0, len(dataList) + 1):
temp = []
for j in range(0, numClass + 1):
temp.append(0)
mat2.append(temp)
print "C"
for i in range(1, numClass + 1):
mat1[1][i] = 1
mat2[1][i] = 0
for j in range(2, len(dataList) + 1):
mat2[j][i] = float('inf')
print "D"
v = 0.0
# # iterations = datalist * .5*datalist * Numclass
for l in range(2, len(dataList) + 1):
s1 = 0.0
s2 = 0.0
w = 0.0
for m in range(1, l + 1):
i3 = l - m + 1
val = float(dataList[i3 - 1])
s2 += val * val
s1 += val
w += 1
v = s2 - (s1 * s1) / w
i4 = i3 - 1
if i4 != 0:
for j in range(2, numClass + 1):
if mat2[l][j] >= (v + mat2[i4][j - 1]):
mat1[l][j] = i3
mat2[l][j] = v + mat2[i4][j - 1]
mat1[l][1] = 1
mat2[l][1] = v
k = len(dataList)
kclass = []
print "E"
for i in range(0, numClass + 1):
kclass.append(0)
kclass[numClass] = float(dataList[len(dataList) - 1])
countNum = numClass
print 'F'
while countNum >= 2:
#print "rank = " + str(mat1[k][countNum])
id_ = int((mat1[k][countNum]) - 2)
#print "val = " + str(dataList[id])
kclass[countNum - 1] = dataList[id_]
k = int((mat1[k][countNum] - 1))
countNum -= 1
return kclass
def getGVF( dataList, numClass ):
""" The Goodness of Variance Fit (GVF) is found by taking the
difference between the squared deviations from the array mean (SDAM)
and the squared deviations from the class means (SDCM), and dividing by the SDAM
"""
breaks = jenks_breaks(dataList, numClass)
dataList.sort()
listMean = sum(dataList)/len(dataList)
print listMean
SDAM = 0.0
for i in range(0,len(dataList)):
sqDev = (dataList[i] - listMean)**2
SDAM += sqDev
SDCM = 0.0
for i in range(0,numClass):
if breaks[i] == 0:
classStart = 0
else:
classStart = dataList.index(breaks[i])
classStart += 1
classEnd = dataList.index(breaks[i+1])
classList = dataList[classStart:classEnd+1]
classMean = sum(classList)/len(classList)
print classMean
preSDCM = 0.0
for j in range(0,len(classList)):
sqDev2 = (classList[j] - classMean)**2
preSDCM += sqDev2
SDCM += preSDCM
return (SDAM - SDCM)/SDAM
def rasterize(pixel_size=25):
# Open the data source
RASTERIZE_COLOR_FIELD = "__color__"
orig_data_source = ogr.Open("test.shp")
# Make a copy of the layer's data source because we'll need to
# modify its attributes table
source_ds = ogr.GetDriverByName("Memory").CopyDataSource(orig_data_source, "")
source_layer = source_ds.GetLayer(0)
source_srs = source_layer.GetSpatialRef()
x_min, x_max, y_min, y_max = source_layer.GetExtent()
# Create a field in the source layer to hold the features colors
field_def = ogr.FieldDefn(RASTERIZE_COLOR_FIELD, ogr.OFTReal)
source_layer.CreateField(field_def)
source_layer_def = source_layer.GetLayerDefn()
field_index = source_layer_def.GetFieldIndex(RASTERIZE_COLOR_FIELD)
# Generate random values for the color field (it's here that the value
# of the attribute should be used, but you get the idea)
for feature in source_layer:
feature.SetField(field_index, random.randint(0, 255))
source_layer.SetFeature(feature)
# Create the destination data source
x_res = int((x_max - x_min) / pixel_size)
y_res = int((y_max - y_min) / pixel_size)
target_ds = gdal.GetDriverByName('GTiff').Create('test.tif', x_res,
y_res, 3, gdal.GDT_Byte)
target_ds.SetGeoTransform(( x_min, pixel_size, 0, y_max, 0, -pixel_size,))
if source_srs:
# Make the target raster have the same projection as the source
target_ds.SetProjection(source_srs.ExportToWkt())
else:
# Source has no projection (needs GDAL >= 1.7.0 to work)
target_ds.SetProjection('LOCAL_CS["arbitrary"]')
# Rasterize
err = gdal.RasterizeLayer(target_ds, (3, 2, 1), source_layer,
burn_values=(0, 0, 0),
options=["ATTRIBUTE=%s" % RASTERIZE_COLOR_FIELD])
if err != 0:
raise Exception("error rasterizing layer: %s" % err)
|
Welcome to the 2018 Phenom Player Payment Portal!
This registration session will allow you to complete contact information and submit payment. Please read through each page carefully to ensure that all information is provided accurately. To complete registration through our secure site, please have your Visa, MasterCard, Discover, or checking account information available. Following the completion of this registration, you will see a charge on your account from CL Cardinals BBall.
|
# Natural Language Toolkit - K nearest neighbour classifier
#
# Author: Sumukh Ghodke <sumukh dot ghodke at gmail dot com>
#
# URL: <http://nltk.sf.net>
# This software is distributed under GPL, for license information see LICENSE.TXT
from nltk_contrib.classifier import instances as ins, Classifier, distancemetric as dm
from nltk import probability as prob
class IB1(Classifier):
def __init__(self, training, attributes, klass):
Classifier.__init__(self, training, attributes, klass)
def classify(self, instances):
for each_test in instances:
id = InstanceDistances()
for each_training in self.training:
dist = dm.euclidean_distance(each_test, each_training, self.attributes)
id.distance(dist, each_training)
each_test.classified_klass = id.klass(majority_klass_vote)
@classmethod
def can_handle_continuous_attributes(self):
return True
def is_trained(self):
return True
class InstanceDistances:
"""
Maps instances to the distance they are from a common test_instance
"""
def __init__(self):
self.distances = {}
def distance(self, value, instance):
if value in self.distances:
self.distances[value].append(instance)
else:
self.distances[value] = [instance]
def minimum_distance_instances(self):
keys = self.distances.keys()
keys.sort()
return self.distances[keys[0]]
def klass(self, strategy):
return strategy(self.minimum_distance_instances())
def majority_klass_vote(instances):
fd = prob.FreqDist()
for each in instances:
fd.inc(each.klass_value)
return fd.max()
|
This video is fascinating. It is about a full-scale Uber-based economy. It lasts two minutes. There are more videos. Click the link.
We are told that China’s government has found a way to overcome the business cycle, create jobs, and avoid inflation. If only it could get someone to move here. Or here.
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", 'edurepo.settings')
import sys
sys.path.append('.')
import django
from repo.models import Course, LearningObjective
def delete_learning_objective(lo, delete, noisy=True):
if noisy:
print 'Deleting objective %s...' % lo
if delete:
lo.delete()
return 0
def delete_course(course, delete=False, noisy=True):
courses = Course.objects.filter(id=course)
assert courses, "Course %s is not in the system" % course
if noisy:
print 'Deleting course %s...' % courses[0]
learning_objectives = LearningObjective.objects.filter(course=courses[0])
for lo in learning_objectives:
rc = delete_learning_objective(lo, delete, noisy=noisy)
if rc:
return rc
if delete:
courses[0].delete()
return 0
def process(args):
if len(args) != 2:
print >> sys.stderr, "Usage: %s course check-or-delete" % sys.argv[0]
return 1
course = args[0]
mode = args[1]
assert mode == 'check' or mode == 'delete'
delete = mode == 'delete'
return delete_course(course, delete=delete)
if __name__ == '__main__':
django.setup()
sys.exit(process(sys.argv[1:]))
|
See statistics and status information for mainline and UMC translations, along with downloads of the text catalog files.
Failing to contact translation coordinator.
The Korean translation moderator is not responding.
New logo translations: Speakers of ALL languages needed!
|
#!/usr/bin/env python3
import functools as ft
from rdflib import BNode
from quit.namespace import FOAF, PROV, QUIT
class Blame(object):
"""
Reusable Blame object for web client
"""
def __init__(self, quit):
self.quit = quit
def _generate_values(self, quads):
result = list()
for quad in quads:
(s, p, o, c) = quad
c.rewrite = True
# Todo: BNodes in VALUES are not supported by specification? Using UNDEF for now
_s = 'UNDEF' if isinstance(s, BNode) else s.n3()
_p = 'UNDEF' if isinstance(p, BNode) else p.n3()
_o = 'UNDEF' if isinstance(o, BNode) else o.n3()
_c = 'UNDEF' if isinstance(c, BNode) else c.identifier.n3()
c.rewrite = False
result.append((_s, _p, _o, _c))
return result
def run(self, quads=None, branch_or_ref='master'):
"""
        Annotate every quad with the author of the commit that introduced it.
        Args:
            quads: Optional list of quads to annotate; currently the quads of
                the requested revision are collected internally.
            branch_or_ref: Branch name or git reference to blame.
Returns:
The SPARQL result set
"""
commit = self.quit.repository.revision(branch_or_ref)
g, commitid = self.quit.instance(branch_or_ref)
quads = [x for x in g.store.quads((None, None, None))]
if len(quads) == 0:
return []
values = self._generate_values(quads)
values_string = ft.reduce(lambda acc, quad: acc + '( %s %s %s %s )\n' % quad, values, '')
q = """
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX aksw: <http://aksw.org/>
PREFIX quit: <http://quit.aksw.org/vocab/>
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?s ?p ?o ?context ?hex ?name ?email ?date WHERE {
?commit quit:preceedingCommit* ?c .
?c prov:endedAtTime ?date ;
prov:qualifiedAssociation ?qa ;
quit:updates ?update ;
quit:hex ?hex .
?qa prov:agent ?user ;
prov:hadRole quit:Author .
?user foaf:mbox ?email ;
rdfs:label ?name .
?update quit:graph ?context ;
quit:additions ?additions .
GRAPH ?additions {
?s ?p ?o
}
FILTER NOT EXISTS {
?y quit:preceedingCommit+ ?z .
?z quit:updates ?update2 .
?update2 quit:graph ?g ;
quit:removals ?removals .
GRAPH ?removals {
?s ?p ?o
}
}
VALUES (?s ?p ?o ?context) {
%s
}
}
""" % values_string
return self.quit.store.store.query(
q,
initNs={'foaf': FOAF, 'prov': PROV, 'quit': QUIT},
initBindings={'commit': QUIT['commit-' + commit.id]}
)
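# Hedged usage sketch (variable names are assumptions, not part of this
# module): given a quit store instance, the class above could be used roughly
# as below; each result row binds ?s ?p ?o ?context together with the commit
# hex, author name, email and date selected by the query.
#
#     blame = Blame(quit)
#     for row in blame.run(branch_or_ref='master'):
#         print(row)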
|
She was a waitress at Savoy Hotel, Perth, Western Australia, Australia.1 From before 1953, her married name became O'Brien.1 From 3 December 1953, her married name became Fiennes-Clinton.1 After her marriage, Linda Alice Creed was styled as Countess of Lincoln on 25 December 1988.
John James Millen lived at Kalgoorlie, Western Australia, Australia.
Sam George Grimston is the son of John Duncan Grimston, 7th Earl of Verulam and Dione Angela Smith.
Marjorie Ray Duncan is the daughter of Walter Atholl Duncan. She married John Grimston, 6th Earl of Verulam, son of James Walter Grimston, 4th Earl of Verulam and Lady Violet Constance Maitland Brabazon, on 2 June 1938.
[S6] G.E. Cokayne; with Vicary Gibbs, H.A. Doubleday, Geoffrey H. White, Duncan Warrand and Lord Howard de Walden, editors, The Complete Peerage of England, Scotland, Ireland, Great Britain and the United Kingdom, Extant, Extinct or Dormant, new ed., 13 volumes in 14 (1910-1959; reprint in 6 volumes, Gloucester, U.K.: Alan Sutton Publishing, 2000), volume VIII, page 317. Hereinafter cited as The Complete Peerage.
|
# -*- coding: utf-8 -*-
#
# 2016-05-04 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Initial writeup
#
# License: AGPLv3
# (c) 2016. Cornelius Kölbel
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from privacyidea.models import EventHandler, EventHandlerOption, db
from privacyidea.lib.error import ParameterError
from privacyidea.lib.audit import getAudit
import functools
import logging
log = logging.getLogger(__name__)
AVAILABLE_EVENTS = []
class event(object):
"""
This is the event decorator that calls the event handler in the handler
module. This event decorator can be used at any API call
"""
def __init__(self, eventname, request, g):
self.eventname = eventname
        if eventname not in AVAILABLE_EVENTS:
AVAILABLE_EVENTS.append(eventname)
self.request = request
self.g = g
def __call__(self, func):
"""
Returns a wrapper that wraps func.
The wrapper will evaluate the event handling definitions and call the
defined action.
:param func: The function that is decorated
:return: function
"""
@functools.wraps(func)
def event_wrapper(*args, **kwds):
# here we have to evaluate the event configuration from the
# DB table eventhandler and based on the self.eventname etc...
# TODO: do Pre-Event Handling
f_result = func(*args, **kwds)
# Post-Event Handling
e_handles = self.g.event_config.get_handled_events(self.eventname)
for e_handler_def in e_handles:
log.debug("Handling event {eventname} with "
"{eventDef}".format(eventname=self.eventname,
eventDef=e_handler_def))
event_handler_name = e_handler_def.get("handlermodule")
event_handler = get_handler_object(event_handler_name)
# The "action is determined by the event configuration
# In the options we can pass the mailserver configuration
options = {"request": self.request,
"g": self.g,
"response": f_result,
"handler_def": e_handler_def}
if event_handler.check_condition(options=options):
log.debug("Handling event {eventname} with options"
"{options}".format(eventname=self.eventname,
options=options))
# create a new audit object
event_audit = getAudit(self.g.audit_object.config)
                    # copy all values from the original audit entry
event_audit_data = dict(self.g.audit_object.audit_data)
event_audit_data["action"] = "EVENT {trigger}>>" \
"{handler}:{action}".format(
trigger=self.eventname,
handler=e_handler_def.get("handlermodule"),
action=e_handler_def.get("action"))
event_audit_data["action_detail"] = "{0!s}".format(
e_handler_def.get("options"))
event_audit_data["info"] = e_handler_def.get("name")
event_audit.log(event_audit_data)
event_handler.do(e_handler_def.get("action"),
options=options)
# set audit object to success
event_audit.log({"success": True})
event_audit.finalize_log()
return f_result
return event_wrapper
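# Hedged usage sketch (the decorated endpoint below is hypothetical, not part
# of this module): the decorator wraps an API function so that, after the
# wrapped call returns, every active event handler definition registered for
# the given event name is evaluated and, if its conditions match, executed.
#
#     @event("token_init", request, g)
#     def init_token_api(*args, **kwargs):
#         ...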
def get_handler_object(handlername):
"""
Return an event handler object based on the Name of the event handler class
:param handlername: The identifier of the Handler Class
    :type handlername: basestring
:return:
"""
# TODO: beautify and make this work with several different handlers
from privacyidea.lib.eventhandler.usernotification import \
UserNotificationEventHandler
from privacyidea.lib.eventhandler.tokenhandler import TokenEventHandler
from privacyidea.lib.eventhandler.scripthandler import ScriptEventHandler
h_obj = None
if handlername == "UserNotification":
h_obj = UserNotificationEventHandler()
if handlername == "Token":
h_obj = TokenEventHandler()
if handlername == "Script":
h_obj = ScriptEventHandler()
return h_obj
def enable_event(event_id, enable=True):
"""
    Enable or disable an event
:param event_id: ID of the event
:return:
"""
ev = EventHandler.query.filter_by(id=event_id).first()
if not ev:
raise ParameterError("The event with id '{0!s}' does not "
"exist".format(event_id))
# Update the event
ev.active = enable
r = ev.save()
return r
def set_event(name, event, handlermodule, action, conditions=None,
ordering=0, options=None, id=None, active=True):
"""
Set an event handling configuration. This writes an entry to the
database eventhandler.
:param name: The name of the event definition
:param event: The name of the event to react on. Can be a single event or
a comma separated list.
:type event: basestring
:param handlermodule: The identifier of the event handler module. This is
an identifier string like "UserNotification"
:type handlermodule: basestring
:param action: The action to perform. This is an action defined by the
handler module
:type action: basestring
:param conditions: A condition. Only if this condition is met, the action is
performed.
:type conditions: dict
:param ordering: An optional ordering of the event definitions.
:type ordering: integer
:param options: Additional options, that are needed as parameters for the
action
:type options: dict
:param id: The DB id of the event. If the id is given, the event is
updated. Otherwise a new entry is generated.
:type id: int
:return: The id of the event.
"""
conditions = conditions or {}
if id:
id = int(id)
event = EventHandler(name, event, handlermodule, action,
conditions=conditions, ordering=ordering,
options=options, id=id, active=active)
return event.id
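# Usage sketch (handler, action and option names are illustrative only; the
# valid values depend on the installed handler modules):
#
#   event_id = set_event("notify_on_init", "token_init", "UserNotification",
#                        "sendmail", conditions={}, options={"To": "admin"})
#   enable_event(event_id, enable=False)   # temporarily switch it off
#   delete_event(event_id)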
def delete_event(event_id):
"""
Delete the event configuration with this given ID.
:param event_id: The database ID of the event.
:type event_id: int
:return:
"""
event_id = int(event_id)
ev = EventHandler.query.filter_by(id=event_id).first()
r = ev.delete()
return r
class EventConfiguration(object):
"""
This class is supposed to contain the event handling configuration during
    the Request. It can be read initially (in the init method) and can be
accessed later during the request.
"""
def __init__(self):
self.eventlist = []
self._read_events()
@property
def events(self):
return self.eventlist
def get_handled_events(self, eventname):
"""
Return a list of the event handling definitions for the given eventname
:param eventname:
:return:
"""
eventlist = [e for e in self.eventlist if (
eventname in e.get("event") and e.get("active"))]
return eventlist
def get_event(self, eventid):
"""
Return the reduced list with the given eventid. This list should only
have one element.
:param eventid: id of the event
:type eventid: int
:return: list with one element
"""
if eventid is not None:
eventid = int(eventid)
eventlist = [e for e in self.eventlist if e.get("id") == eventid]
return eventlist
else:
return self.eventlist
def _read_events(self):
q = EventHandler.query.order_by(EventHandler.ordering)
for e in q:
self.eventlist.append(e.get())
|
My brother Les with another day's cleanup. There was a little seam along the lag line that was a consistent producer of heavy gold. What I call the lag line is where subsequent erosion and incision events concentrated the gold. As a stream placer forms in ancient continental environments, many of these deposits experience high-energy events.
Streams fan out as gradients decrease, and heavy minerals tend to collect in these low-energy areas. As the gradient increases, the stream continuously attacks the bedrock; as it starts eroding, the settled rock, sand and concentrated minerals begin to move again while the water cuts another incision. In many cases this produces the classic debris flow that miners observe in their mining operations.
|
from datetime import datetime
from uuid import UUID
import json
from minion.renderers import JSON
from minion.request import Response
from minion.traversal import LeafResource, TreeResource
from sqlalchemy import String, select
from sqlalchemy.sql.expression import cast
import attr
from great.models import music
from great.models.core import ModelManager, NotFound
def _uuid_to_str(obj):
if isinstance(obj, UUID):
return obj.hex
raise TypeError("{!r} is not JSON serializable".format(obj))
@attr.s
class ModelResource(object):
manager = attr.ib()
from_detail_json = attr.ib(default=json.load)
for_detail_json = attr.ib(default=lambda model: model)
renderer = JSON(default=_uuid_to_str)
def get_child(self, name, request):
if not name:
return self
elif name == "tracked":
# FIXME
query = self.manager.db.execute(
select(self.manager._basic_fields).where(
self.manager.table.c.tracked,
),
)
return LeafResource(
render=lambda request: self.renderer.render(
jsonable=[dict(each) for each in query.fetchall()],
request=request,
),
)
id = int(name)
def render_detail(request):
try:
content = self.for_detail_json(self.manager.detail(id=id))
except NotFound:
return Response(code=404)
return self.renderer.render(jsonable=content, request=request)
return LeafResource(render=render_detail)
def render(self, request):
if request.method == b"GET":
fields = [
field
for raw in request.url.get(b"fields")
for field in raw.rstrip(b",").split(b",")
]
content = self.manager.list(
fields=fields,
)
elif request.method == b"POST":
try:
new = self.from_detail_json(request.content)
except ValueError:
return Response(code=400)
content = self.for_detail_json(self.manager.create(**new))
elif request.method == b"DELETE":
self.manager.delete(id=json.load(request.content)[u"id"])
return Response(code=204)
else:
return Response(code=405)
return self.renderer.render(jsonable=content, request=request)
def init_app(bin, root):
music_resource = TreeResource()
db = bin.provide("engine").connect()
for table, detail_columns, from_detail_json, for_detail_json in (
(
music.albums,
[
music.albums.c.comments,
music.albums.c.compilation,
music.albums.c.live,
cast(music.albums.c.mbid, String).label("mbid"),
music.albums.c.pinned,
music.albums.c.rating,
music.albums.c.release_date,
music.albums.c.type,
],
_album_from_json,
_album_for_json,
),
(
music.artists,
[
music.artists.c.comments,
cast(music.artists.c.created_at, String).label("created_at"),
cast(music.artists.c.mbid, String).label("mbid"),
cast(music.artists.c.modified_at, String).label("modified_at"),
music.artists.c.pinned,
music.artists.c.rating,
],
json.load,
lambda artist: artist,
),
):
music_resource.set_child(
name=table.name,
resource=ModelResource(
from_detail_json=from_detail_json,
for_detail_json=for_detail_json,
manager=ModelManager(
db=db,
table=table,
detail_columns=detail_columns,
),
)
)
root.set_child("music", music_resource)
def _album_from_json(detail):
album = json.load(detail)
release_date = album.get(u"release_date")
if release_date is not None:
album[u"release_date"] = datetime.strptime(
release_date, "%Y-%m-%d"
).date()
return album
def _album_for_json(album):
release_date = album.get(u"release_date")
if release_date is not None:
album[u"release_date"] = release_date.strftime("%Y-%m-%d")
return album
|
Traveling from I-15, take Exit 265 Center Street and head east.
Proceed to 300 West and turn left, heading north.
Liberty Center will be on your left.
|
#====================== BEGIN GPL LICENSE BLOCK ======================
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#======================= END GPL LICENSE BLOCK ========================
# <pep8 compliant>
import bpy
from ...utils import copy_bone
from ...utils import strip_org, make_deformer_name
from ...utils import create_bone_widget
class Rig:
""" A "copy" rig. All it does is duplicate the original bone and
constrain it.
This is a control and deformation rig.
"""
def __init__(self, obj, bone, params):
""" Gather and validate data about the rig.
"""
self.obj = obj
self.org_bone = bone
self.org_name = strip_org(bone)
self.params = params
self.make_control = params.make_control
self.make_deform = params.make_deform
def generate(self):
""" Generate the rig.
Do NOT modify any of the original bones, except for adding constraints.
The main armature should be selected and active before this is called.
"""
bpy.ops.object.mode_set(mode='EDIT')
# Make a control bone (copy of original).
if self.make_control:
bone = copy_bone(self.obj, self.org_bone, self.org_name)
# Make a deformation bone (copy of original, child of original).
if self.make_deform:
def_bone = copy_bone(self.obj, self.org_bone, make_deformer_name(self.org_name))
# Get edit bones
eb = self.obj.data.edit_bones
# UNUSED
# if self.make_control:
# bone_e = eb[bone]
if self.make_deform:
def_bone_e = eb[def_bone]
# Parent
if self.make_deform:
def_bone_e.use_connect = False
def_bone_e.parent = eb[self.org_bone]
bpy.ops.object.mode_set(mode='OBJECT')
pb = self.obj.pose.bones
if self.make_control:
# Constrain the original bone.
con = pb[self.org_bone].constraints.new('COPY_TRANSFORMS')
con.name = "copy_transforms"
con.target = self.obj
con.subtarget = bone
# Create control widget
create_bone_widget(self.obj, bone)
def add_parameters(params):
""" Add the parameters of this rig type to the
RigifyParameters PropertyGroup
"""
params.make_control = bpy.props.BoolProperty(name="Control", default=True, description="Create a control bone for the copy")
params.make_deform = bpy.props.BoolProperty(name="Deform", default=True, description="Create a deform bone for the copy")
def parameters_ui(layout, params):
""" Create the ui for the rig parameters.
"""
r = layout.row()
r.prop(params, "make_control")
r = layout.row()
r.prop(params, "make_deform")
def create_sample(obj):
""" Create a sample metarig for this rig type.
"""
# generated by rigify.utils.write_metarig
bpy.ops.object.mode_set(mode='EDIT')
arm = obj.data
bones = {}
bone = arm.edit_bones.new('Bone')
bone.head[:] = 0.0000, 0.0000, 0.0000
bone.tail[:] = 0.0000, 0.0000, 0.2000
bone.roll = 0.0000
bone.use_connect = False
bones['Bone'] = bone.name
bpy.ops.object.mode_set(mode='OBJECT')
pbone = obj.pose.bones[bones['Bone']]
pbone.rigify_type = 'basic.copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
bpy.ops.object.mode_set(mode='EDIT')
for bone in arm.edit_bones:
bone.select = False
bone.select_head = False
bone.select_tail = False
for b in bones:
bone = arm.edit_bones[bones[b]]
bone.select = True
bone.select_head = True
bone.select_tail = True
arm.edit_bones.active = bone
|
LOCAL construction companies were furious last night at being “excluded” from the bidding for £250m in school building projects in North and Mid Wales.
A consortium of councils in Gwynedd, Ceredigion and Powys awarded the “framework” status for small and major projects to just six contractors with headquarters in England.
Three of those, Bam, Carillion and Laing O’Rourke, will be favoured for the major projects, priced at £30m-£100m each, as part of the Welsh Assembly Government’s flagship 21st century schools programme for the next four years.
Chris Wynne, the managing director of Wynne Construction in Denbighshire, warned this system of “bundling” contracts was threatening the future of North Wales-based construction firms.
His 80-year-old business recently won a £9.3m contract for a replacement school at Ysgol yr Hendre in Caernarfon. It followed other contracts across North Wales worth £36m in the past two years on schools in Flint, Llangefni, Colwyn Bay, Holyhead and Pwllheli.
It employs around 150 people and it was local companies like his which engaged in training their workforce and taking on the apprentices vital to the future of the industry, he said.
But he warned: “These procurement strategies are spreading across local authorities and within the Welsh health estate and hospitals.
In a letter to economy minister Ieuan Wyn Jones yesterday, he said: “Clearly you do not appreciate the scale of the impact of these procurement strategies and awards will have on the indigenous Welsh companies.
“When these large non-Welsh framework contractors pack up and leave there will be no sustainable legacy in terms of training or long term employment.
Glyn Watkin Jones, chairman of the Bangor-based Watkin Jones construction company, which employs 350 people, said: “Whatever money these big companies make out of these projects goes out of Wales.
“The Assembly Government should be supporting local construction companies.
Plaid Cymru AM for Aberconwy, Gareth Jones, feared that such huge public spending on building projects wasn’t working on behalf of the economy and businesses of North Wales.
|
"""
Django settings for moebox project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
from .config import *
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g#ouo6a&jt%lyl*=2rhu7ajzezl)rtff7ha!%s84^f@#^=&!d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'moebox',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'moebox.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'moebox.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'd:/xampp/htdocs_root'
|
Almost two years ago, an Air France Airbus jet disappeared over the Atlantic while flying from Rio de Janeiro to Paris. Everyone on board perished. Recently, a search team led by the Woods Hole Oceanographic Institution on Cape Cod said it had located debris from the plane. I spoke with WHOI Special Projects Director David Gallo about his team’s efforts. He also offered some remarks on the issue of preparedness for future deep-sea accidents.
|
from binascii import hexlify
import logging
from pycoin.tx import Tx
from models import unprocessed_txs, txs, all_addresses, addr_to_uid, Account, known_txs, exchange_rate, nodes_recently_updated
from constants import COIN
from digitalocean_custom import calc_node_minutes
def hash_to_hex(h):
return hexlify(h[::-1])
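# Illustrative example: transaction hashes are little-endian internally, so the
# human-readable txid is the byte-reversed hex encoding.
#
#   >>> hash_to_hex(b'\x01\x02\x03')
#   b'030201'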
def process_tx_initial(tx_obj: Tx):
found_relevant_address = False
for out in tx_obj.txs_out:
address = out.bitcoin_address()
if address in all_addresses:
found_relevant_address = True
break
if not found_relevant_address:
logging.info('Found irrelevant tx %s' % hash_to_hex(tx_obj.hash()))
return
tx_hash = tx_obj.hash()
txid = hash_to_hex(tx_hash).decode()
if tx_hash in known_txs:
return
known_txs.add(tx_hash)
txs[tx_hash] = tx_obj.as_hex()
for out in tx_obj.txs_out:
address = out.bitcoin_address()
if address in all_addresses and address is not None:
unprocessed_txs.add(tx_hash)
uid = addr_to_uid[address]
account = Account(uid)
account.txs.add(tx_hash)
account.unconf_minutes.incr(calc_node_minutes(satoshi_amount=out.coin_value, exchange_rate=exchange_rate.get()))
account.add_msg('Found tx for %.08f, %s' % (out.coin_value / COIN, txid))
nodes_recently_updated.append(account.uid)
|
A classic C.C. beanie with four thread tones. Add some color to your favorite looks!
Matches our Four Tone Gloves and Scarf.
|
from __future__ import print_function
import os
import numpy as np
from six import string_types, iteritems
from pyNastran.bdf.mesh_utils.bdf_renumber import bdf_renumber
from pyNastran.bdf.bdf import BDF
def bdf_merge(bdf_filenames, bdf_filename_out=None, renumber=True, encoding=None,
size=8, is_double=False, cards_to_skip=None, log=None):
"""
Merges multiple BDF into one file
Parameters
----------
bdf_filenames : List[str]
list of bdf filenames
bdf_filename_out : str / None
the output bdf filename (default=None; None -> no writing)
renumber : bool
should the bdf be renumbered (default=True)
encoding : str
the unicode encoding (default=None; system default)
size : int; {8, 16}; default=8
the bdf write precision
is_double : bool; default=False
the field precision to write
cards_to_skip : List[str]; (default=None -> don't skip any cards)
There are edge cases (e.g. FLUTTER analysis) where things can break due to
uncross-referenced cards. You need to disable entire classes of cards in
that case (e.g. all aero cards).
Supports
--------
nodes: GRID
coords: CORDx
elements: CQUAD4, CTRIA3, CTETRA, CPENTA, CHEXA, CELASx, CBAR, CBEAM
CONM1, CONM2, CMASS
properties: PSHELL, PCOMP, PSOLID, PMASS
materials: MAT1, MAT8
.. todo:: doesn't support SPOINTs/EPOINTs
.. warning:: still very preliminary
"""
if not isinstance(bdf_filenames, (list, tuple)):
raise TypeError('bdf_filenames is not a list/tuple...%s' % str(bdf_filenames))
if not len(bdf_filenames) > 1:
raise RuntimeError("You can't merge one BDF...bdf_filenames=%s" % str(bdf_filenames))
for bdf_filename in bdf_filenames:
if not isinstance(bdf_filename, string_types):
raise TypeError('bdf_filenames is not a string...%s' % bdf_filename)
#bdf_filenames = [bdf_filenames]
#starting_id_dict_default = {
#'cid' : max(model.coords.keys()),
#'nid' : max(model.nodes.keys()),
#'eid' : max([
#max(model.elements.keys()),
#max(model.masses.keys()),
#]),
#'pid' : max([
#max(model.properties.keys()),
#max(model.properties_mass.keys()),
#]),
#'mid' : max(model.material_ids),
#}
model = BDF(debug=False, log=log)
model.disable_cards(cards_to_skip)
bdf_filename0 = bdf_filenames[0]
model.read_bdf(bdf_filename0, encoding=encoding)
model.log.info('primary=%s' % bdf_filename0)
data_members = [
'coords', 'nodes', 'elements', 'masses', 'properties', 'properties_mass',
'materials',
]
for bdf_filename in bdf_filenames[1:]:
#model.log.info('model.masses = %s' % model.masses)
starting_id_dict = {
'cid' : max(model.coords.keys()) + 1,
'nid' : max(model.nodes.keys()) + 1,
'eid' : max([
max(model.elements.keys()),
0 if len(model.masses) == 0 else max(model.masses.keys()),
]) + 1,
'pid' : max([
max(model.properties.keys()),
0 if len(model.properties_mass) == 0 else max(model.properties_mass.keys()),
]) + 1,
'mid' : max(model.material_ids) + 1,
}
#for param, val in sorted(iteritems(starting_id_dict)):
#print(' %-3s %s' % (param, val))
model.log.info('secondary=%s' % bdf_filename)
model2 = BDF(debug=False)
model2.disable_cards(cards_to_skip)
bdf_dump = 'bdf_merge_temp.bdf'
#model2.read_bdf(bdf_filename, xref=False)
bdf_renumber(bdf_filename, bdf_dump, starting_id_dict=starting_id_dict,
size=size, is_double=is_double, cards_to_skip=cards_to_skip)
model2 = BDF(debug=False)
model2.disable_cards(cards_to_skip)
model2.read_bdf(bdf_dump)
os.remove(bdf_dump)
#model.log.info('model2.node_ids = %s' % np.array(model2.node_ids))
for data_member in data_members:
data1 = getattr(model, data_member)
data2 = getattr(model2, data_member)
if isinstance(data1, dict):
#model.log.info(' working on %s' % (data_member))
for key, value in iteritems(data2):
                    if data_member == 'coords' and key == 0:
continue
if isinstance(value, list):
raise NotImplementedError(type(value))
else:
assert key not in data1, key
data1[key] = value
#print(' %s' % key)
else:
raise NotImplementedError(type(data1))
#if bdf_filenames_out:
#model.write_bdf(bdf_filenames_out, size=size)
if renumber:
model.log.info('final renumber...')
starting_id_dict = {
'cid' : 1,
'nid' : 1,
'eid' : 1,
'pid' : 1,
'mid' : 1,
}
bdf_renumber(model, bdf_filename_out, starting_id_dict=starting_id_dict,
size=size, is_double=is_double, cards_to_skip=cards_to_skip)
elif bdf_filename_out:
model.write_bdf(out_filename=bdf_filename_out, encoding=None,
size=size, is_double=is_double,
interspersed=True,
enddata=None)
return model
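# Usage sketch (filenames are hypothetical):
#
#   model = bdf_merge(['wing.bdf', 'fuselage.bdf'],
#                     bdf_filename_out='merged.bdf',
#                     renumber=True, size=8)
#
# With renumber=True the merged model is renumbered starting from 1; otherwise
# the secondary decks keep the offset IDs computed during the merge.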
|
This exhibit explores the world of railroad workers, from the train crews to the section workers to the workers in the "shops". This exhibit opens May 1st and runs through February 28, 2010. If you have any artifacts to loan for the exhibit or would like to help put it together, contact the Museum staff at 307/789-8248.
Press here to download a poster for your door to help us advertise.
|
"""
A numerically stable implementation of the logarithm of sums of exponentials.
"""
__license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = 'Lucas Theis <lucas@theis.io>'
__docformat__ = 'epytext'
from numpy import log, sum, exp, zeros, max, asarray, vectorize, inf, nan, squeeze, reshape
def logsumexp(x, ax=None):
"""
Computes the log of the sum of the exp of the entries in x in a numerically
stable way.
@type x: array_like
@param x: a list, array or matrix of numbers
@type ax: integer
@param ax: axis along which the sum is applied
@rtype: array
@return: an array containing the results
"""
if ax is None:
x_max = max(x, ax) - 1.
return x_max + log(sum(exp(x - x_max)))
else:
x_max_shape = list(x.shape)
x_max_shape[ax] = 1
x_max = asarray(max(x, ax)) - 1.
return x_max + log(sum(exp(x - x_max.reshape(x_max_shape)), ax))
def logmeanexp(x, ax=None):
"""
Computes the log of the mean of the exp of the entries in x in a numerically
stable way. Uses logsumexp.
@type x: array_like
@param x: a list, array or matrix of numbers
@type ax: integer
@param ax: axis along which the values are averaged
@rtype: array
@return: an array containing the results
"""
x = asarray(x)
n = x.size if ax is None else x.shape[ax]
return logsumexp(x, ax) - log(n)
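# Usage sketch (illustrative): for well-scaled inputs the result matches the
# naive formula, and an axis can be supplied to reduce along one dimension.
#
#   >>> import numpy as np
#   >>> x = np.array([[1., 2.], [3., 4.]])
#   >>> np.allclose(logsumexp(x), np.log(np.sum(np.exp(x))))
#   True
#   >>> logsumexp(x, ax=1).shape
#   (2,)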
|
Today thousands of Aucklanders came to Queen Street, to the stretch between Shortland St and Wyndham St. This stretch was made the home of 40 models, who paraded up the street showcasing the latest fashion.
The models wore on-trend outfits from Glassons, Huffer, Moochi, Max, Smith & Caughey’s and many more.
To summarise: 40 models, 44 designers and over 300 garments made their way down NZ’s Longest 125m Catwalk.
Visit us again for more photos of the New Zealand’s longest catwalk fashion show!
|
import load
import send
import imaplib
import email
import yaml
import re
from bs4 import BeautifulSoup
with open('../config.yaml') as f:
cf = yaml.safe_load(f)
def parse_text_message_from_email(msg):
'''
Gets the actual text sent to the
email address by parsing it out of the email body
'''
text = {}
text['sender'] = msg['from']
    msg_body = ''
    msg_soup = None
if msg.is_multipart():
for i,part in enumerate(msg.walk()):
if part.get_content_type() =='text/plain':
msg_body = part.get_payload(decode=True)
elif part.get_content_type() =='text/html':
msg_soup = BeautifulSoup(part.get_payload(decode=True))
else:
msg_body = msg.get_payload(decode=True)
    if len(msg_body) == 0 and msg_soup is not None:
msg_body = msg_soup.find('body').text
    msg_body = msg_body.replace('\r','').replace('\n','')
text['message']=msg_body
return text
def parse_choices(choices_made):
'''
Takes a numbered list of choices and maps them
to the relevant search criteria.
'''
search_criteria='';
for choice in choices_made:
if choice == '1':
search_criteria='dishwasher&20philadelphia ' + search_criteria
if choice == '2':
search_criteria='warehouse&20philadelphia ' + search_criteria
if choice == '3':
search_criteria='cook&20philadelphia ' + search_criteria
return search_criteria
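# Example (illustrative): replying with the digits 1 and 3 builds a combined
# search string, with the most recent choice prepended.
#
#   >>> parse_choices(['1', '3'])
#   'cook&20philadelphia dishwasher&20philadelphia '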
def read_mailbox_and_edit_users(M):
"""
Processes mail in order to add,edit, and remove users
"""
Session = load.bind_to_database(cf['postgres_username'],cf['postgres_password'],
cf['postgres_db'])
rv, data_num = M.search(None, "ALL")
if rv != 'OK':
print "No messages found!"
return
messages=[]
print str(len(data_num[0].split())) + " new messages found"
for num in data_num[0].split():
rv, data = M.fetch(num, '(RFC822)')
if rv != 'OK':
print "ERROR getting message", num
return
email_data = email.message_from_string(data[0][1])
text = parse_text_message_from_email(email_data)
choices_made = re.findall(r'\d+',text['message'])
if 'stop' in text['message'].lower():
if load.check_user(Session,text['sender']):
load.delete_user(Session,text['sender'])
send.send_text(cf['fromaddr'],cf['username'],cf['password'],text['sender'],
cf['stop_message'])
M.store(num , '+FLAGS', '\\Deleted') #This archives the message.
elif 'start' in text['message'].lower() or 'list' in text['message'].lower():
send.send_text(cf['fromaddr'],cf['username'],cf['password'],text['sender'],
cf['start_message'])
M.store(num , '+FLAGS', '\\Deleted') #This archives the message.
elif 'demo' in text['message'].lower() or 'list' in text['message'].lower():
send.send_text(cf['fromaddr'],cf['username'],cf['password'],text['sender'],
cf['demo_message'])
M.store(num , '+FLAGS', '\\Deleted') #This archives the message.
elif len(choices_made) > 0:
search_criteria = parse_choices(choices_made)
if len(search_criteria) > 0:
if load.check_user(Session,text['sender']):
load.edit_user(Session,text['sender'],search_criteria)
else:
load.insert_user(Session,'',text['sender'],'',search_criteria)
send.send_text(cf['fromaddr'],cf['username'],cf['password'],text['sender'],
str(choices_made) + '. ' + cf['chosen_message'])
M.store(num , '+FLAGS', '\\Deleted') #This archives the message.
|
The Chapman loose cover fits most standard A5 notebooks.
(Obviously one comes with this purchase to get you started).
Once that's full of ideas, you can easily replace the notebook and carry on using this soft, smooth cover.
Natural vegetable tanned Swedish leather cover.
The notebook has 120 natural off white lined pages.
LeadorDead logo debossed on page marker.
Width: 16cm Height: 23cm Depth: 1.5cm.
|
##
# Copyright 2009-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Boost, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Ward Poelmans (Ghent University)
@author: Petar Forai (IMP/IMBA)
@author: Luca Marsella (CSCS)
@author: Guilherme Peretti-Pezzi (CSCS)
@author: Joachim Hein (Lund University)
@author: Michele Dolfi (ETH Zurich)
@author: Simon Branford (University of Birmingham)
"""
from distutils.version import LooseVersion
import fileinput
import glob
import os
import re
import sys
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import ERROR
from easybuild.tools.filetools import apply_regex_substitutions, copy, mkdir, symlink, which, write_file
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import AARCH64, POWER, UNKNOWN
from easybuild.tools.systemtools import get_cpu_architecture, get_glibc_version, get_shared_lib_ext
class EB_Boost(EasyBlock):
"""Support for building Boost."""
def __init__(self, *args, **kwargs):
"""Initialize Boost-specific variables."""
super(EB_Boost, self).__init__(*args, **kwargs)
self.objdir = None
self.pyvers = []
if LooseVersion(self.version) >= LooseVersion("1.71.0"):
self.bjamcmd = 'b2'
else:
self.bjamcmd = 'bjam'
@staticmethod
def extra_options():
"""Add extra easyconfig parameters for Boost."""
extra_vars = {
'boost_mpi': [False, "Build mpi boost module", CUSTOM],
'boost_multi_thread': [None, "Build boost with multi-thread option (DEPRECATED)", CUSTOM],
'toolset': [None, "Toolset to use for Boost configuration ('--with-toolset' for bootstrap.sh)", CUSTOM],
'build_toolset': [None, "Toolset to use for Boost compilation "
"('toolset' for b2, default calculated from toolset)", CUSTOM],
'mpi_launcher': [None, "Launcher to use when running MPI regression tests", CUSTOM],
'only_python_bindings': [False, "Only install Boost.Python library providing Python bindings", CUSTOM],
'use_glibcxx11_abi': [None, "Use the GLIBCXX11 ABI", CUSTOM],
}
return EasyBlock.extra_options(extra_vars)
def patch_step(self):
"""Patch Boost source code before building."""
super(EB_Boost, self).patch_step()
# TIME_UTC is also defined in recent glibc versions, so we need to rename it for old Boost versions (<= 1.49)
glibc_version = get_glibc_version()
        recent_glibc = glibc_version != UNKNOWN and LooseVersion(glibc_version) > LooseVersion("2.15")
        if recent_glibc and LooseVersion(self.version) <= LooseVersion("1.49.0"):
self.log.info("Patching because the glibc version is too new")
files_to_patch = ["boost/thread/xtime.hpp"] + glob.glob("libs/interprocess/test/*.hpp")
files_to_patch += glob.glob("libs/spirit/classic/test/*.cpp") + glob.glob("libs/spirit/classic/test/*.inl")
for patchfile in files_to_patch:
try:
for line in fileinput.input("%s" % patchfile, inplace=1, backup='.orig'):
line = re.sub(r"TIME_UTC", r"TIME_UTC_", line)
sys.stdout.write(line)
except IOError as err:
raise EasyBuildError("Failed to patch %s: %s", patchfile, err)
def prepare_step(self, *args, **kwargs):
"""Prepare build environment."""
super(EB_Boost, self).prepare_step(*args, **kwargs)
# keep track of Python version(s) used during installation,
# so we can perform a complete sanity check
if get_software_root('Python'):
self.pyvers.append(get_software_version('Python'))
def configure_step(self):
"""Configure Boost build using custom tools"""
# mpi sanity check
if self.cfg['boost_mpi'] and not self.toolchain.options.get('usempi', None):
raise EasyBuildError("When enabling building boost_mpi, also enable the 'usempi' toolchain option.")
# create build directory (Boost doesn't like being built in source dir)
self.objdir = os.path.join(self.builddir, 'obj')
mkdir(self.objdir)
# generate config depending on compiler used
toolset = self.cfg['toolset']
if toolset is None:
if self.toolchain.comp_family() == toolchain.INTELCOMP:
toolset = 'intel-linux'
elif self.toolchain.comp_family() == toolchain.GCC:
toolset = 'gcc'
else:
raise EasyBuildError("Unknown compiler used, don't know what to specify to --with-toolset, aborting.")
cmd = "%s ./bootstrap.sh --with-toolset=%s --prefix=%s %s"
tup = (self.cfg['preconfigopts'], toolset, self.objdir, self.cfg['configopts'])
run_cmd(cmd % tup, log_all=True, simple=True)
# Use build_toolset if specified or the bootstrap toolset without the OS suffix
self.toolset = self.cfg['build_toolset'] or re.sub('-linux$', '', toolset)
user_config = []
        # Explicitly set the compiler path to avoid B2 checking some standard paths like /opt
cxx = os.getenv('CXX')
if cxx:
cxx = which(cxx, on_error=ERROR)
# Remove default toolset config which may lead to duplicate toolsets (e.g. for intel-linux)
apply_regex_substitutions('project-config.jam', [('using %s ;' % toolset, '')])
# Add our toolset config with no version and full path to compiler
user_config.append("using %s : : %s ;" % (self.toolset, cxx))
if self.cfg['boost_mpi']:
# configure the boost mpi module
# http://www.boost.org/doc/libs/1_47_0/doc/html/mpi/getting_started.html
# let Boost.Build know to look here for the config file
# Check if using a Cray toolchain and configure MPI accordingly
if self.toolchain.toolchain_family() == toolchain.CRAYPE:
if self.toolchain.PRGENV_MODULE_NAME_SUFFIX == 'gnu':
craympichdir = os.getenv('CRAY_MPICH2_DIR')
craygccversion = os.getenv('GCC_VERSION')
# We configure the gcc toolchain below, so make sure the EC doesn't use another toolset
if self.toolset != 'gcc':
raise EasyBuildError("For the cray toolchain the 'gcc' toolset must be used.")
                    # Remove the previous "using gcc" line added above (via self.toolset) if present
user_config = [x for x in user_config if not x.startswith('using gcc :')]
user_config.extend([
'local CRAY_MPICH2_DIR = %s ;' % craympichdir,
'using gcc ',
': %s' % craygccversion,
': CC ',
': <compileflags>-I$(CRAY_MPICH2_DIR)/include ',
r' <linkflags>-L$(CRAY_MPICH2_DIR)/lib \ ',
'; ',
'using mpi ',
': CC ',
': <find-shared-library>mpich ',
': %s' % self.cfg['mpi_launcher'],
';',
'',
])
else:
raise EasyBuildError("Bailing out: only PrgEnv-gnu supported for now")
else:
user_config.append("using mpi : %s ;" % os.getenv("MPICXX"))
write_file('user-config.jam', '\n'.join(user_config), append=True)
def build_boost_variant(self, bjamoptions, paracmd):
"""Build Boost library with specified options for bjam."""
# build with specified options
cmd = "%s ./%s %s %s %s" % (self.cfg['prebuildopts'], self.bjamcmd, bjamoptions, paracmd, self.cfg['buildopts'])
run_cmd(cmd, log_all=True, simple=True)
# install built Boost library
cmd = "%s ./%s %s install %s %s" % (
self.cfg['preinstallopts'], self.bjamcmd, bjamoptions, paracmd, self.cfg['installopts'])
run_cmd(cmd, log_all=True, simple=True)
# clean up before proceeding with next build
run_cmd("./%s %s --clean-all" % (self.bjamcmd, bjamoptions), log_all=True, simple=True)
def build_step(self):
"""Build Boost with bjam tool."""
self.bjamoptions = " --prefix=%s --user-config=user-config.jam" % self.objdir
if 'toolset=' not in self.cfg['buildopts']:
self.bjamoptions += " toolset=" + self.toolset
cxxflags = os.getenv('CXXFLAGS')
# only disable -D_GLIBCXX_USE_CXX11_ABI if use_glibcxx11_abi was explicitly set to False
# None value is the default, which corresponds to default setting (=1 since GCC 5.x)
if self.cfg['use_glibcxx11_abi'] is not None:
cxxflags += ' -D_GLIBCXX_USE_CXX11_ABI='
if self.cfg['use_glibcxx11_abi']:
cxxflags += '1'
else:
cxxflags += '0'
if cxxflags is not None:
self.bjamoptions += " cxxflags='%s'" % cxxflags
ldflags = os.getenv('LDFLAGS')
if ldflags is not None:
self.bjamoptions += " linkflags='%s'" % ldflags
# specify path for bzip2/zlib if module is loaded
for lib in ["bzip2", "zlib"]:
libroot = get_software_root(lib)
if libroot:
self.bjamoptions += " -s%s_INCLUDE=%s/include" % (lib.upper(), libroot)
self.bjamoptions += " -s%s_LIBPATH=%s/lib" % (lib.upper(), libroot)
self.paracmd = ''
if self.cfg['parallel']:
self.paracmd = "-j %s" % self.cfg['parallel']
if self.cfg['only_python_bindings']:
# magic incantation to only install Boost Python bindings is... --with-python
# see http://boostorg.github.io/python/doc/html/building/installing_boost_python_on_your_.html
self.bjamoptions += " --with-python"
self.log.info("Building boost with single and multi threading")
self.bjamoptions += " threading=single,multi --layout=tagged"
if self.cfg['boost_mpi']:
self.log.info("Building boost_mpi library")
mpi_bjamoptions = " --with-mpi"
self.build_boost_variant(self.bjamoptions + mpi_bjamoptions, self.paracmd)
self.log.info("Building boost libraries")
# build with specified options
cmd = "%s ./%s %s %s %s" % (self.cfg['prebuildopts'], self.bjamcmd, self.bjamoptions, self.paracmd, self.cfg['buildopts'])
run_cmd(cmd, log_all=True, simple=True)
def install_step(self):
"""Install Boost by copying files to install dir."""
# install boost libraries
self.log.info("Installing boost libraries")
cmd = "%s ./%s %s install %s %s" % (
self.cfg['preinstallopts'], self.bjamcmd, self.bjamoptions, self.paracmd, self.cfg['installopts'])
run_cmd(cmd, log_all=True, simple=True)
self.log.info("Copying %s to installation dir %s", self.objdir, self.installdir)
if self.cfg['only_python_bindings'] and 'Python' in self.cfg['multi_deps'] and self.iter_idx > 0:
self.log.info("Main installation should already exist, only copying over missing Python libraries.")
copy(glob.glob(os.path.join(self.objdir, 'lib', 'libboost_python*')), os.path.join(self.installdir, 'lib'), symlinks=True)
else:
copy(glob.glob(os.path.join(self.objdir, '*')), self.installdir, symlinks=True)
# Link tagged multi threaded libs as the default libs
lib_mt_suffix = '-mt'
if LooseVersion(self.version) >= LooseVersion("1.69.0"):
if get_cpu_architecture() == AARCH64:
lib_mt_suffix += '-a64'
elif get_cpu_architecture() == POWER:
lib_mt_suffix += '-p64'
else:
lib_mt_suffix += '-x64'
shlib_ext = get_shared_lib_ext()
for source_shared_lib in glob.glob(os.path.join(self.installdir, 'lib', 'lib*%s.%s.%s' % (lib_mt_suffix, shlib_ext, self.version))):
target_shared_lib = source_shared_lib.replace('%s.%s' % (lib_mt_suffix, shlib_ext), '.%s' % shlib_ext)
source_static_lib = source_shared_lib.replace('%s.%s.%s' % (lib_mt_suffix, shlib_ext, self.version), '%s.a' % lib_mt_suffix)
target_static_lib = source_static_lib.replace('%s.a' % lib_mt_suffix, '.a')
symlink(os.path.basename(source_shared_lib), target_shared_lib, use_abspath_source=False)
symlink(os.path.basename(target_shared_lib), target_shared_lib.replace('.%s' % self.version, ''), use_abspath_source=False)
symlink(os.path.basename(source_static_lib), target_static_lib, use_abspath_source=False)
def sanity_check_step(self):
"""Custom sanity check for Boost."""
shlib_ext = get_shared_lib_ext()
custom_paths = {
'files': [],
'dirs': ['include/boost']
}
if self.cfg['only_python_bindings']:
for pyver in self.pyvers:
pymajorver = pyver.split('.')[0]
pyminorver = pyver.split('.')[1]
if LooseVersion(self.version) >= LooseVersion("1.67.0"):
suffix = '%s%s' % (pymajorver, pyminorver)
elif int(pymajorver) >= 3:
suffix = pymajorver
else:
suffix = ''
custom_paths['files'].append(os.path.join('lib', 'libboost_python%s.%s' % (suffix, shlib_ext)))
else:
custom_paths['files'].append(os.path.join('lib', 'libboost_system.%s' % shlib_ext))
lib_mt_suffix = '-mt'
# MT libraries gained an extra suffix from v1.69.0 onwards
if LooseVersion(self.version) >= LooseVersion("1.69.0"):
if get_cpu_architecture() == AARCH64:
lib_mt_suffix += '-a64'
elif get_cpu_architecture() == POWER:
lib_mt_suffix += '-p64'
else:
lib_mt_suffix += '-x64'
custom_paths['files'].append(os.path.join('lib', 'libboost_thread%s.%s' % (lib_mt_suffix, shlib_ext)))
if self.cfg['boost_mpi']:
custom_paths['files'].append(os.path.join('lib', 'libboost_mpi.%s' % shlib_ext))
custom_paths['files'].append(os.path.join('lib', 'libboost_mpi%s.%s' % (lib_mt_suffix, shlib_ext)))
super(EB_Boost, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self):
"""Set up a BOOST_ROOT environment variable to e.g. ease Boost handling by cmake"""
txt = super(EB_Boost, self).make_module_extra()
if not self.cfg['only_python_bindings']:
txt += self.module_generator.set_environment('BOOST_ROOT', self.installdir)
return txt
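# Illustrative easyconfig snippet (values are hypothetical) showing the custom
# parameters registered in extra_options(); this easyblock is selected
# automatically for easyconfigs with name = 'Boost'.
#
#   boost_mpi = True
#   toolset = 'gcc'
#   use_glibcxx11_abi = True
#   only_python_bindings = False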
|
"""
peerdiscovery
The process is simple.
1) Start up the client and broadcast a UDP datagram on a defined interval.
2) Listen for other datagrams
3) When another datagram is heard, pull it into the list of the peers.
But, if the peer is already in the list, do nothing.
4) On disconnect, the client sends an exit message, letting the other
users know that they are no longer online; making it safe for the
client to disconnect
"""
import json
from twisted.internet import task
from twisted.internet.protocol import DatagramProtocol
from twisted.python import log
HEARTBEAT = "HEARTBEAT"
EXIT = "EXIT"
class PeerList(object):
"""
A simple structure meant to manage the other peers. Supports a limited
set of operations, such as add, remove, exists, and count.
"""
def __init__(self):
self._peers = {}
def add(self, peer):
self._peers[peer.peerId] = peer
def get(self, peerId):
return self._peers.get(peerId)
def remove(self, peerId):
del self._peers[peerId]
def exists(self, peerId):
return self._peers.get(peerId) is not None
def count(self):
return len(self._peers.keys())
def all(self):
""" return an iterable """
return self._peers.itervalues()
class PeerDiscoveryMessage(object):
"""
Contains basic location information for clients to use to initiate a
connection with this peer. Basically, just the user is, what ip they
are using, and what port to connect on
"""
def __init__(self, message, name, address, port):
if isinstance(message, str):
message = unicode(message, "utf-8")
if isinstance(name, str):
name = unicode(name, "utf-8")
        if isinstance(address, str):
            address = unicode(address, "utf-8")
self.message = message
self.name = name
self.address = address
self.port = port
def serialize(self):
return json.dumps({
"message": self.message.encode("utf-8"),
"name": self.name.encode("utf-8"),
"address": self.address.encode("utf-8"),
"port": self.port
})
@classmethod
def parseDatagram(klass, datagram):
"""
Given a datagram formatted using JSON, return a new message object.
"""
msg = json.loads(datagram)
peerMsg = msg["message"]
peerName = msg["name"]
peerAddress = msg["address"]
peerPort = msg["port"]
return klass(peerMsg, peerName, peerAddress, peerPort)
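# Example heartbeat datagram (illustrative values):
#
#   '{"message": "HEARTBEAT", "name": "alice",
#     "address": "192.168.1.10", "port": 8000}'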
class Peer(object):
"""
    A peer is another user located on a different system. Maintains the user's
peerId, username, IP address, and port.
"""
def __init__(self, name, address, port):
self.peerId = makePeerId(name, address, port)
if isinstance(name, str):
name = unicode(name, "utf-8")
if isinstance(address, str):
address = unicode(address, "utf-8")
self.name = name
self.address = address
self.port = port
def serialize(self):
return json.dumps({
"peerId": self.peerId.encode("utf-8"),
"name": self.name.encode("utf-8"),
"address": self.address.encode("utf-8"),
"port": self.port
})
def __eq__(self, other):
return self.peerId == other.peerId
def makePeerId(name, address, port):
"""
Create a unique peerId for a peer.
:param name: the name of a peer
:param address: the ip address of a peer
:param port: the port being used
:returns string: an peerId
"""
if isinstance(name, str):
name = unicode(name, "utf-8")
    if isinstance(address, str):
        address = unicode(address, "utf-8")
return u"{0}_{1}_{2}".format(name, address, port)
class PeerDiscoveryProtocol(DatagramProtocol):
"""
UDP protocol used to find others running the same program.
The protocol will do several things, on program start, a connection
message will be sent; basically announcing itself as a node to the network.
Then the protocol will regularly send a heartbeat message at a defined
interval.
Once the peer has decided to disconnect, it will send an exit message to
alert the other nodes of its demise.
:param reactor: the reactor being used.
:param peers: a data structure in which peers can be stored, implements
IPeerList
    :param name: the username you'd like to broadcast.
:param multiCastAddress: the multicast address to broadcast.
:param multiCastPort: the port on which to broadcast.
:param address: the IP address to broadcast. This is for the current user.
:param port: the Port to broadcast where other users can connect.
"""
def __init__(self, reactor, peerList, name, multiCastAddress,
multiCastPort, address, port):
"""
Set up an instance of the PeerDiscovery protocol by creating
the message information needed to broadcast other instances
of the protocol running on the same network.
"""
self._peers = peerList
self.peerId = makePeerId(name, address, port)
self.name = name
self.reactor = reactor
# these need to be strings
self.multiCastAddress = multiCastAddress
self.multiCastPort = multiCastPort
self.address = address
self.port = port
self.loop = None
def sendMessage(self, message):
self.transport.write(message,
(self.multiCastAddress, self.multiCastPort))
def startProtocol(self):
self.transport.setTTL(5)
self.transport.joinGroup(self.multiCastAddress)
self.loop = task.LoopingCall(self.sendHeartBeat)
self.loop.start(5)
def sendHeartBeat(self):
"""
Sends message alerting other peers to your presence.
"""
message = PeerDiscoveryMessage(HEARTBEAT,
self.name,
self.address,
self.port).serialize()
self.sendMessage(message)
log.msg("Sent ", message)
def stopProtocol(self):
"""
Gracefully tell peers to remove you.
"""
# XXX this needs to occur at shut down!
message = PeerDiscoveryMessage(EXIT,
self.name,
self.address,
self.port).serialize()
self.sendMessage(message)
if self.loop is not None:
self.loop.stop()
log.msg("Exit ", message)
def datagramReceived(self, datagram, address):
"""
Handles how datagrams are read when they are received. Here, as this
is a json serialised message, we are pulling out the peer information
and placing it in a list.
"""
parsed = PeerDiscoveryMessage.parseDatagram(datagram)
peerId = makePeerId(parsed.name, parsed.address, parsed.port)
# ignore those messages from yourself
if parsed.address == self.address:
return
log.msg("Decoding:{0} from {1}", datagram, address)
if parsed.message == EXIT:
if self._peers.exists(peerId):
self._peers.remove(peerId)
log.msg("dropping peer:", address)
elif parsed.message == HEARTBEAT:
if not self._peers.exists(peerId):
newPeer = Peer(parsed.name, parsed.address, parsed.port)
self._peers.add(newPeer)
log.msg("new Peer: address: {0}", parsed.name)
|
Sometimes you need to do what is right for your mentality, even if it puts your body under physical stress. Feel free to disagree with me, but the back/knee/hip pain was absolutely worth the drive to Stanwell Park Beach to watch the sun rise on Midwinter’s Day.
With the rising of the Midwinter Sun, may there be Peace in the East.
Six weeks ago I hurt my back and I’ve been off work ever since, and with the cats not being great conversationalists, I needed to get out and I needed to celebrate this day.
Midwinter’s Day – to me – is like Herne’s birthday. It’s his day where he takes his crown as the Oak King to lead the Southern Hemisphere out of the harsh winter (that’s still to come as it’s a late winter) and prepares us for the warmth of Spring.
Given my disconnection of late, given that I’ve been concentrating on healing my body, I needed to do something to heal/stimulate and reconnect my mind with the spiritual divine. My feet have never been so cold on sand (and I grew up in Victoria) but watching the sun rise, being able to perform my morning greeting, and rededicating my path to Herne the Hunter was worth every moment of discomfort.
Same sunrise, different camera on the phone.
At this current moment I have entered the tarot archetype of the Fool (or ‘Wanderer’ as it’s known in my deck of the Wildwood Tarot). I have resigned from my job due to my back problems, I am slowly packing the house, and next month we will have relocated 490km south-west to the Riverina.
Also, if you’re in Sydney and would like to work with the tarot in a way you may not have considered before, I recommend coming to Michelle from Seeking Spirit’s tarot workshop. Each workshop is dedicated to an individual card from the Major Arcana, which really allows you to dive deep into meaning, its placement on the journey, and journeying into the card itself. Even if you’ve been practicing tarot for a lifetime, shared knowledge from open group discussion can open your awareness to concepts you may not have considered.
We discussed the Fool today. Given the journey I am about to embark on, I am the Fool stepping out of the cave, terrified of what’s to come, but anxious for the journey all the same.
Moving to the Riverina is a massive leap, and it’s a leap we’ve been wanting to do since we first bought land in the region five years ago. We’re moving from Australia’s largest city to a small town with no traffic lights or parking meters. Given the stress on my body from my desk job from the last two projects (3-4 years) this is going to be a welcome change. You wouldn’t normally think a desk job can be stressful, but I have two discs in my lower back that say otherwise.
The move also means new land – new land energies, new seasons, and a different environmental history. The colour of the landscape is different, the eucalyptus are different, flowers will bloom at a different time than those in Sydney (and those in my part of Sydney bloom differently than Coastal and Mountainous Sydney).
On the journey between Tarcutta and Wagga Wagga I’ve always had a sense of being home. I cannot wait to see if that sensation carries over to where we will be living.
|
from __future__ import absolute_import
import six
from functools import total_ordering
from sentry.utils.compat import implements_to_string
@implements_to_string
@total_ordering
class Problem(object):
# Used for issues that may render the system inoperable or have effects on
# data integrity (e.g. issues in the processing pipeline.)
SEVERITY_CRITICAL = 'critical'
# Used for issues that may cause the system to operate in a degraded (but
# still operational) state, as well as configuration options that are set
# in unexpected ways or deprecated in future versions.
SEVERITY_WARNING = 'warning'
# Mapping of severity level to a priority score, where the greater the
# score, the more critical the issue. (The numeric values should only be
# used for comparison purposes, and are subject to change as levels are
# modified.)
SEVERITY_LEVELS = {
SEVERITY_CRITICAL: 2,
SEVERITY_WARNING: 1,
}
def __init__(self, message, severity=SEVERITY_CRITICAL, url=None):
assert severity in self.SEVERITY_LEVELS
self.message = six.text_type(message)
self.severity = severity
self.url = url
def __eq__(self, other):
return self.SEVERITY_LEVELS[self.severity] == self.SEVERITY_LEVELS[other.severity]
def __lt__(self, other):
return self.SEVERITY_LEVELS[self.severity] < self.SEVERITY_LEVELS[other.severity]
def __str__(self):
return self.message
@classmethod
def threshold(cls, severity):
threshold = cls.SEVERITY_LEVELS[severity]
def predicate(problem):
return cls.SEVERITY_LEVELS[problem.severity] >= threshold
return predicate
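# Usage sketch (illustrative): ``threshold`` builds a predicate that keeps only
# problems at or above a given severity.
#
#   warnings_and_up = Problem.threshold(Problem.SEVERITY_WARNING)
#   serious = [p for p in problems if warnings_and_up(p)]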
class StatusCheck(object):
def check(self):
"""
Perform required checks and return a list of ``Problem`` instances.
"""
raise NotImplementedError
|
# typedesc.py - classes representing C type descriptions
try:
set
except NameError:
from sets import Set as set
class Argument(object):
"a Parameter in the argument list of a callable (Function, Method, ...)"
def __init__(self, atype, name):
self.atype = atype
self.name = name
class _HasArgs(object):
def __init__(self):
self.arguments = []
def add_argument(self, arg):
assert isinstance(arg, Argument)
self.arguments.append(arg)
def iterArgTypes(self):
for a in self.arguments:
yield a.atype
def iterArgNames(self):
for a in self.arguments:
yield a.name
def fixup_argtypes(self, typemap):
for a in self.arguments:
a.atype = typemap[a.atype]
################
class Alias(object):
# a C preprocessor alias, like #define A B
def __init__(self, name, alias, typ=None):
self.name = name
self.alias = alias
self.typ = typ
class Macro(object):
# a C preprocessor definition with arguments
def __init__(self, name, args, body):
# all arguments are strings, args is the literal argument list
# *with* the parens around it:
# Example: Macro("CD_INDRIVE", "(status)", "((int)status > 0)")
self.name = name
self.args = args
self.body = body
class File(object):
def __init__(self, name):
self.name = name
class Function(_HasArgs):
location = None
def __init__(self, name, returns, attributes, extern):
_HasArgs.__init__(self)
self.name = name
self.returns = returns
self.attributes = attributes # dllimport, __stdcall__, __cdecl__
self.extern = extern
class Constructor(_HasArgs):
location = None
def __init__(self, name):
_HasArgs.__init__(self)
self.name = name
class OperatorFunction(_HasArgs):
location = None
def __init__(self, name, returns):
_HasArgs.__init__(self)
self.name = name
self.returns = returns
class FunctionType(_HasArgs):
location = None
def __init__(self, returns, attributes):
_HasArgs.__init__(self)
self.returns = returns
self.attributes = attributes
class Method(_HasArgs):
location = None
def __init__(self, name, returns):
_HasArgs.__init__(self)
self.name = name
self.returns = returns
class FundamentalType(object):
location = None
def __init__(self, name, size, align):
self.name = name
if name != "void":
self.size = int(size)
self.align = int(align)
class PointerType(object):
location = None
def __init__(self, typ, size, align):
self.typ = typ
self.size = int(size)
self.align = int(align)
class Typedef(object):
location = None
def __init__(self, name, typ):
self.name = name
self.typ = typ
class ArrayType(object):
location = None
def __init__(self, typ, min, max):
self.typ = typ
self.min = min
self.max = max
class StructureHead(object):
location = None
def __init__(self, struct):
self.struct = struct
class StructureBody(object):
location = None
def __init__(self, struct):
self.struct = struct
class _Struct_Union_Base(object):
location = None
def get_body(self):
return self.struct_body
def get_head(self):
return self.struct_head
class Structure(_Struct_Union_Base):
def __init__(self, name, align, members, bases, size, artificial=None):
self.name = name
self.align = int(align)
self.members = members
self.bases = bases
self.artificial = artificial
if size is not None:
self.size = int(size)
else:
self.size = None
self.struct_body = StructureBody(self)
self.struct_head = StructureHead(self)
class Union(_Struct_Union_Base):
def __init__(self, name, align, members, bases, size, artificial=None):
self.name = name
self.align = int(align)
self.members = members
self.bases = bases
self.artificial = artificial
if size is not None:
self.size = int(size)
else:
self.size = None
self.struct_body = StructureBody(self)
self.struct_head = StructureHead(self)
class Field(object):
def __init__(self, name, typ, bits, offset):
self.name = name
self.typ = typ
self.bits = bits
self.offset = int(offset)
class CvQualifiedType(object):
def __init__(self, typ, const, volatile):
self.typ = typ
self.const = const
self.volatile = volatile
class Enumeration(object):
location = None
def __init__(self, name, size, align):
self.name = name
self.size = int(size)
self.align = int(align)
self.values = []
def add_value(self, v):
self.values.append(v)
class EnumValue(object):
def __init__(self, name, value, enumeration):
self.name = name
self.value = value
self.enumeration = enumeration
class Variable(object):
location = None
def __init__(self, name, typ, init=None):
self.name = name
self.typ = typ
self.init = init
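# --- Illustrative sketch (not part of the original module) ---
# A minimal, hand-built example of how these node classes might be wired together,
# roughly what a gccxml-style parser could emit for "struct point { int x; int y; };".
# The names "point"/"x"/"y" and the bit sizes/offsets are assumptions for illustration.
def _example_point_struct():
    int_t = FundamentalType("int", size=32, align=32)
    point = Structure("point", align=32, members=[], bases=[], size=64)
    point.members = [Field("x", int_t, bits=None, offset=0),
                     Field("y", int_t, bits=None, offset=32)]
    return point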
################################################################
|
Tangled Tides did not win me over in the first chapter, or even the second for that matter, but the description had me so intrigued that I didn't want to stop for fear of missing out on something great. I mean.. what's more precious to Yara than HER LIFE?! I just had to know. My gut was right and I ended up loving this story!
This story is a combination of multiple paranormal / fantastical creatures. It's not just the mermaids and selkies the description mentions. There are also bird-like Sirens, ghost-like figures, and Gorgons!! Yes Gorgons, as in Medusa. Snake hair and a gaze that turns you to stone MEDUSA! Karen Amanda Hooper added elements that basically made it so I could not put this story down, because I just HAD to know how it all tied together.
The characters were strong, though some more than others. You may think there is going to be a love triangle at the beginning of the story, but rest assured there is not. I really enjoyed the push and pull of the romance that was there and really can't wait to see how it grows! Yara was a very... stubborn girl... but she has her reasons. With all the lying going on around her and being forced to become a mermaid against her will, who wouldn't be a bit pissed off and hard-headed?
Sometimes the world got a bit confusing with so many elements going on. At times it frustrated me, and at others I thought, "This is probably how Yara feels about everything that's going on. Overwhelmed."
The story has some great scenes. The concepts were wonderful and really had me entranced during certain scenes. There is some wonderful humor, interesting mythology, new ideas on mer and selkie lore, some heartbreak and deceit.... It just really all blended together wonderfully.
There were definitely some complications with the story. Some ideas that didn't quite mesh well, some moments when it seemed over the top, and some writing that didn't quite feel polished. BUT the overall story held me and had me wanting more. None of those things made me want to stop the book or even put it down for a moment.
I would highly recommend Tangled Tides to fans of Mer and Selkie stories, and also fans of mythology. These elements are combined in a way that I hadn't experienced before and I think other readers will really enjoy it!
|
import random
import re
import json
import operator
from itertools import chain
from datetime import datetime, timedelta, date, time
import calendar
from django.urls import reverse
from django.shortcuts import render, redirect, get_object_or_404, render_to_response
from django.http import HttpResponse, HttpResponseRedirect, Http404, JsonResponse
from django.views import View
from django.views.generic.detail import DetailView
from django.conf import settings
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.utils.datastructures import MultiValueDictKeyError
from django.core.mail import send_mail
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth import authenticate, login, logout
from .forms import *
from main.models import ServiceCategory, Service, DetailedService
from .models import *
from django.db.models import F, Q
from .Day import Day
def dayPeriods(hour=9, minute=0, second=0):
    # Stub: builds the working-day start time (the actual slot generation lives in the Day class below).
    start_day = time(hour, minute, second)
    return start_day
class Search(object):
def __init__(self, search_str):
# super(Search, self).__init__(search_feadback)
self.to_find_fn_and_ln = re.match(u"(?P<first_name>[\u0400-\u0500]+) (?P<last_name>[\u0400-\u0500]+)",
search_str, re.U)
self.to_find_fn_or_ln = re.match(u"^(?P<some_name>[\u0400-\u0500]+)$|^([\u0400-\u0500]+[\s]+)$",
search_str, re.U)
self.to_find_tel = re.match(r"^(?:([+]\d{1,2}))?[\s.-]?(\d{3})?[\s.-]?(\d{3})?[\s.-]?(\d{2})?[\s.-]?(\d{2})$",
search_str, re.U)
self.to_find_email = re.match(r'(?:[a-z0-9!#$%&*+/=?^_`{|}~-]+'
r'(?:\.[a-z0-9!#$%&*+/=?^_`{|}~-]+)*|'
r'"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|'
r'\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+'
r'[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}'
r'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:'
r'(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|'
r'\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])', search_str, re.U)
def serchFeadback(self):
if self.to_find_fn_and_ln:
# print(to_find_fn_and_ln.group('first_name'))
q_first = Q(first=self.to_find_fn_and_ln.group('first_name'))
q_last = Q(last=self.to_find_fn_and_ln.group('last_name'))
feadback_list = Feadback.objects.filter(q_first, q_last).order_by('id')
elif self.to_find_fn_or_ln:
some_name = re.findall(u'[\u0400-\u0500]+', self.to_find_fn_or_ln.group(0), re.U)[0]
# print(some_name)
q_some1 = Q(first__contains=some_name)
q_some2 = Q(last__contains=some_name)
feadback_list = Feadback.objects.filter(q_some1 | q_some2).order_by('id')
elif self.to_find_tel:
q_tel = Q(tel=self.to_find_tel.group())
feadback_list = Feadback.objects.filter(q_tel).order_by('id')
else:
feadback_list = [None]
return feadback_list
def serchClient(self):
if self.to_find_fn_and_ln:
# print(to_find_fn_and_ln.group('first_name'))
q_first = Q(first=self.to_find_fn_and_ln.group('first_name'))
q_last = Q(last=self.to_find_fn_and_ln.group('last_name'))
client_list = Client.objects.filter(q_first, q_last).order_by('id')
elif self.to_find_fn_or_ln:
some_name = re.findall(u'[\u0400-\u0500]+', self.to_find_fn_or_ln.group(0), re.U)[0]
# print(some_name)
q_some1 = Q(first__contains=some_name)
q_some2 = Q(last__contains=some_name)
client_list = Client.objects.filter(q_some1 | q_some2).order_by('id')
elif self.to_find_tel:
q_tel = Q(tel=self.to_find_tel.group())
client_list = Client.objects.filter(q_tel).order_by('id')
        elif self.to_find_email:
            q_email = Q(email=self.to_find_email.group())
            client_list = Client.objects.filter(q_email).order_by('id')
else:
client_list = [None]
return client_list
def searchByNameTel(search_feadback):
to_find_fn_and_ln = re.match(u"(?P<first_name>[\u0400-\u0500]+) (?P<last_name>[\u0400-\u0500]+)", search_feadback,
re.U)
# print(to_find_fn_and_ln)
to_find_fn_or_ln = re.match(u"^(?P<some_name>[\u0400-\u0500]+)$|^([\u0400-\u0500]+[\s]+)$", search_feadback, re.U)
# print(to_find_fn_or_ln)
to_find_tel = re.match(r"^(?:([+]\d{1,2}))?[\s.-]?(\d{3})?[\s.-]?(\d{3})?[\s.-]?(\d{2})?[\s.-]?(\d{2})$",
search_feadback, re.U)
# print(to_find_tel)
if to_find_fn_and_ln:
# print(to_find_fn_and_ln.group('first_name'))
q_first = Q(first=to_find_fn_and_ln.group('first_name'))
q_last = Q(last=to_find_fn_and_ln.group('last_name'))
feadback_list = Feadback.objects.filter(q_first, q_last).order_by('id')
elif to_find_fn_or_ln:
some_name = re.findall(u'[\u0400-\u0500]+', to_find_fn_or_ln.group(0), re.U)[0]
# print(some_name)
q_some1 = Q(first__contains=some_name)
q_some2 = Q(last__contains=some_name)
feadback_list = Feadback.objects.filter(q_some1 | q_some2).order_by('id')
elif to_find_tel:
q_tel = Q(tel=to_find_tel.group())
feadback_list = Feadback.objects.filter(q_tel).order_by('id')
else:
feadback_list = [None]
return feadback_list
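# --- Illustrative sketch (assumption, not part of the original views) ---
# Shows how the regex dispatch in Search classifies a raw query string: a Cyrillic
# "first last" pair hits the full-name branch, a single Cyrillic word hits the
# name-or-surname branch, digit groups hit the phone branch, and an address with "@"
# hits the email branch. The helper name below is made up for illustration only.
def _classify_search_string(search_str):
    s = Search(search_str)
    if s.to_find_fn_and_ln:
        return "first and last name"
    elif s.to_find_fn_or_ln:
        return "single name"
    elif s.to_find_tel:
        return "phone number"
    elif s.to_find_email:
        return "email"
    return "unrecognized"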
class QueryByPeriod(object):
@classmethod
def byDay(cls, min_datetime=None):
if min_datetime:
min_date = min_datetime.date()
else:
min_date = datetime.date(timezone.now())
max_date = min_date + timedelta(days=1)
q_object = Q()
q_object &= Q(date__gte=min_date)
q_object &= Q(date__lt=max_date)
return q_object
@classmethod
def byWeek(cls, min_datetime=None):
if min_datetime:
min_date = datetime.date(min_datetime - timedelta(days=datetime.weekday(min_datetime)))
else:
min_date = datetime.date(
datetime.now(timezone.utc) - timedelta(days=datetime.weekday(datetime.now(timezone.utc))))
max_date = min_date+timedelta(days=7)
q_object = Q()
q_object &= Q(date__gt=min_date)
q_object &= Q(date__lt=max_date)
return q_object
@classmethod
def byMonth(cls, min_datetime=None):
if min_datetime:
min_date = datetime.date(min_datetime - timedelta(days=min_datetime.day - 1))
else:
min_date = datetime.date(datetime.now(timezone.utc) - timedelta(days=datetime.now(timezone.utc).day - 1))
max_date = date(int(min_date.year), int(min_date.month)+1, 1)
q_object = Q()
q_object &= Q(date__gte=min_date)
q_object &= Q(date__lt=max_date)
return q_object
@staticmethod
def queryOnday(query_obj, min_datetime = None):
print(query_obj)
query = query_obj.filter(QueryByPeriod.byDay(min_datetime))
print(query)
if query.count() < 1:
return None
return query.order_by('date')
@staticmethod
def queryOnweek(query_obj, min_datetime = None):
query = query_obj.filter(QueryByPeriod.byWeek(min_datetime))
if query.count() < 1:
return None
return query.order_by('date')
@staticmethod
def queryOnmonth(query_obj, min_datetime = None):
query = query_obj.filter(QueryByPeriod.byMonth(min_datetime))
if query.count() < 1:
return None
return query.order_by('date')
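# --- Illustrative sketch (assumption): how the period Q objects above are meant to be used ---
# Each by* classmethod builds a Q covering [period_start, period_end) on the "date" field,
# and the queryOn* helpers simply apply it to a queryset and order by date. Spelled out
# for "today's tasks" (equivalent to QueryByPeriod.queryOnday(Task.objects.all())):
def _tasks_for_today_example():
    return Task.objects.filter(QueryByPeriod.byDay()).order_by('date')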
class EventList(object):
@classmethod
def byDay(cls, min_datetime):
if not min_datetime:
min_datetime = datetime.date(timezone.now())
else:
min_datetime = min_datetime.date()
max_datetime = min_datetime + timedelta(days=1)
q_object = Q()
q_object &= ~Q(status="failed")
q_object &= Q(date_time__gt=min_datetime)
q_object &= Q(date_time__lt=max_datetime)
return q_object
@classmethod
def byWeek(cls, min_datetime):
if not min_datetime:
min_datetime = datetime.date(datetime.now(timezone.utc)-timedelta(days=datetime.weekday(datetime.now(timezone.utc))))
else:
min_datetime = datetime.date(min_datetime-timedelta(days=datetime.weekday(min_datetime)))
max_datetime = min_datetime+timedelta(days=7)
q_object = Q()
q_object &= ~Q(status="failed")
q_object &= Q(date_time__gt=min_datetime)
q_object &= Q(date_time__lt=max_datetime)
return q_object
@classmethod
def byMonth(cls, min_datetime):
if not min_datetime:
min_datetime = datetime.date(datetime.now(timezone.utc)-timedelta(days=datetime.now(timezone.utc).day-1))
else:
min_datetime = datetime.date(min_datetime-timedelta(days=min_datetime.day-1))
max_datetime = date(int(min_datetime.year), int(min_datetime.month)+1, 1)
q_object = Q()
q_object &= ~Q(status="failed")
q_object &= Q(date_time__gte=min_datetime)
q_object &= Q(date_time__lt=max_datetime)
return q_object
@staticmethod
def eventsOnday(min_datetime = None):
events = Event.objects.all().filter(EventList.byDay(min_datetime))
if events.count() < 1:
return None
return events.order_by('date_time')
@staticmethod
def eventsOnweek(min_datetime = None):
events = Event.objects.all().filter(EventList.byWeek(min_datetime))
if events.count() < 1:
return None
return events.order_by('date_time')
@staticmethod
def eventsOnmonth(min_datetime = None):
events = Event.objects.all().filter(EventList.byMonth(min_datetime))
if events.count() < 1:
return None
return events.order_by('date_time')
class EventPeriod(object):
def isInPast(self):
if self.event_start < timezone.now():
return True
return False
def __init__(self, event_obj):
self.event_start = event_obj.date_time
self.event_end = event_obj.date_time + timedelta(hours=event_obj.duration.hour, minutes=event_obj.duration.minute)
self.is_in_past = EventPeriod.isInPast(self)
def __str__(self):
return "%s %s"%(str(self.event_start), str(self.event_end))
class Period(object):
def __init__(self, start_period, length_period = timedelta(minutes=30)):
self.period_datetime = str(start_period)
self.render = "{0:%H:%M}".format(start_period.time())
self.start_period = start_period
self.length_period = length_period
self.end_period = start_period + length_period
self.start_event = None
self.event = None
# self.contain_event = None
self.in_past = None
def __str__(self):
return "%s %s"%(self.start_period, self.event)
def isPeriodInPast(self):
if self.start_period < timezone.now():
self.in_past = True
return True
return False
    def isEventStartInPeriod(self, event_obj=None):
        if event_obj is None:
            return False
        event_period_obj = EventPeriod(event_obj)
eve_st_gte_per_st = event_period_obj.event_start >= self.start_period
eve_st_lt_per_en = event_period_obj.event_start < self.end_period
if eve_st_gte_per_st and eve_st_lt_per_en:
# Period.extendPeriod(self, event_period_obj)
# while self.end_period < event_period_obj.event_end:
# self.end_period += self.length_period
self.start_event = True
return True
return False
    def isEventEndInPeriod(self, event_obj=None):
        if event_obj is None:
            return False
        event_period_obj = EventPeriod(event_obj)
eve_en_gt_per_st = event_period_obj.event_end > self.start_period
eve_en_lte_per_en = event_period_obj.event_end <= self.end_period
if eve_en_gt_per_st and eve_en_lte_per_en:
# Period.extendPeriod(self, event_period_obj)
# while self.end_period < event_period_obj.event_end:
# self.end_period += self.length_period
# self.event = event_obj
return True
return False
def extendPeriod(self, event_obj):
event_period_obj = EventPeriod(event_obj)
while self.end_period < event_period_obj.event_end:
self.end_period += self.length_period
    def isEventInPeriod(self, event_obj=None):
        if event_obj is None:
            return False
        event_period_obj = EventPeriod(event_obj)
# self.contain_event = False
eve_st_gte_per_st = event_period_obj.event_start >= self.start_period
eve_st_lt_per_en = event_period_obj.event_start < self.end_period
eve_st_lt_per_st = event_period_obj.event_start < self.start_period
eve_en_gt_per_st = event_period_obj.event_end > self.start_period
eve_en_lte_per_en = event_period_obj.event_end <= self.end_period
eve_en_gt_per_en = event_period_obj.event_end > self.end_period
# if (eve_st_gte_per_st and eve_st_lt_per_en) or (eve_en_gte_per_st and eve_en_lt_per_en):
if (eve_st_gte_per_st and eve_st_lt_per_en) or \
(eve_st_lt_per_st and eve_en_gt_per_en) or \
(eve_en_gt_per_en and eve_en_lte_per_en) or \
(eve_en_gt_per_st and eve_en_lte_per_en):
# self.contain_event = True
self.event = event_obj
return True
return False
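# --- Illustrative sketch (assumption): the interval test behind Period.isEventInPeriod ---
# An event belongs to a 30-minute slot when it starts inside it, ends inside it, or spans
# it completely; the method above enumerates those cases with comparisons against the slot
# boundaries. A standalone, simplified equivalent over plain datetimes:
def _intervals_overlap(event_start, event_end, slot_start, slot_end):
    return event_start < slot_end and event_end > slot_start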
class Day(object):
def timePeriods(self):
period = Period(self.start_day)
period_list = []
if self.event_list:
event_list = list(self.event_list)
else:
event_list = []
stop = 20
while period.end_period <= self.end_day:# or stop > 0:
stop -= 1
# print(period)
period.isPeriodInPast()
if len(event_list) > 0:
event_obj = event_list[0]
# event_period_obj = EventPeriod(event_obj)
# print(event_period_obj)
# print(period.isEventStartInPeriod(event_period_obj, event_obj))
if period.isEventStartInPeriod(event_obj):
period.isEventInPeriod(event_obj)
# period.event = event_obj
period.extendPeriod(event_obj)
event_list.pop(0)
period_list.append(period)
period = Period(period_list[-1].end_period)
return period_list
    def __init__(self, event_list=None, day_date=None, start_day=time(9, 0, 0), end_day=time(20, 0, 0)):
        # evaluate "today" per call rather than once at import time
        if day_date is None:
            day_date = datetime.date(timezone.now())
weekdays_name = {1:"Понедельник",
2:"Вторник",
3:"Среда",
4:"Четверг",
5:"Пятница",
6:"Суббота",
7:"Воскресенье"}
self.event_list = event_list
self.day_date = day_date
self.start_day = datetime.combine(self.day_date, start_day)
self.end_day = datetime.combine(self.day_date, end_day)
# print(self.start_day, self.end_day)
self.calendar_data = self.day_date.isocalendar()
self.day_of_week = weekdays_name[self.day_date.isoweekday()]
self.time_periods = Day.timePeriods(self)
self.sorted_time_periods = sorted(self.time_periods, key=lambda x: x.start_period)
# self.sorted_time_periods = sorted(self.time_periods.items(), key=operator.itemgetter(0))
def __str__(self):
return "{0:%d} {0:%m} {0:%Y}".format(self.day_date)
def __repr__(self):
return "{0:%d} {0:%m} {0:%Y}".format(self.day_date)
def __iter__(self):
return iter(self.sorted_time_periods)
class WeekDay(Day):
def timePeriods(self):
period = Period(self.start_day)
period_list = []
if self.event_list:
event_list = list(self.event_list)
else:
event_list = []
stop = 20
while period.end_period <= self.end_day:# or stop > 0:
stop -= 1
# print(period)
period.isPeriodInPast()
if len(event_list) > 0:
event_obj = event_list[0]
# event_period_obj = EventPeriod(event_obj)
# print(event_period_obj)
# print(period.isEventStartInPeriod(event_obj))
# print(period.isEventEndInPeriod(event_obj))
# if period.isEventInPeriod(event_obj):
# period.contain_event = True
# if period.isEventStartInPeriod(event_obj):
# period.event = event_obj
period.isEventInPeriod(event_obj)
period.isEventStartInPeriod(event_obj)
if period.isEventEndInPeriod(event_obj):
event_list.pop(0)
period_list.append(period)
period = Period(period_list[-1].end_period)
return period_list
    def __init__(self, event_list=None, day_date=None, start_day=time(9, 0, 0), end_day=time(20, 0, 0)):
        Day.__init__(self, event_list, day_date, start_day, end_day)
weekdays_name = {1:"Понедельник",
2:"Вторник",
3:"Среда",
4:"Четверг",
5:"Пятница",
6:"Суббота",
7:"Воскресенье"}
self.calendar_data = self.day_date.isocalendar()
self.day_of_week = weekdays_name[self.day_date.isoweekday()]
self.time_periods = WeekDay.timePeriods(self)
self.sorted_time_periods = sorted(self.time_periods, key=lambda x: x.start_period)
def __str__(self):
return "{0:%d} {0:%m} {0:%Y}".format(self.day_date)
def __iter__(self):
return iter(self.sorted_time_periods)
class Week(object):
def weekDays(self):
days_list = []
current_day = self.start_week
next_day = self.start_week + timedelta(days=1)
for day in range(7):
q_start_day = Q(date_time__gte=current_day)
q_end_day = Q(date_time__lt=next_day)
day_event_list = self.event_list
if self.event_list:
day_event_list = self.event_list.filter(q_start_day, q_end_day)
# print(day_event_list)
day = WeekDay(day_event_list, current_day)
# print(day)
days_list.append(day)
# print(current_day)
current_day += timedelta(days=1)
next_day += timedelta(days=1)
return days_list
    def __init__(self, event_list=None, date_time=None, start_day=time(9, 0, 0), end_day=time(20, 0, 0)):
        if date_time is None:
            date_time = datetime.now(timezone.utc)
        self.start_week = datetime.date(date_time - timedelta(days=datetime.weekday(date_time)))
self.end_week = self.start_week + timedelta(days=7)
self.event_list = event_list
self.day_date = datetime.date(datetime.now(timezone.utc))
self.start_day = datetime.combine(self.day_date, start_day)
self.end_day = datetime.combine(self.day_date, end_day)
self.week_days = Week.weekDays(self)
self.start_date = str(self.start_week)
def __str__(self):
return str(self.week_days)
def __iter__(self):
return iter(self.week_days)
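# --- Illustrative sketch (assumption): building the calendar grids used by the views ---
# A Day splits working hours (09:00-20:00) into 30-minute Period slots and attaches today's
# events to them; a Week builds seven WeekDay columns the same way. This mirrors how CrmMain
# and CrmCalendar below construct their "periods" and "week_periods" context variables.
def _todays_schedule_example():
    day_grid = Day(event_list=EventList.eventsOnday())
    week_grid = Week(event_list=EventList.eventsOnweek())
    return day_grid, week_grid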
class Login(View):
def get(self, request):
login_form = MyAuthenticationForm()
return render(request, "login.html", {"login_form": login_form})
def post(self, request):
if request.POST.get("submit") == "login":
login_form = MyAuthenticationForm(None, data=request.POST)
# return HttpResponse(login_form)
if login_form.is_valid():
# return HttpResponse(123)
client_data = login_form.cleaned_data
user = authenticate(request, **login_form.cleaned_data)
if user is not None:
login(request, user)
return redirect('/crm/')
return HttpResponse(user)
# return HttpResponse("isn't valid")
elif request.POST.get("submit") == "logout":
logout(request)
return redirect('/crm/login/')
class LoginRequiredView(LoginRequiredMixin, View):
login_url = '/crm/login/'
redirect_field_name = '/crm/login/'
class CrmMain(LoginRequiredView):
# @classmethod
# def addClientToEvent(self):
# clients = Client.objects.all()
# events = Event.objects.all()
# for event in events:
# if not event.client:
# tel = event.feadback.tel
# for client in clients:
# if client.tel == tel:
# event.client = client
# event.save()
def get(self, request):
# CrmMain.addClientToEvent(self)
context = {"user": request.user}
serch_form = SearchForm()#initial={'search': '%sпоиск'%search_icon})
serch_feadback_form = SearchFeadbackForm()
feadback_list = Feadback.objects.all()
feadback_list_inwork = feadback_list.filter(has_event=False)
if feadback_list_inwork:
feadback_list_inwork.order_by('date')
feadback_list_done = feadback_list.filter(has_event=True)
if feadback_list_done:
feadback_list_done.order_by('-date')
feadback_list = list(feadback_list_inwork)+list(feadback_list_done)
# print(Task.objects.filter(done=False))
task_list = Task.objects.all().order_by("-date")
context["task_list"] = task_list
periods = Day(event_list=EventList.eventsOnday())
# return HttpResponse(EventList.eventsOnday(self))
context["serch_form"] = serch_form
context["serch_feadback_form"] = serch_feadback_form
context["feadback_list"] = feadback_list
context["periods"] = periods
return render(request, "crm_main.html", context)
def post(self, request):
context = {}
search_feadback = u"%s"%str(request.POST.get("search_feadback"))
# print(search_feadback)
serch_all = re.findall(u'[\u0400-\u0500]+', search_feadback, re.U)
# print(serch_all)
if request.is_ajax() and search_feadback:
print(Search(search_feadback).serchFeadback())
context["feadback_list"] = Search(search_feadback).serchFeadback()
# context["feadback_list"] = searchByNameTel(search_feadback)
return render(request, "crm_main/feadback_list_ajax.html", context)
class CrmCalendar(LoginRequiredView):
def get(self, request):
context = {"user": request.user}
periods = Day(event_list=EventList.eventsOnday())
week_periods = Week(event_list=EventList.eventsOnweek())
# return HttpResponse(EventList.eventsOnday(self))
context["periods"] = periods
context["week_periods"] = week_periods
return render(request, "crm_calendar.html", context)
def post(self, request):
pass
class CrmClients(LoginRequiredView):
def get(self, request):
context = {}
serch_client_form = SearchClientForm()
client_list = Client.objects.all()
context["serch_client_form"] = serch_client_form
context["client_list"] = client_list
return render(request, "crm_clients.html", context)
def post(self, request):
context = {}
search_client = u"%s" % str(request.POST.get("search_client"))
# print(search_client)
serch_all = re.findall(u'[\u0400-\u0500]+', search_client, re.U)
# print(serch_all)
if request.is_ajax() and search_client:
# print(Search(search_feadback).serchFeadback())
context["client_list"] = Search(search_client).serchClient()
# context["feadback_list"] = searchByNameTel(search_feadback)
return render(request, "crm_clients/clients_list_ajax.html", context)
class QByPeriod(object):
@classmethod
def DaysInMonths(cls, from_year_month, to_year_month):
### from_year_month == (year, month)
### to_year_month == (year, month)
days_sum = 0
if from_year_month[0] < to_year_month[0] or \
(from_year_month[0] == to_year_month[0] and from_year_month[1] < to_year_month[1]):
for year in range(from_year_month[0], to_year_month[0]+1):
print(year)
if year == to_year_month[0]:
print(range(from_year_month[1], to_year_month[1]+1))
for month in range(from_year_month[1], to_year_month[1]+1):
print(month)
days_sum += calendar.monthrange(year, month)[1]
else:
print(range(from_year_month[1], 13))
for month in range(from_year_month[1], 13):
days_sum += calendar.monthrange(year, month)[1]
from_year_month[1] = 1
print(days_sum)
return days_sum
else:
            raise ValueError("from_year_month has to be earlier than to_year_month")
@classmethod
def byMonth(cls, field_name, min_datetime=None):
current = datetime.now(timezone.utc)
if min_datetime:
min_date = datetime.date(min_datetime - timedelta(days=min_datetime.day - 1))
else:
min_date = datetime.date(current - timedelta(days=current.day - 1))
max_date = date(int(min_date.year), int(min_date.month) + 1, 1)
print(min_date, max_date)
filter__gte = field_name + '__' + 'gte'
filter__lt = field_name + '__' + 'lt'
q_object = Q()
q_object &= Q(**{filter__gte: min_date})
q_object &= Q(**{filter__lt: max_date})
return q_object
@classmethod
def byThreeMonths(cls, field_name, min_datetime=None):
current = datetime.now(timezone.utc)
        to_month = [current.year, current.month - 1]
        if (current.month - 2) > 0:
            from_month = [current.year, current.month - 2]
        else:
            from_month = [current.year - 1, 12 + (current.month - 2)]
days = QByPeriod.DaysInMonths(from_month,to_month)
if min_datetime:
min_date = datetime.date(min_datetime - timedelta(days=min_datetime.day - 1 + days))
else:
min_date = datetime.date(
current - timedelta(days=current.day - 1 + days))
max_date = date(int(min_date.year), int(min_date.month) + 3, 1)
filter__gte = field_name + '__' + 'gte'
filter__lt = field_name + '__' + 'lt'
q_object = Q()
q_object &= Q(**{filter__gte: min_date})
q_object &= Q(**{filter__lt: max_date})
return q_object
@classmethod
def byTwelveMonths(cls, field_name, min_datetime=None):
current = datetime.now(timezone.utc)
to_month = [current.year, current.month - 1]
if (current.month - 12) > 0:
from_month = [current.year, current.month - 12]
else:
from_month = [current.year - 1, 12 + (current.month - 12)]
print(from_month, to_month)
days = QByPeriod.DaysInMonths(from_month, to_month)
if min_datetime:
min_date = datetime.date(min_datetime - timedelta(days=min_datetime.day - 1 + days))
else:
min_date = datetime.date(
current - timedelta(days=current.day - 1 + days))
max_date = min_date + timedelta(days=days+calendar.monthrange(current.year, current.month)[1])
filter__gte = field_name + '__' + 'gte'
filter__lt = field_name + '__' + 'lt'
q_object = Q()
q_object &= Q(**{filter__gte: min_date})
q_object &= Q(**{filter__lt: max_date})
return q_object
class QuerySetByPeriod(QByPeriod):
def __init__(self, Query_set, field_name, min_datetime=None):
self.Query_set = Query_set
self.field_name = field_name
self.min_datetime = min_datetime
def getByMounth(self):
return self.Query_set.filter(self.byMonth(self.field_name, self.min_datetime))
def getThreeMonths(self):
return self.Query_set.filter(self.byThreeMonths(self.field_name, self.min_datetime))
def getByTwelveMonths(self):
return self.Query_set.filter(self.byTwelveMonths(self.field_name, self.min_datetime))
def __str__(self):
return str(self.Query_set)
class DataSets(object):
@staticmethod
def RatingByEventFrequency(query_set):
from operator import itemgetter
services_raiting = {}
services_list = []
for event in query_set:
if event.detailed_service in services_list:
services_raiting[event.detailed_service] += 1
            else:
                services_list.append(event.detailed_service)
                # the first occurrence of this service counts as one event
                services_raiting[event.detailed_service] = 1
return sorted(services_raiting.items(), key=itemgetter(1), reverse=True)
@staticmethod
def RatingByDaysLoad(query_set):
services_raiting = {}
for day in range(1,8):
services_raiting[day] = []
for event in query_set:
day = event.date_time.isoweekday()
services_raiting[day].append(event.detailed_service)
return sorted(services_raiting.items(), key=lambda item: len(item[1]), reverse=True)
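# --- Illustrative sketch (assumption): what the DataSets ratings look like ---
# RatingByEventFrequency returns [(detailed_service, count), ...] sorted by count, and
# RatingByDaysLoad returns [(iso_weekday, [services...]), ...] sorted by how many events
# fall on that weekday. CrmStatistic below feeds both of them into its template context.
def _monthly_ratings_example():
    monthly_events = QuerySetByPeriod(Event.objects.all(), "date_time").getByMounth()
    return (DataSets.RatingByEventFrequency(monthly_events),
            DataSets.RatingByDaysLoad(monthly_events))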
class CrmStatistic(LoginRequiredView):
def get(self, request):
context = {}
clients = QuerySetByPeriod(Client.objects.all(), "registration")
print(clients)
events = QuerySetByPeriod(Event.objects.all(), "date_time")
print(clients)
new_clients_by_month = clients.getByMounth()
context["new_clients_by_month"] = new_clients_by_month
print(new_clients_by_month)
new_clients_by_three_month = clients.getThreeMonths()
context["new_clients_by_three_month"] = new_clients_by_three_month
print(new_clients_by_three_month)
new_clients_by_twelve_month = clients.getByTwelveMonths()
context["new_clients_by_twelve_month"] = new_clients_by_twelve_month
print(new_clients_by_twelve_month)
new_events_by_month = events.getByMounth()
context["new_events_by_month"] = new_events_by_month
print("#################")
print(new_events_by_month)
new_events_by_three_month = events.getThreeMonths()
context["new_events_by_three_month"] = new_events_by_three_month
print(new_events_by_three_month)
new_events_by_twelve_month = events.getByTwelveMonths()
context["new_events_by_twelve_month"] = new_events_by_twelve_month
print(new_events_by_twelve_month)
raiting_by_event_frequency_sorted = DataSets.RatingByEventFrequency(new_events_by_month)
context["raiting_by_event_frequency_sorted"] = raiting_by_event_frequency_sorted
print(raiting_by_event_frequency_sorted)
raiting_by_days_load_sorted = DataSets.RatingByDaysLoad(new_events_by_month)
context["raiting_by_days_load_sorted"] = raiting_by_days_load_sorted
print(raiting_by_days_load_sorted)
return render(request, "crm_statistic.html", context)
class CrmFinance(LoginRequiredView):
pass
class DetailedFeadback(LoginRequiredView):
@classmethod
def getModelInstanceData(cls, instance):
data = {}
for f in instance._meta.get_fields():
if not f.auto_created:
data[f.name] = getattr(instance, f.name)
# print(type(f))
return data
def get(self, request, feadback_id):
feadback = get_object_or_404(Feadback, pk=feadback_id)
event_form = EventForm(initial={'feadback': feadback})
context = {}
context["feadback"] = feadback
context["event_form"] = event_form
return render(request, "detailed_feadback.html", context)
def post(self, request, feadback_id):
feadback = get_object_or_404(Feadback, pk=feadback_id)
# print(DetailedFeadback.getModelInstanceData(self, feadback))
client_data = DetailedFeadback.getModelInstanceData(feadback)
del client_data['wish']
del client_data['date']
del client_data['has_event']
client_obj, created = Client.objects.get_or_create(tel=feadback.tel, defaults=client_data)
event_form = EventForm(request.POST)
if event_form.is_valid():
event_data = event_form.cleaned_data
event_data["client"] = client_obj
event_update_obj, event_create_bool = Event.objects.update_or_create(feadback=feadback, defaults=event_data)
feadback.has_event = True
feadback.save()
# if event_create_bool:
# event_form["feadback"].client.services.add(event_update_obj.detailed_service)
# else:
# event_form["feadback"].client.services.
return redirect('/crm/')
class ClientCard(LoginRequiredView):
@classmethod
def updateModelInstanceData(cls, model_inst, data_dict):
for key in data_dict.keys():
setattr(model_inst, key, data_dict[key])
model_inst.save()
@classmethod
def getPrice(cls, event_obj):
try:
return getattr(event_obj, "price")
except AttributeError:
return None
def get(self, request, event_id=None, client_id=None):
print(event_id, client_id)
context = {}
if event_id and not client_id:
print("event")
event = get_object_or_404(Event, pk=event_id)
event_period = EventPeriod(event)
client = event.client
event_list = Event.objects.filter(client=client).order_by("-date_time")
event_price = self.getPrice(event)
print(event_price)
client_form = ClientForm(initial=client.__dict__)
            price_form = PriceForm(initial=event_price.__dict__ if event_price else {})
result_form = ResultForm(initial={
'date': (event.date_time + timedelta(hours=event.duration.hour, minutes=event.duration.minute))})
pay_form = PayForm(initial={
'date_time': (event.date_time + timedelta(hours=event.duration.hour, minutes=event.duration.minute))})
detailed_event_form = DetailedEventForm(initial={})
context["event_id"] = int(event_id)
context["event"] = event
context["event_period"] = event_period
elif client_id and not event_id:
print("client")
client = get_object_or_404(Client, pk=client_id)
event_list = Event.objects.filter(client=client).order_by("-date_time")
client_form = ClientForm(initial=client.__dict__)
price_form = PriceForm(initial={})
result_form = ResultForm(initial={'date': timezone.now()})
pay_form = PayForm(initial={'date_time': timezone.now()})
detailed_event_form = DetailedEventForm(initial={})
context["client"] = client
context["event_list"] = event_list
context["client_form"] = client_form
context["price_form"] = price_form
context["result_form"] = result_form
context["pay_form"] = pay_form
context["detailed_event_form"] = detailed_event_form
if event_id and not client_id:
return render(request, "detailed_event.html", context)
elif client_id and not event_id:
return render(request, "detailed_client.html", context)
def post(self, request, event_id=None, client_id=None):
result_form = ResultForm(request.POST, request.FILES)
pay_form = PayForm(request.POST)
detailed_event_form = DetailedEventForm(request.POST)
client_form = ClientForm(request.POST)
price_form = PriceForm(request.POST)
event = get_object_or_404(Event, pk=request.POST.get("event_id"))
if result_form.is_valid() and request.POST.get("submit") == "add_result":
result_data = result_form.cleaned_data
result_data["client"] = event.client
result_data["detailed_service"] = event.detailed_service
result = Result.objects.create(**result_data)
event.results.add(result)
# return render(request, "create_event.html", {})
if pay_form.is_valid() and request.POST.get("submit") == "add_pay":
pay_data = pay_form.cleaned_data
pay_data["client"] = event.client
pay_data["detailed_service"] = event.detailed_service
# pay_update_obj, pay_create_bool = Pay.objects.update_or_create(event=event, defaults=pay_data)
pay = Pay.objects.create(**pay_data)
event.pays.add(pay)
# return render(request, "create_event.html", {})
if client_form.is_valid() and request.POST.get("submit") == "edit_client":
client_data = client_form.cleaned_data
# print(client_data)
self.updateModelInstanceData(event.client, client_data)
# event.status = detailed_event_form.cleaned_data["status"]
event.save()
print(price_form.is_valid())
if price_form.is_valid() and request.POST.get("submit") == "edit_price":
price_data = price_form.cleaned_data
# print(price_data)
self.updateModelInstanceData(event.price, price_data)
# event.status = detailed_event_form.cleaned_data["status"]
event.save()
if detailed_event_form.is_valid() and request.POST.get("submit") == None:
detailed_event_data = detailed_event_form.cleaned_data
event.status = detailed_event_form.cleaned_data["status"]
# print(detailed_event_form.cleaned_data["status"])
event.save()
# return render(request, "create_event.html", {})
return redirect("/crm/event/%s" % event_id)
class CreateTask(LoginRequiredView):
def get(self, request):
task_form = TaskForm()#initial={'date_time': datetime, 'duration': duration})
context = {}
context["task_form"] = task_form
return render(request, "create_task.html", context)
def post(self, request):
task_form = TaskForm(request.POST)
if task_form.is_valid():
task_data = task_form.cleaned_data
Task.objects.create(**task_data)
return redirect("/crm")
return HttpResponse(task_form.is_valid())
class TaskActions(LoginRequiredView):
def post(self, request):
if request.method == "POST" and request.is_ajax():
context = {}
task_id = request.POST.get("task_id")
action_flag = request.POST.get("action_flag")
print(action_flag)
if action_flag == "done":
event = get_object_or_404(Task, pk=task_id)
event.done = True
event.save()
context["task_list"] = Task.objects.all()
return render(request, "crm_main/task_list_ajax.html", context)
class CreateEvent(LoginRequiredView):
def get(self, request):
# feadback = get_object_or_404(Feadback, pk=feadback_id)
datetime = request.GET.get("datetime")
duration = time(1, 0)
feadback_form = FeadbackForm(initial={})
event_form = EventForm(initial={'date_time': datetime, 'duration': duration})
price_form = PriceForm(initial={'discount': 0})
context = {}
context["feadback_form"] = feadback_form
context["event_form"] = event_form
context["price_form"] = price_form
return render(request, "create_event.html", context)
def post(self, request):
feadback_form = FeadbackForm(request.POST)
event_form = EventForm(request.POST)
price_form = PriceForm(request.POST)
print(price_form)
if event_form.is_valid() and feadback_form.is_valid():
feadback_data = feadback_form.cleaned_data
price_data = price_form.cleaned_data
client_data = feadback_form.cleaned_data.copy()
del client_data['wish']
client_obj, created = Client.objects.get_or_create(tel=client_data['tel'], defaults=client_data)
feadback_data["has_event"] = True
event_data = event_form.cleaned_data
feadback = Feadback.objects.create(**feadback_data)
price = Price.objects.create(**price_data)
event_data["feadback"] = feadback
event_data["price"] = price
event_data["client"] = client_obj
q1 = Q(date_time__gte=event_data["date_time"])
q2 = Q(date_time__lt=(event_data["date_time"] + timedelta(hours=event_data["duration"].hour, minutes=event_data["duration"].minute)))
if Event.objects.filter( q1 & q2).count() < 1:
event = Event.objects.create(**event_data)
return redirect('/crm/')
# event_update_obj, event_create_bool = Event.objects.update_or_create(feadback=feadback, defaults=event_data)
# feadback.has_event = True
# feadback.save()
# if event_create_bool:
# event_form["feadback"].client.services.add(event_update_obj.detailed_service)
# else:
# event_form["feadback"].client.services.
else:
return HttpResponse('недостаточно времени')
        return HttpResponse('feadback_form {} event_form {}'.format(feadback_form.is_valid(), event_form.is_valid()))
class TransferEvent(CrmMain):
def get(self, request, event_id):
context = {"event_id": event_id}
week_periods = Week(event_list=EventList.eventsOnweek())
context["week_periods"] = week_periods
return render(request, "transfer_event_calendar.html", context)
class DeleteEvent(LoginRequiredView):
def get(self, request, event_id):
event = get_object_or_404(Event, pk=event_id)
context = {"event": event}
return render(request, "delete_event.html", context)
def post(self, request, event_id):
event = get_object_or_404(Event, pk=event_id)
if request.POST.get("submit") == "yes":
event.delete()
return redirect('/crm/')
elif request.POST.get("submit") == "no":
return redirect('/crm/event/%s/'%event_id)
# data = {}
# for f in Event._meta.get_fields():
# if not f.auto_created:
# data[f.name] = getattr(event, f.name)
# # print(type(data["feadback"]))
# # return HttpResponse(type(data["feadback"]))
# # CanceledEvent.objects.create()
# # obj, created = CanceledEvent.objects.update_or_create(defaults=data)
# event.delete()
# # return HttpResponse(obj)
return redirect('/crm/')
class DeleteResult(LoginRequiredView):
def get(self, request, event_id, result_id):
result = get_object_or_404(Result, pk=result_id)
context = {"result": result}
return render(request, "delete_result.html", context)
def post(self, request, event_id, result_id):
result = get_object_or_404(Result, pk=result_id)
if request.POST.get("submit") == "yes":
result.delete()
return redirect('/crm/event/%s/'%event_id)
def transferEvent(request, event_id):
datetime = request.GET.get("datetime")
event = get_object_or_404(Event, pk=event_id)
data = {"date_time" : datetime}
obj, created = Event.objects.update_or_create(pk=event_id, defaults=data)
return redirect('/crm/')
def searchFeadback(request):
pass
def feadbackBar(request):
if request.method== "POST" and request.is_ajax():
context = {}
if request.POST.get("filter_type") == "all":
feadback_list_inwork = Feadback.objects.filter(has_event=False).order_by('date')
feadback_list_done = Feadback.objects.filter(has_event=True).order_by('-date')
feadback_list = list(feadback_list_inwork)+list(feadback_list_done)
context["feadback_list"] = feadback_list
return render(request, "crm_main/feadback_list_ajax.html", context)
elif request.POST.get("filter_type") == "to_work":
feadback_list = Feadback.objects.filter(has_event=False).order_by('date')
context["feadback_list"] = feadback_list
return render(request, "crm_main/feadback_list_ajax.html", context)
elif request.POST.get("filter_type") == "processed":
feadback_list = Feadback.objects.filter(has_event=True).order_by('-date')
context["feadback_list"] = feadback_list
return render(request, "crm_main/feadback_list_ajax.html", context)
def taskBar(request):
if request.method == "POST" and request.is_ajax():
context = {}
all_tasks = Task.objects.all()
print("all tasks")
print(all_tasks)
if request.POST.get("filter_type") == "all":
task_list_inwork = all_tasks.filter(done=False)
print(task_list_inwork)
if task_list_inwork:
task_list_inwork = task_list_inwork.order_by('date')
else:
task_list_inwork = []
task_list_done = all_tasks.filter(done=True)
print(task_list_done)
if task_list_done:
task_list_done = task_list_done.order_by('-date')
else:
task_list_done = []
task_list = list(task_list_inwork) + list(task_list_done)
print(task_list)
context["task_list"] = task_list
return render(request, "crm_main/task_list_ajax.html", context)
elif request.POST.get("filter_type") == "on_day":
task_list_inwork = QueryByPeriod.queryOnday(all_tasks.filter(done=False))
print(task_list_inwork)
if task_list_inwork:
task_list_inwork = task_list_inwork.order_by('date')
else:
task_list_inwork = []
task_list_done = QueryByPeriod.queryOnday(all_tasks.filter(done=True))
print(task_list_done)
if task_list_done:
task_list_done = task_list_done.order_by('-date')
else:
task_list_done = []
task_list = list(task_list_inwork) + list(task_list_done)
context["task_list"] = task_list
return render(request, "crm_main/task_list_ajax.html", context)
elif request.POST.get("filter_type") == "on_week":
task_list_inwork = QueryByPeriod.queryOnweek(all_tasks.filter(done=False))
print(task_list_inwork)
if task_list_inwork:
task_list_inwork = task_list_inwork.order_by('date')
else:
task_list_inwork = []
task_list_done = QueryByPeriod.queryOnweek(all_tasks.filter(done=True))
print(task_list_done)
if task_list_done:
task_list_done = task_list_done.order_by('-date')
else:
task_list_done = []
task_list = list(task_list_inwork) + list(task_list_done)
context["task_list"] = task_list
return render(request, "crm_main/task_list_ajax.html", context)
elif request.POST.get("filter_type") == "on_month":
task_list_inwork = QueryByPeriod.queryOnmonth(all_tasks.filter(done=False))
print(task_list_inwork)
if task_list_inwork:
task_list_inwork = task_list_inwork.order_by('date')
else:
task_list_inwork = []
task_list_done = QueryByPeriod.queryOnmonth(all_tasks.filter(done=True))
print(task_list_done)
if task_list_done:
task_list_done = task_list_done.order_by('-date')
else:
task_list_done = []
task_list = list(task_list_inwork) + list(task_list_done)
context["task_list"] = task_list
return render(request, "crm_main/task_list_ajax.html", context)
def changeWeekCalendar(request):
context = {}
if request.method== "POST" and request.is_ajax():
if request.POST.get("go_to_week") == "prev":
print(request.POST.get("start_date"))
start_date = request.POST.get("start_date")
start_date = start_date.split("-")
min_datetime = datetime(int(start_date[0]), int(start_date[1]), int(start_date[2]))-timedelta(days=7)
week_periods = Week(event_list=EventList.eventsOnweek(min_datetime), date_time=min_datetime)
context["week_periods"] = week_periods
print(week_periods)
if request.POST.get("transfer_event"):
context["event_id"] = request.POST.get("event_id")
return render(request, "transfer_event_week_calendar.html", context)
return render(request, "crm_calendar/week_calendar.html", context)
elif request.POST.get("go_to_week") == "next":
print(request.POST.get("start_date"))
start_date = request.POST.get("start_date")
start_date = start_date.split("-")
min_datetime = datetime(int(start_date[0]), int(start_date[1]), int(start_date[2]))+timedelta(days=7)
week_periods = Week(event_list=EventList.eventsOnweek(min_datetime), date_time=min_datetime)
context["week_periods"] = week_periods
print(week_periods)
# print(request.POST.get("transfer_event"))
if request.POST.get("transfer_event"):
context["event_id"] = request.POST.get("event_id")
return render(request, "transfer_event_week_calendar.html", context)
return render(request, "crm_calendar/week_calendar.html", context)
def timeView(request):
min_datetime = datetime.date(datetime.now(timezone.utc)-timedelta(days=datetime.weekday(datetime.now(timezone.utc))))
max_datetime = min_datetime+timedelta(days=7)
# return HttpResponse(max_d)
return HttpResponse(datetime.combine(min_datetime, datetime.min.time()))
def updateTotalPaid(request):
clients = Client.objects.all()
pays = Pay.objects.all()
for client in clients:
client_pays = pays.filter(client=client)
        client.total_paid = sum(pay.pay for pay in client_pays)
client.save()
return redirect("/crm/")
|
I'm surprised there would be fanfics at all this early.
Panchocheesecake wrote: I'm surprised there would be fanfics at all this early.
Made it a sticky for now. If there's no interest, I'll unsticky it.
Location: Right behind you. All of you! At the same time!
Hmmm, I'd take a whack at it, but my forte is doing darkfics. I don't think you guys want that.
The strip's been running for a year, and fanfic has been spawned by as little as a non-character being named in a roll-call.
Probably the only thing keeping it in check is the personal presence of the creators, and their lack of active consent to it.
actually i got permission from giz to do so in her ask giz annoying questions thread.
he looked down and adjusted his cat-eyes to see a young man whose entire body was covered in a shroud of moving shadow.
"sorry but i prefer the dark"
"call me kade. kade white--"
"Whiteclaw, i know, im usually ethereal when the lights are on so i dont really talk much, it scares people."
" My names Marcus, im a Shadowspectre."
"a ghost consumed by darkness"
Actually....how about we do a storyfriday kinda thing that they do on twitter, just in a forum way. We write one line after the other, in around 150 characters, taking turns and continuing the story as it flows.
Panchocheesecake wrote: Actually....how about we do a storyfriday kinda thing that they do on twitter, just in a forum way. We write one line after the other, in around 150 characters, taking turns and continuing the story as it flows.
Don Alexander wrote: If we had a "Post of the day" prize, you'd get it!!
|
# -*- coding: utf-8
# pylint: disable=line-too-long
"""
Classes to classify genes based on coverages across metagenomes.
anvi-mcg-classifier is the default client using this module
"""
import os
import anvio
import numpy as np
import pandas as pd
import matplotlib
# TODO: according to the warning, this call to set the backend is meaningless.
# I need to experiment to see what happens if I delete it.
matplotlib.use('pdf')
import anvio.utils as utils
import matplotlib.pyplot as plt
import anvio.terminal as terminal
import anvio.filesnpaths as filesnpaths
from scipy import odr as odr
from anvio.mcgops import MCGPlots
from anvio.errors import ConfigError, FilesNPathsError
from matplotlib.backends.backend_pdf import PdfPages
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2018, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "Alon Shaiber"
__email__ = "alon.shaiber@gmail.com"
run = terminal.Run()
progress = terminal.Progress()
pp = terminal.pretty_print
columns_for_samples_coverage_stats_dict = ['non_outlier_mean_coverage', 'non_outlier_coverage_std']
class MetagenomeCentricGeneClassifier:
def __init__(self, args, run=run, progress=progress):
self.run = run
self.progress = progress
A = lambda x: args.__dict__[x] if x in args.__dict__ else None
self.output_file_prefix = A('output_file_prefix')
self.alpha = A('alpha')
self.collection_name = A('collection_name')
self.bin_id = A('bin_id')
self.bin_ids_file_path = A('bin_ids_file')
self.exclude_samples = A('exclude_samples')
self.include_samples = A('include_samples')
self.outliers_threshold = A('outliers_threshold')
self.zeros_are_outliers = A('zeros_are_outliers')
self.gen_figures = A('gen_figures')
self.overwrite_output_destinations = A('overwrite_output_destinations')
self.split_coverage_values_per_nt_dict = None
self.gene_level_coverage_stats_dict = None
self.gene_level_coverage_stats_dict_of_dataframes = None
self.profile_db = {}
self.coverage_values_per_nt = None
self.gene_coverages = {}
self.gene_detections = None
self.samples = None
self.positive_samples = []
self.number_of_positive_samples = None
self.negative_samples = {}
self.number_of_negative_samples = None
self.gene_class_df = {}
self.samples_detection_information = {}
self.gene_presence_absence_in_samples_initiated = False
self.gene_presence_absence_in_samples = None
self.additional_description = ''
self.total_length = None
self.samples_coverage_stats_dicts_was_initiated = False
self.samples_coverage_stats_dicts = {}
self.non_outlier_indices = {}
self.gene_coverage_consistency_dict = {}
self.gene_coverage_consistency_dict_initiated = False
self.samples_to_exclude = set([])
self.samples_to_include = set([])
self.write_output_to_files = None
if self.exclude_samples:
# check that there is a file like this
filesnpaths.is_file_exists(self.exclude_samples)
self.samples_to_exclude = set([l.split('\t')[0].strip() for l in open(self.exclude_samples, 'rU').readlines()])
if not self.samples_to_exclude:
raise ConfigError("You asked to exclude samples, but provided an empty list.")
run.info('Excluding Samples', 'The following samples will be excluded: %s' % self.samples_to_exclude,)
if self.include_samples:
# check that there is a file like this
filesnpaths.is_file_exists(self.include_samples)
self.samples_to_include = set([l.split('\t')[0].strip() for l in open(self.include_samples, 'rU').readlines()])
if not self.samples_to_include:
raise ConfigError("You provided an empty list of samples to include.")
run.info('Including Samples', 'The following samples will be included: %s' % self.samples_to_include,)
# run sanity check on all input arguments
self.sanity_check()
def init(self, gene_level_coverage_stats_dict=None, split_coverage_values_per_nt_dict=None, additional_description=None):
""" setting the dictionaries for gene coverage stats and for split coverage per nucleotide"""
if gene_level_coverage_stats_dict is None and split_coverage_values_per_nt_dict is None:
raise ConfigError("MCGC needs at least one of the following in order to work: "
"gene_level_coverage_stats_dict or/and split_coverage_values_per_nt_dict")
# We want to make sure these are empty in case we use "init" multiple times for different bins
self.coverage_values_per_nt = None
self.gene_class_df = {}
self.samples_detection_information = {}
self.gene_presence_absence_in_samples_initiated = False
self.gene_presence_absence_in_samples = None
self.samples_coverage_stats_dicts_was_initiated = False
self.samples_coverage_stats_dicts = {}
self.non_outlier_indices = {}
self.gene_coverage_consistency_dict = {}
self.gene_coverage_consistency_dict_initiated = False
self.gene_level_coverage_stats_dict = gene_level_coverage_stats_dict
self.split_coverage_values_per_nt_dict = split_coverage_values_per_nt_dict
if additional_description:
self.additional_description = '-' + additional_description
try:
samples = next(iter(self.gene_level_coverage_stats_dict.values())).keys()
except:
samples = next(iter(self.split_coverage_values_per_nt_dict.values())).keys()
self.init_samples(samples)
def sanity_check(self):
"""Basic sanity check for class inputs"""
if self.output_file_prefix:
filesnpaths.is_output_file_writable(self.output_file_prefix + '-additional-layers.txt', ok_if_exists=self.overwrite_output_destinations)
try:
if self.gen_figures:
plot_dir = self.output_file_prefix + '-nucleotide-coverage-distribution-plots'
os.makedirs(plot_dir, exist_ok=self.overwrite_output_destinations)
except FileExistsError as e:
raise FilesNPathsError("%s already exists, if you would like to overwrite it, then use -W (see help menu)." % plot_dir)
# checking alpha
if not isinstance(self.alpha, float):
raise ConfigError("alpha value must be a type float.")
# alpha must be a min of 0 and smaller than 0.5
if self.alpha < 0 or self.alpha >= 0.5:
raise ConfigError("alpha must be a minimum of 0 and smaller than 0.5")
if self.exclude_samples and self.include_samples:
raise ConfigError("You cannot use both --include-samples and --exclude-samples! Please choose one.")
def init_samples(self, samples_list):
""" Create the set of samples according to user input and store it in self.samples"""
# remove the samples that should be excluded
samples = set(samples_list) - self.samples_to_exclude
if self.include_samples:
samples_to_include_that_are_not_there = self.samples_to_include - samples
if samples_to_include_that_are_not_there:
raise ConfigError("You requested to include some samples that are not in the profile database. Here are the samples in the profile database: %s. "
"And here are the samples you requested, and that are not there: %s" % (samples, samples_to_include_that_are_not_there))
samples = self.samples_to_include
self.samples = samples
def init_gene_level_coverage_stats_dict_of_dataframes(self):
""" converts the dictionaries of gene_level_coverage_stats_dict to dataframes"""
self.gene_level_coverage_stats_dict_of_dataframes = {}
for key in ['mean_coverage', 'detection', 'non_outlier_mean_coverage', 'non_outlier_coverage_std']:
# Only include samples that the user want
gene_stat = utils.get_values_of_gene_level_coverage_stats_as_dict(self.gene_level_coverage_stats_dict, key, as_pandas=True, samples_of_interest=self.samples)
self.gene_level_coverage_stats_dict_of_dataframes[key] = gene_stat
for key in ['gene_coverage_values_per_nt', 'non_outlier_positions']:
gene_stat = utils.get_values_of_gene_level_coverage_stats_as_dict(self.gene_level_coverage_stats_dict, key, as_pandas=False, samples_of_interest=self.samples)
self.gene_level_coverage_stats_dict_of_dataframes[key] = gene_stat
def init_samples_coverage_stats_dict(self):
""" populate the samples_coverage_stats_dict, and determine positive, negative, and ambiguous samples with the genome detection information
(--alpha, --genome-detection-uncertainty)
The samples_coverage_stats_dict dataframe is used to calculate the gene consistency information.
It is also used for plotting purposes (both for the nucleotide-coverage-distribution plots and the gene-consistency plots).
The coverage_values_per_nt is used to calculate the detection value (portion of nucleotides
covered) for a sample. Then, a cutoff for detection values is used to determine the presence
or absence of the genome in each sample.
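        Illustrative example (an assumption added for clarity, not anvi'o output): for a toy
        per-nucleotide coverage vector v = [0, 0, 4, 5, 6, 0, 7, 8], the detection value computed
        below is np.count_nonzero(v) / len(v) = 5 / 8 = 0.625, so with the hard-coded 0.5 cutoff
        the genome would be marked as present in that sample.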
"""
if self.coverage_values_per_nt is None:
self.coverage_values_per_nt = get_coverage_values_per_nucleotide(self.split_coverage_values_per_nt_dict, samples=self.samples)
total_length = len(next(iter(self.coverage_values_per_nt.values())))
MCG_samples_information_table_structure = ['samples', 'presence', 'detection', 'number_of_taxon_specific_core_detected']
# create an empty dataframe
samples_information = pd.DataFrame(index=self.samples, columns=MCG_samples_information_table_structure[1:])
positive_samples = []
negative_samples = []
self.progress.new("Finding nucleotide positions in samples with outlier coverage values")
progress.update('...')
num_samples, counter = len(self.samples), 1
detection = {}
total_length = len(next(iter(self.coverage_values_per_nt.values())))
self.samples_coverage_stats_dicts = pd.DataFrame(index=self.samples, columns=columns_for_samples_coverage_stats_dict)
for sample in self.samples:
if num_samples > 100 and counter % 100 == 0:
self.progress.update('%d of %d samples...' % (counter, num_samples))
# get the non-outlier information
non_outlier_indices, self.samples_coverage_stats_dicts.loc[sample,] = get_non_outliers_information(self.coverage_values_per_nt[sample], MAD_threshold=self.outliers_threshold, zeros_are_outliers=self.zeros_are_outliers)
self.non_outlier_indices[sample] = non_outlier_indices
number_of_non_outliers = len(self.non_outlier_indices[sample])
if anvio.DEBUG:
self.run.info_single('The mean and std of non-outliers in sample %s are: %s, %s respectively' % (sample, self.samples_coverage_stats_dicts['non_outlier_mean_coverage'][sample], self.samples_coverage_stats_dicts['non_outlier_coverage_std'][sample]))
self.run.info_single('The number of non-outliers is %s of %s (%.2f%%)' % (number_of_non_outliers, total_length, 100.0 * number_of_non_outliers / total_length))
detection[sample] = np.count_nonzero(self.coverage_values_per_nt[sample]) / total_length
samples_information['presence'][sample] = get_presence_absence_information(number_of_non_outliers/total_length, self.alpha)
if detection[sample] <= 0.5:
samples_information['presence'][sample] = False
if samples_information['presence'][sample]:
positive_samples.append(sample)
elif samples_information['presence'][sample] == False:
negative_samples.append(sample)
samples_information['detection'][sample] = detection[sample]
counter += 1
self.positive_samples = positive_samples
self.number_of_positive_samples = len(self.positive_samples)
self.negative_samples = negative_samples
self.samples_detection_information = samples_information
self.run.warning('The number of positive samples is %s' % self.number_of_positive_samples)
self.run.warning('The number of negative samples is %s' % len(self.negative_samples))
self.samples_coverage_stats_dicts_was_initiated = True
self.progress.end()
    def plot_nucleotide_coverage_distribution(self):
        """ Creates a pdf file with the following plots for each sample: the sorted nucleotide coverages
        (with the outliers in red and non-outliers in blue), and a histogram of coverages for the non-outliers"""
        # Creating a directory for the plots. If running on bins, each bin would be in a separate sub-directory.
if not self.samples_coverage_stats_dicts_was_initiated:
self.init_samples_coverage_stats_dict()
plot_dir = self.output_file_prefix + '-nucleotide-coverage-distribution-plots' + '/'
self.progress.new('Saving figures of taxon specific distributions to pdf')
        self.progress.update('...')
        number_of_finished = 0
for sample in self.positive_samples:
coverages_pdf_output = plot_dir + sample + self.additional_description + '-coverages.pdf'
pdf_output_file = PdfPages(coverages_pdf_output)
v = self.coverage_values_per_nt[sample]
            # Using argsort so we can use the non_outlier indices
sorting_indices = np.argsort(v)
# we would need the reverse of the sorting of the indices to create the x axis for the non-outliers
reverse_sorted_indices = np.zeros(len(sorting_indices))
reverse_sorted_indices[sorting_indices] = range(len(reverse_sorted_indices))
# plotting the ordered coverage values (per nucleotide)
# the non-outliers are plotted in blue
# the outlier values are plotted in red
fig = plt.figure()
ax = fig.add_subplot(111, rasterized=True)
            ax.set_xlabel('Nucleotide Number (ordered)')
            ax.set_ylabel(r'$Nucleotide Coverage^2$')
x1 = range(len(v)) # FIXME: this shouldn't be in the loop (only here because I need to fix the mock data)
x2 = reverse_sorted_indices[self.non_outlier_indices[sample]]
#y2 = v[self.non_outlier_indices[sample]]
# plot all in red
ax.semilogy(x1,v[sorting_indices],'r.', rasterized=True)
# plot on top the non-outliers in blue
ax.semilogy(x2,v[self.non_outlier_indices[sample]],'b.', rasterized=True)
fig.suptitle("%s - sorted coverage values with outliers" % sample)
plt.savefig(pdf_output_file, format='pdf')
plt.close()
# plotting a histogram of the non-outliers
            # This allows us to see whether they resemble a normal distribution
hist_range = (min(v[self.non_outlier_indices[sample]]),max(v[self.non_outlier_indices[sample]]))
# computing the number of bins so that the width of a bin is ~1/4 of the standard deviation
# FIXME: need to make it so the bins are only of integers (so the smallest bin is of width 1
# and that bins are integers)
number_of_hist_bins = np.ceil((hist_range[1] - hist_range[0]) / (self.samples_coverage_stats_dicts['non_outlier_coverage_std'][sample]/4)).astype(int) # setting the histogram bins to be of the width of a quarter of std
fig = plt.figure()
ax = fig.add_subplot(111, rasterized=True)
            ax.set_xlabel('Coverage')
ax.hist(v[self.non_outlier_indices[sample]], number_of_hist_bins,hist_range, rasterized=True)
fig.suptitle("%s - histogram of non-outliers" % sample)
# adding the mean and std of the non-outliers as text to the plot
text_for_hist = u'$\mu = %d$\n $\sigma = %d$' %\
(self.samples_coverage_stats_dicts['non_outlier_mean_coverage'][sample],\
self.samples_coverage_stats_dicts['non_outlier_coverage_std'][sample])
ax.text(0.8, 0.9, text_for_hist, ha='center', va='center', transform=ax.transAxes)
plt.savefig(pdf_output_file, format='pdf')
plt.close()
# close the pdf file
pdf_output_file.close()
            number_of_finished += 1
            self.progress.update("Finished %d of %d" % (number_of_finished, self.number_of_positive_samples))
self.progress.end()
def init_gene_presence_absence_in_samples(self):
""" Determining presence and absence of genes in samples according to gene detection values."""
if not self.gene_level_coverage_stats_dict:
raise ConfigError("gene presence/absence in samples cannot be determined without a gene_level_coverage_stats_dict,\
but it seems that you don't have one. maybe you should run init()?")
if self.gene_level_coverage_stats_dict_of_dataframes is None:
self.init_gene_level_coverage_stats_dict_of_dataframes()
gene_callers_id = self.gene_level_coverage_stats_dict_of_dataframes['detection'].index
self.gene_presence_absence_in_samples = pd.DataFrame(index=gene_callers_id, columns=self.samples)
T = lambda x: get_presence_absence_information(sum(x)/len(x), self.alpha)
self.progress.new('Computing gene presence/absence in samples')
        self.progress.update('...')
genes_above_outlier_threshold = pd.DataFrame.from_dict(self.gene_level_coverage_stats_dict_of_dataframes['non_outlier_positions'], orient='index').applymap(T)
genes_with_detection_above_half = self.gene_level_coverage_stats_dict_of_dataframes['detection'].applymap(lambda x: x > 0.5)
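        # a gene is called present in a sample only when both criteria hold (element-wise AND of the two boolean frames)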
self.gene_presence_absence_in_samples = genes_above_outlier_threshold & genes_with_detection_above_half
self.gene_presence_absence_in_samples_initiated = True
self.progress.end()
def init_gene_coverage_consistency_information(self):
""" Perform orthogonal distance regression for each gene to determine coverage consistency.
The question that we are trying to ask is:
        Does the non-outlier nt coverage of the gene in samples correlate with the non-outlier
nt coverage of the genome in samples?
The regression is performed only for positive samples.
For each gene, the regression is performed only according to samples in which
        the gene is present (according to the detection criteria).
"""
if not self.samples_coverage_stats_dicts_was_initiated:
self.init_samples_coverage_stats_dict()
if not self.gene_presence_absence_in_samples_initiated:
self.init_gene_presence_absence_in_samples()
self.progress.new("Computing coverage consistency for all genes.")
        self.progress.update('...')
gene_ids = self.gene_level_coverage_stats_dict_of_dataframes['mean_coverage'].index
num_genes, counter = len(gene_ids), 1
for gene_id in gene_ids:
if num_genes > 100 and counter % 100 == 0:
self.progress.update('%d of %d genes...' % (counter, num_genes))
# samples in which the gene is present
_samples = self.gene_presence_absence_in_samples.loc[gene_id,self.gene_presence_absence_in_samples.loc[gene_id,]==True].index
# mean and std of non-outlier nt in each sample
x = list(self.samples_coverage_stats_dicts.loc[_samples,'non_outlier_mean_coverage'].values)
if "non_outlier_coverage_std" in self.samples_coverage_stats_dicts:
# we only expect to have the sample coverage std in "full" mode
std_x = list(self.samples_coverage_stats_dicts.loc[_samples,'non_outlier_coverage_std'].values)
else:
std_x = None
if len(_samples) > 1:
# mean and std of non-outlier nt in the gene (in each sample)
y = self.gene_level_coverage_stats_dict_of_dataframes['non_outlier_mean_coverage'].loc[gene_id, _samples].values
std_y = self.gene_level_coverage_stats_dict_of_dataframes['non_outlier_coverage_std'].loc[gene_id, _samples].values
# performing the regression using ODR
_data = odr.RealData(x, y, std_x, std_y)
_model = lambda B, c: B[0] * c
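                # the fitted model is a straight line through the origin: gene coverage = slope * sample (genome) coverage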
_odr = odr.ODR(_data, odr.Model(_model), beta0=[3])
odr_output = _odr.run()
# store results
self.gene_coverage_consistency_dict[gene_id] = {}
self.gene_coverage_consistency_dict[gene_id]['slope'] = odr_output.beta[0]
self.gene_coverage_consistency_dict[gene_id]['slope_std'] = odr_output.sd_beta[0]
self.gene_coverage_consistency_dict[gene_id]['slope_precision'] = odr_output.sd_beta[0] / odr_output.beta[0]
                # compute R squared (1 - SS_residual / SS_total) for the fitted line
f = lambda b: lambda _x: b*_x
R_squered = 1 - sum((np.apply_along_axis(f(odr_output.beta[0]),0,x)-y)**2) / sum((y-np.mean(y))**2)
# Check if converged
self.gene_coverage_consistency_dict[gene_id]['R_squered'] = R_squered
if odr_output.stopreason[0] == 'Sum of squares convergence':
self.gene_coverage_consistency_dict[gene_id]['converged'] = True
else:
self.gene_coverage_consistency_dict[gene_id]['converged'] = False
self.gene_coverage_consistency_dict_initiated = True
self.progress.end()
def get_gene_specificity(self, gene_id):
""" return True for gene if it occurs in positive samples and doesn't occur in negative samples.
Ambiguous occurences are not counted as anything. This means that if a gene is ambiguously
occuring in a negative sample it could still be counted as "specific". It also means that
if a gene is only ambiguously occuring in positive samples then it would be considered
as "non-specific".
"""
if self.gene_class_df.loc[gene_id, 'occurence_in_positive_samples'] > 1 and self.gene_class_df.loc[gene_id, 'occurence_in_negative_samples'] == 0:
return True
else:
return False
        # TODO: if there are no occurrences of the gene at all, then we should maybe return None instead of False
def get_gene_coverage_consistency(self, gene_id):
""" return true if the gene's coverage is consistent accross positive samples, False otherwise."""
# TODO: make sure coverage_consistency_dict has been initiated
if self.gene_class_df.loc[gene_id, 'occurence_in_positive_samples'] == 0:
            # if the gene doesn't occur in positive samples then there is no classification
return None
elif self.gene_class_df.loc[gene_id, 'occurence_in_positive_samples'] == 1:
# if the gene occurs only in one positive sample then return True.
# XXX: we might prefer to return None, we should consider this in the future.
return True
elif self.gene_coverage_consistency_dict[gene_id]['converged']:
# FIXME: this is where we use an arbitrary threshold again :-(
# if the slope precision is smaller than the threshold then the regression
# fit is considered accurate enough and the gene coverage is considered consistent.
return self.gene_coverage_consistency_dict[gene_id]['slope_precision'] < 0.5
else:
            # The regression didn't converge, so the coverage is probably not consistent.
return False
def determine_if_gene_is_core(self, gene_id, gene_specificity):
""" return True for core gene, False for accessory gene
If the gene is specific to positive samples, then core would be considered if it
occurs in all positive samples. Otherwise it would be considered core if it
occurs in all positive AND all negative samples.
Ambiguous occurences of a gene are not considered (i.e. they are the same as absence).
"""
if gene_specificity:
# return True if the the gene occurs in all positive samples.
return self.gene_class_df.loc[gene_id, 'occurence_in_positive_samples'] == len(self.positive_samples)
else:
# return True if the gene occurs in all positive AND all negative samples
return self.gene_class_df.loc[gene_id, 'occurence_in_positive_and_negative_samples'] == len(self.positive_samples) + len(self.negative_samples)
def init_gene_class_df(self):
""" generate dictionary with the class information per gene.
This dictionary could be later use to produce an additional-layer
text file for vizualization.
"""
# TODO: make sure gene presence absence was calculated
if not self.gene_coverage_consistency_dict_initiated:
self.init_gene_coverage_consistency_information()
# XXX: only negative and positive samples are used here
# ambiguous samples are ignored as if they were never
# there. This is not ideal, but is easy to do.
gene_ids = self.gene_level_coverage_stats_dict_of_dataframes['mean_coverage'].index
self.gene_class_df = pd.DataFrame(index=gene_ids)
for gene_id in gene_ids:
            # determine the number of occurrences in positive samples
self.gene_class_df.loc[gene_id, 'occurence_in_positive_samples'] = len([s for s in self.positive_samples if self.gene_presence_absence_in_samples.loc[gene_id,s] == True])
            # determine the number of occurrences in negative samples
self.gene_class_df.loc[gene_id, 'occurence_in_negative_samples'] = len([s for s in self.negative_samples if self.gene_presence_absence_in_samples.loc[gene_id,s] == True])
# set the occurence_in_positive_and_negative_samples
self.gene_class_df.loc[gene_id, 'occurence_in_positive_and_negative_samples'] = self.gene_class_df.loc[gene_id, 'occurence_in_positive_samples'] + self.gene_class_df.loc[gene_id, 'occurence_in_negative_samples']
gene_specificity = self.get_gene_specificity(gene_id)
gene_coverage_consistency = self.get_gene_coverage_consistency(gene_id)
# determine core accessory
gene_is_core = self.determine_if_gene_is_core(gene_id, gene_specificity)
self.gene_class_df.loc[gene_id, 'specificity'] = gene_specificity
            self.gene_class_df.loc[gene_id, 'coverage_consistency'] = gene_coverage_consistency
self.gene_class_df.loc[gene_id, 'core'] = gene_is_core
self.gene_class_df.loc[gene_id, 'MCG_class'] = get_class_string(gene_specificity, gene_coverage_consistency, gene_is_core)
def update_samples_information_from_gene_class_df(self):
# after running classification we sum up some information regarding
# the results of the classifier per sample
        TSC = [g for g in self.gene_class_df.index if (self.gene_class_df.loc[g, 'coverage_consistency'] and \
                                                       self.gene_class_df.loc[g, 'core'])]
        self.samples_detection_information['number_of_taxon_specific_core_detected'] = len(TSC)
def gen_gene_consistency_plots(self):
""" generate and save the gene consistency plots for each gene."""
if not self.gene_coverage_consistency_dict_initiated:
self.init_gene_coverage_consistency_information()
gene_ids = self.gene_level_coverage_stats_dict_of_dataframes['mean_coverage'].index
num_genes, counter = len(gene_ids), 1
        self.progress.new('Plotting gene consistency information')
        self.progress.update('...')
for gene_id in gene_ids:
if num_genes > 100 and counter % 100 == 0:
self.progress.update('%d of %d genes...' % (counter, num_genes))
p = MCGPlots(self, gene_id, run=run, progress=progress)
p.plot()
        self.progress.end()
def save_gene_class_information_in_additional_layers(self):
output_file_path = self.output_file_prefix + self.additional_description + '-additional-layers.txt'
self.gene_class_df.to_csv(output_file_path, sep='\t', index_label='gene_callers_id')
def save_samples_information(self):
samples_information_file_name = self.output_file_prefix + self.additional_description + '-samples-information.txt'
samples_information = pd.concat([self.samples_detection_information, self.samples_coverage_stats_dicts], axis=1, sort=True)
samples_information.to_csv(samples_information_file_name, sep='\t', index_label='samples')
def classify(self):
self.init_gene_class_df()
self.update_samples_information_from_gene_class_df()
if self.write_output_to_files:
self.save_gene_class_information_in_additional_layers()
self.save_samples_information()
if self.gen_figures:
# Create the plots for nucleotide-level coverage data per sample.
self.plot_nucleotide_coverage_distribution()
# generate plots for coverage consistency information for each gene.
self.gen_gene_consistency_plots()
def get_coverage_values_per_nucleotide(split_coverage_values_per_nt_dict, samples=None):
""" Helper function that accepts a split_coverage_values_per_nt_dict and returns a dictionary with
samples as keys and the concatenated coverage values for all splits as one array
"""
if not split_coverage_values_per_nt_dict:
raise ConfigError("You did not provide a split_coverage_values_per_nt_dict, and we need it...")
    progress.new('Merging coverage values across splits')
progress.update('...')
d = {}
if samples is None:
samples = next(iter(split_coverage_values_per_nt_dict.values())).keys()
number_of_samples = len(samples)
number_of_finished = 0
    # find the combined length of all splits first
total_length = 0
for split in split_coverage_values_per_nt_dict:
total_length += len(split_coverage_values_per_nt_dict[split][next(iter(samples))])
for sample in samples:
# create an array of zero with the total length
# this is much faster than appending the vectors of splits
d[sample] = np.zeros(total_length)
pos = 0
for split in split_coverage_values_per_nt_dict:
split_values = split_coverage_values_per_nt_dict[split][sample]
split_len = len(split_values)
d[sample][pos:pos+split_len] = split_values
pos += split_len
#d[sample] = np.array(d[sample])
number_of_finished += 1
progress.update("Finished sample %d of %d" % (number_of_finished,number_of_samples))
progress.end()
return d
def get_non_outliers_information(v, MAD_threshold=2.5, zeros_are_outliers=False):
""" returns the non-outliers for the input pandas series using MAD"""
d = pd.Series(index=columns_for_samples_coverage_stats_dict)
outliers = utils.get_list_of_outliers(v, threshold=MAD_threshold, zeros_are_outliers=zeros_are_outliers)
non_outliers = np.logical_not(outliers)
non_outlier_indices = np.where(non_outliers)[0]
if not(len(non_outlier_indices)):
non_outlier_indices = np.array([])
d['non_outlier_mean_coverage'] = 0.0
d['non_outlier_coverage_std'] = 0.0
else:
d['non_outlier_mean_coverage'] = np.mean(v[non_outlier_indices])
d['non_outlier_coverage_std'] = np.std(v[non_outlier_indices])
return non_outlier_indices, d
# The order of the strings is very important since it is used in get_class_string
class_short_names = ['NNA', 'SNA', 'NCA',\
'SCA', 'NNC', 'SNC',\
'NCC', 'SCC']
class_long_names = ['Non-specific_Non-consistent_Accessory', 'Specific_Non-consistent_Accessory', 'Non-specific_Consistent_Accessory',\
'Specific_Consistent_Accessory', 'Non-specific_Non-consistent_Core', 'Specific_Non-consistent_Core',\
'Non-specific_Consistent_Core', 'Specific_Consistent_Core']
class_short_name_long_name_dict = dict(zip(class_short_names,class_long_names))
def get_class_long_name_from_short_name(short_name):
return class_short_name_long_name_dict[short_name]
def get_class_string(gene_specificity, gene_coverage_consistency, gene_is_core):
""" Takes the values of the three categories and returns a string to represent the class."""
value_list = [gene_specificity, gene_coverage_consistency, gene_is_core]
if None in value_list:
return 'NA'
# converting the list of booleans to a number
    # this solution was taken from here: https://stackoverflow.com/a/4066807/7115450
index = sum(1<<i for i, b in enumerate(value_list) if b)
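    # e.g. [True, False, True] -> (1<<0) + (1<<2) = 5 -> class_short_names[5] == 'SNC' (Specific_Non-consistent_Core)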
return class_short_names[index]
def get_presence_absence_information(number_of_non_outliers, alpha):
""" Helper function to determine presence/absence according to a threshold."""
##### WHAT WE SHOULD DO IN THE FUTURE #####
# Arbitrary cut-offs are terrible.
# If we assume there are no accessory genes (we will get back to this later),
    # then if the genome is present, we expect ALL of it to be present. Thus,
# if we had an unlimited number of reads, then we expect detection to be 1.
# as the number of reads gets smaller, the expected detection value is smaller.
# for a given genome size, a given read length, and the number of reads mapped to
# the genome, we can compute the following value: "what is the probability that
# the detection value will be greater than the actual detection value", if that
# probability is high, then that is a good sign that the genome is not present
# in the sample, and that any reads that we got are due to non-specific coverage.
# the same thing could be calculated for a given gene.
# we can create a measure for agreement between the mean coverage of a gene
# and the detection of the gene. It would simply be the probability that the
# coverage of the gene would exist with a detection that is higher than the
# actual detection of the gene. All we need for that is the read length,
# gene/genome length, and the expected genomic portion shared by two genomes that
# belong to the population in question.
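    # A rough sketch of that idea (an assumption about a possible future model, not what is
    # implemented below): under a Poisson / Lander-Waterman coverage model with N reads of
    # length L mapped to a genome of size G, the expected detection is roughly 1 - exp(-N*L/G),
    # which could serve as the reference value to compare the observed detection against.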
if number_of_non_outliers >= 0.5 + alpha:
return True
    elif number_of_non_outliers <= 0.5 - alpha:
return False
else:
return None
|
What does it mean that Article 282 was decriminalized, and which statutes will functionally replace it?
The amendments that partially decriminalized Article 282, Russia’s criminal statute on inciting hate and enmity, went into effect at the beginning of January 2019. Since then, sentences based on that law have gradually been canceled, and the government has stopped pursuing new charges. That process will probably take several more months, and my estimates show that it will affect around 2,000 people.
In a February 2019 meeting with officials in the Ministry of Internal Affairs, Putin said that the police’s job at this point is to “stop pulling criminal cases out of thin air and start focusing on preventing extremism.” That suggestion can be interpreted as follows: “Don’t focus on detective work and research; focus on warnings and suppression.” In other words, work more on groups that are a risk for the government: young people with critical views, Muslims, and so on.
The decriminalization of Article 282 wasn’t called partial for nothing. On one hand, the criminal aspect really was eliminated to some degree: now, inciting hate or enmity or degrading someone’s dignity is not a crime when committed for the first time. On the other hand, those actions are still a legal offense. Article 282 wasn’t transferred in its entirety from the Criminal Code to the Administrative Code, and that means it can still be used to bring criminal charges. For example, its second section is still active in criminal law.
The consequences of that decriminalization will undoubtedly be felt by people who were targeted by criminal cases or who were one step away from being charged. But they’re not the only ones who will notice a change: decriminalization will also affect the distribution of responsibility among law enforcement agencies. Previously, cases on the incitement of hate or enmity were handled both by Center E (the Center for Combating Extremism) and the FSB. Decriminalization introduced a strict border between the two. Right now, a new division of labor is forming between the fight against extremism and the fight against terrorism, and a change is also taking place in the way government policy deals with those two phenomena. Government policy will focus from here on out on the fight against terrorism while leaving the fight against extremism behind somewhat.
Imagine something like Maslow’s hierarchy of needs, but with only two parts. On the bottom, you have the many administrative violations that Center E handles. People accused of participating in illegal protests, unwanted organizations, insulting government officials, early-stage extremism, publications of Nazi imagery or other forbidden symbols, or illegal missionary activity would fall into that part of the pyramid.
The FSB will answer for the upper part of the pyramid. That part includes fewer cases, but the crackdown on them is tougher, and the punishments for them are harsher, all the way up to a life sentence. Here, you’re talking about criminal violations: participating in extremist societies like Hizb ut-Tahrir or New Greatness, founding terrorist organizations like The Network or Artpodgotovka, and justifying terrorism.
The FSB will work primarily with two laws: Article 280 (on “Encouraging extremist activity”) and Article 205.2 (on “Encouraging terrorist activity or public justification of terrorism”). However, it is more difficult to prove encouragement than incitement of hate or enmity — that is a narrower legal area, and it requires particular linguistic constructions that appear much more rarely than, say, language that degrades the dignity of some national or religious group. In 2017, 571 people were convicted under Article 282, 170 under Article 280, and 96 under Article 205.2.
When Andrey Klishas’s bill on disrespecting the government is added to Russia’s Code of Administrative Violations, it won’t be considered extremism-related in the direct sense of the word because the bill itself doesn’t use the word “extremism.” However, Center E will be the one dealing with these cases, which means that the official political perspective will see this as extremism or something like it. I think that we’ll be seeing a wave of administrative cases against insulting “His Majesty” and his court, and they will have big fines attached. Even the fine for participating in an illegal protest will be three times less. It’s like they’re telling us that criticizing the authorities on the Web carries major societal dangers and will be punished three times as harshly as criticizing the government at a physical protest. In other words, online is three times as dangerous as offline.
But we have to remember that those cases will not reach a mass scale. That’s because, unlike cases like the administrative penalties for participating in protests, the facts that must be proven here are much more complex. You have to analyze the text and order a linguistic examination, and that means spending additional money and effort. There’s no way to put together an administrative case around a report from a coworker as officers do with illegal protests because the text in question will always be unique.
By the same token, cases involving “participation in an undesirable organization” could also be considered extremism-related. There had already been quite a lot of administrative cases under that statute, but it was recently used to raise a criminal case for the first time. I’m talking about Anastasia Shevchenko, who was charged when she became the leader of the civic organization Open Russia [which is associated with the opposition leader and former oil tycoon Mikhail Khodorkovsky].
The problem here is that criminal cases in the “undesirable organization” category are a formality. If someone faces two administrative cases under that statute in the course of one year, the case automatically becomes a criminal one.
There’s no legal complexity here, but there is a political decision to be made: so far, 99% of all the “undesirable organization” cases only involve Open Russia. Based on that fact, we can conclude that there is enough political will to squeeze Open Russia dry, and activists in that organization face a serious threat [of prosecution].
I don’t yet see a risk that people will be prosecuted en masse under criminal law for participating in undesirable organizations. After all, a series of administrative cases has to be brought forward first before being transferred to criminal law, and that’s a three-step process, so it deprives the government of the ability to use that statute against a broad, disparate group of people.
|
# -*- coding: utf-8 -*-
import os
from django.forms import Form
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django.core.urlresolvers import NoReverseMatch
from django.db import transaction
from django.template.response import SimpleTemplateResponse
from django.utils.translation import get_language_from_request
try:
# This try/except block can be removed when we stop supporting Django 1.6
from django.contrib.formtools.wizard.views import SessionWizardView
except ImportError: # pragma: no cover
# This is fine from Django 1.7
from formtools.wizard.views import SessionWizardView
from cms.models import Page
from .wizard_pool import wizard_pool
from .forms import (
WizardStep1Form,
WizardStep2BaseForm,
step2_form_factory,
)
class WizardViewMixin(object):
language_code = None
@transaction.atomic()
def dispatch(self, request, *args, **kwargs):
self.language_code = get_language_from_request(request, check_path=True)
response = super(WizardViewMixin, self).dispatch(
request, *args, **kwargs)
return response
def get_form_kwargs(self):
kwargs = super(WizardViewMixin, self).get_form_kwargs()
kwargs.update({'wizard_language': self.language_code})
return kwargs
class WizardCreateView(WizardViewMixin, SessionWizardView):
template_name = 'cms/wizards/start.html'
file_storage = FileSystemStorage(
location=os.path.join(settings.MEDIA_ROOT, 'wizard_tmp_files'))
form_list = [
('0', WizardStep1Form),
# Form is used as a placeholder form.
# the real form will be loaded after step 0
('1', Form),
]
def get_current_step(self):
"""Returns the current step, if possible, else None."""
try:
return self.steps.current
except AttributeError:
return None
def is_first_step(self, step=None):
step = step or self.get_current_step()
return step == '0'
def is_second_step(self, step=None):
step = step or self.get_current_step()
return step == '1'
def get_context_data(self, **kwargs):
context = super(WizardCreateView, self).get_context_data(**kwargs)
if self.is_second_step():
context['wizard_entry'] = self.get_selected_entry()
return context
def get_form(self, step=None, data=None, files=None):
if step is None:
step = self.steps.current
# We need to grab the page from pre-validated data so that the wizard
# has it to prepare the list of valid entries.
if data:
page_key = "{0}-page".format(step)
self.page_pk = data.get(page_key, None)
else:
self.page_pk = None
if self.is_second_step(step):
self.form_list[step] = self.get_step_2_form(step, data, files)
return super(WizardCreateView, self).get_form(step, data, files)
def get_form_kwargs(self, step=None):
"""This is called by self.get_form()"""
kwargs = super(WizardCreateView, self).get_form_kwargs()
kwargs['wizard_user'] = self.request.user
if self.is_second_step(step):
kwargs['wizard_page'] = self.get_origin_page()
else:
page_pk = self.page_pk or self.request.GET.get('page', None)
if page_pk and page_pk != 'None':
kwargs['wizard_page'] = Page.objects.filter(pk=page_pk).first()
else:
kwargs['wizard_page'] = None
return kwargs
def get_form_initial(self, step):
"""This is called by self.get_form()"""
initial = super(WizardCreateView, self).get_form_initial(step)
if self.is_first_step(step):
initial['page'] = self.request.GET.get('page')
return initial
def get_step_2_form(self, step=None, data=None, files=None):
entry_form_class = self.get_selected_entry().form
step_2_base_form = self.get_step_2_base_form()
form = step2_form_factory(
mixin_cls=step_2_base_form,
entry_form_class=entry_form_class,
)
return form
def get_step_2_base_form(self):
"""
Returns the base form to be used for step 2.
        This form is subclassed dynamically by the form defined per module.
"""
return WizardStep2BaseForm
def get_template_names(self):
if self.is_first_step():
template_name = self.template_name
else:
template_name = self.get_selected_entry().template_name
return template_name
def done(self, form_list, **kwargs):
"""
This step only runs if all forms are valid. Simply emits a simple
template that uses JS to redirect to the newly created object.
"""
form_two = list(form_list)[1]
instance = form_two.save()
url = self.get_success_url(instance)
if not url:
page = self.get_origin_page()
if page:
try:
url = page.get_absolute_url(self.language_code)
except NoReverseMatch:
url = '/'
else:
url = '/'
return SimpleTemplateResponse("cms/wizards/done.html", {"url": url})
def get_selected_entry(self):
data = self.get_cleaned_data_for_step('0')
return wizard_pool.get_entry(data['entry'])
def get_origin_page(self):
data = self.get_cleaned_data_for_step('0')
return data.get('page')
def get_success_url(self, instance):
entry = self.get_selected_entry()
success_url = entry.get_success_url(
obj=instance,
language=self.language_code
)
return success_url
|
Dave brings 38 years of financial leadership and experience to the management of Otak’s operations. His experience spans large, multinational corporations as well as high-growth, new venture companies in the U.S. and globally. He strives to improve operational efficiency for our clients while driving growth and strategic plan objectives in collaboration with all Otak staff.
|
import cPickle as pickle
import numpy
import glob, os, stat, time, datetime
import re
class TSPLException(Exception):
def __init__(self,arg):
self.value=arg
print self.value
class TSPLBase:
def __init__(self,file,k1,k2):
self.f=open(file)
self.j=pickle.load(self.f)
self.f.close()
try:
self.wayness=int(re.findall('\d+',self.j.acct['granted_pe'])[0])
except KeyError:
try:
self.wayness=self.j.acct['cores']/self.j.acct['nodes']
except ZeroDivisionError:
print "Read zero nodes, assuming 16 way job"
self.wayness=16
try:
self.owner=self.j.acct['owner']
except KeyError:
self.owner=self.j.acct['uid']
self.numhosts=len(self.j.hosts.keys())
if self.numhosts == 0:
raise TSPLException('No hosts')
elif 'amd64_core' in self.j.hosts.values()[0].stats:
self.pmc_type='amd64'
elif 'intel_pmc3' in self.j.hosts.values()[0].stats:
self.pmc_type='intel'
else:
raise TSPLException('No PMC data for: ' + self.j.id)
if self.pmc_type in k1:
self.k1=k1[self.pmc_type]
self.k2=k2[self.pmc_type]
else:
self.k1=k1
self.k2=k2
self.t=(self.j.times-self.j.times[0])
if len(k1) != len(k2):
raise TSPLException('Lengths don\'t match')
self.index=[ self.j.get_schema(self.k1[i])[self.k2[i]].index
for i in range(len(self.k1))]
g=self.j.hosts[self.j.hosts.keys()[0]]
self.size=len(g.stats[self.k1[0]].values()[0])
d=datetime.datetime.fromtimestamp(self.j.acct['end_time'])
self.end_date=d.strftime('%Y-%m-%d %H:%M:%S')
self.title='ID: %(ID)s, u: %(u)s, N: %(name)s, D: %(date)s, NH: %(nh)d' % \
{ 'ID' : self.j.id,'u': self.owner,
'name': self.j.acct['name'], 'nh' : self.numhosts,
'date': self.end_date }
# Create an array of dictionaries of lists initialized and constructed using
# derived class methods for the keys of interest.
    # self.index embeds the location of self.k2 in the schema
self.data=[]
for i in range(len(self.k1)):
self.data.append({})
for k in self.j.hosts.keys():
h=self.j.hosts[k]
self.data[i][k]=self.data_init()
for s in h.stats[self.k1[i]].values():
self.data_assign(self.data[i][k],s[:,self.index[i]])
# Initialize to an empty array and accumulate with appending
def data_init(self):
return []
def data_assign(self,d,v):
d.append(v)
# Generate a label for title strings
def label(self,k1,k2,mod=1.):
u=''
if mod==1e9 or mod == 1024.**3:
u='G'
elif mod==1e6 or mod == 1024.**2:
u='M'
l=k1 + ' ' + k2
s=self.j.get_schema(k1)[k2]
if not s.unit is None:
l+=' ' + u + s.unit
if len(l) > 10:
l=k1 + '\n' + k2
s=self.j.get_schema(k1)[k2]
if not s.unit is None:
l+=' ' + u + s.unit
return l
  # These iterator functions iterate linearly over the array of dictionaries. We
# should probably create a sorted version, but this works for now.
def __iter__(self):
self.ind=-1
self.a=len(self.data)
self.b=len(self.data[0].keys())
self.c=len(self.data[0][self.data[0].keys()[0]])
return(self)
def next(self):
if self.ind == self.a*self.b*self.c-1:
raise StopIteration
self.ind += 1
inds=numpy.unravel_index(self.ind,(self.a,self.b,self.c))
k=self.data[inds[0]].keys()[inds[1]]
return self.data[inds[0]][k][inds[2]]
# Load a job file and sum a socket-based or core-based counter into
# time-dependent arrays for each key pair. Takes a tacc stats pickle file and
# two lists of keys.
class TSPLSum(TSPLBase):
def __init__(self,file,k1,k2):
TSPLBase.__init__(self,file,k1,k2)
  # Initialize with a zero array and accumulate into the first list element with
# a sum
def data_init(self):
return [numpy.zeros(self.size)]
def data_assign(self,d,v):
d[0]+=v
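# Usage sketch (the pickle path and counter names below are hypothetical, for illustration only):
#   ts = TSPLSum('/path/to/job_pickle', ['intel_pmc3'], ['some_counter'])
#   for summed_series in ts:
#       pass  # yields one summed time series per (key pair, host)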
|
The Indian Army is simultaneously pursuing the fulfilment of several outstanding tactical vehicle requirements. The Army is poised to continue to acquire locally manufactured tactical vehicles on a large scale over the course of the coming decade as it modernises its vast transport, logistics and utility vehicle stocks to a more modern standard and replaces its remaining stocks of foreign designs with high-quality indigenous vehicles.
The HMV will serve as a long-term replacement for the Czech-designed Force series of trucks, previously produced under license by Indian state-owned manufacturer BEML Ltd in order to fulfil the same operational functions as the new HMVs.
In 2016, Ashok Leyland executives outlined recent Stallion series orders by the Indian Army in a television interview with local media. The most recent specialised purchase consisted of 450 Super Stallion vehicles and 850 4×4 ambulance variants. According to the executives, Ashok Leyland delivers an average of 2,500 Stallion vehicles to the Indian military on an annual basis.
The long-term requirement to procure large quantities of modernised, locally manufactured vehicle stocks will ensure a steady stream of follow-on orders for tactical vehicles by the Indian military through the next decade and beyond.
Indian tactical vehicles also generate considerable sales on the international defence market, particularly in Southeast Asia and Africa, although the reporting of orders and finalised deliveries in this domain remains largely opaque.
Reports indicate that Ashok Leyland tactical vehicles were sold to Zimbabwe to the tune of over 600 units in 2016, and Malawi was rumoured to be on the verge of signing a similarly large deal with the company that same year. However, it is unclear if an order ultimately materialised.
Although domestic requirements will remain the primary driver of demand for Indian tactical vehicles through the next ten years, medium-scale export sales of defence products to countries in Africa and Southeast Asia are an increasingly important and profitable source of additional revenue for Indian automotive contractors.
|
# Send print to logger
import scriptoutputwriter
# Access C++ module from Python
import reflectiontest
'''
Type testing
https://docs.python.org/2/library/types.html
Types not tested yet:
types.CodeType
types.MethodType
types.UnboundMethodType
types.BuiltinFunctionType
types.BuiltinMethodType
types.ModuleType
types.FileType
types.XRangeType
types.SliceType
types.EllipsisType
types.TracebackType
types.FrameType
types.BufferType
types.DictProxyType
types.NotImplementedType
types.GetSetDescriptorType
types.MemberDescriptorType
types.StringTypes
'''
class OldCallableClassTest:
def __call__( self, value ):
return "Callable class test " + value
class NewCallableClassTest( object ):
def __call__( self, value ):
return "Callable class test " + value
class DescriptorTest( object ):
def __init__( self, value ):
self.value = value
def __get__( self, obj, objtype ):
return self.value
def __set__( self, obj, value ):
self.value = value
def firstn(n):
'''Generator test'''
num = 0
while num < n:
yield num
num += 1
class ValueObjectTest( object ):
'''
Test object for reflected property paths.
The reflection system can get a path for "childTest.tupleTest[0]" only if
the value type is a Python object.
Basic types like int and string do not have path info stored on them.
'''
def __init__( self, value ):
self.value = value
class ChildObjectTest( object ):
def __init__( self ):
self.stringTest = "Child"
self.tupleTest = (ValueObjectTest( 0 ),
ValueObjectTest( 1 ),
ValueObjectTest( 2 ),
ValueObjectTest( 3 ) )
self.listTest = [ValueObjectTest( 0 ),
ValueObjectTest( 1 ),
ValueObjectTest( 2 ),
ValueObjectTest( 3 )]
self.dictTest = {ValueObjectTest( 'Bacon' ) : ValueObjectTest( 0 )}
class BadComparison( object ):
def __cmp__( self, other ):
raise Exception( "Bad comparison" )
class OldClassTest:
'''Test of old-style classes'''
'''
Properties exposed to GUI.
In the format "attribute name" : "meta data name"
'''
_metaData = {
"floatTest" : "MetaSlider",
}
# Enable for testing
#def __setattr__( self, name, value ):
# '''
# Hook for notifying the GUI
# '''
# print "setattr", self, name
# self.__dict__[ name ] = value
# Enable for testing
#def __delattr__( self, name ):
# '''
# Hook for notifying the GUI
# '''
# print "delattr", self, name
# del object.name
classIntTest = 1
def __init__( self ):
self.noneTest = None
self.boolTest = True
self.intTest = 1
self.longTest = 1L
self.floatTest = 1.0
#self.complexTest = 1.0j
self.stringTest = "Spam"
self.unicodeTest = u"Spam"
self.childTest = ChildObjectTest()
self.tupleTest = (1, 2, 3, "Spam")
self.listTest = [0, 1, 2, 3]
self.dictTest = {'Bacon': 1, 'Ham': 0}
self.functionTest1 = \
lambda testString: "Function test " + testString
self.functionTest2 = OldCallableClassTest()
self.functionTest3 = NewCallableClassTest()
#self.generatorTest = firstn
self.badComparison = BadComparison()
# Old-style classes only
self.typeTest1 = type( OldClassTest )
self.typeTest2 = type( self.typeTest1 )
self.classTest1 = OldClassTest
self.classTest2 = self.__class__
self.instanceTest = type( self )
def methodTest( self, testString ):
return "Method test " + testString
@classmethod
def classMethodTest( cls, testString ):
return "Class method test " + testString
@staticmethod
def staticMethodTest( testString ):
return "Static method test " + testString
class ConstructorTest1:
def __init__( self, value ):
self.constructorTest = "Constructor class test " + value
class ConstructorTest2:
pass
def updateValues( self ):
OldClassTest.classIntTest = OldClassTest.classIntTest + 1
self.noneTest = None
self.boolTest = not self.boolTest
self.intTest = self.intTest + 1
self.longTest = self.longTest + 1
self.floatTest = self.floatTest + 1.0
self.stringTest = "Spam" + repr( self.intTest )
self.unicodeTest = u"Spam" + repr( self.intTest )
class NewClassTest( object ):
'''Test of new-style classes'''
'''
Properties exposed to GUI.
In the format "attribute name" : "meta data name"
'''
_metaData = {
"floatTest" : "MetaSlider",
"readOnlyPropertyTest1" : "MetaReadOnly",
"readOnlyPropertyTest2" : "MetaReadOnly",
}
# Enable for testing
#def __setattr__( self, name, value ):
# '''
# Hook for notifying the GUI
# Note: descriptors will not be caught by this hook.
# '''
# print "setattr", self, name
# super( NewClassTest, self ).__setattr__( name, value )
# Enable for testing
#def __delattr__( self, name ):
# '''
# Hook for notifying the GUI
# Note: descriptors will not be caught by this hook.
# '''
# print "delattr", self, name
# del object.name
classIntTest = 1
def __init__( self ):
self.noneTest = None
self.boolTest = True
self.intTest = 1
self.longTest = 1L
self.floatTest = 1.0
#self.complexTest = 1.0j
self.stringTest = "Spam"
self.unicodeTest = u"Spam"
self.childTest = ChildObjectTest()
self.tupleTest = (1, 2, 3, "Spam")
self.listTest = [0, 1, 2, 3]
self.dictTest = {'Bacon': 1, 'Ham': 0}
self.functionTest1 = \
lambda testString: "Function test " + testString
self.functionTest2 = OldCallableClassTest()
self.functionTest3 = NewCallableClassTest()
#self.generatorTest = firstn
self.badComparison = BadComparison()
# New-style classes only
self.typeTest1 = type( NewClassTest )
self.typeTest2 = type( self.typeTest1 )
self.classTest1 = NewClassTest
self.classTest2 = self.__class__
self.instanceTest = type( self )
self.propertyTest1_ = "Read-only Property"
self.propertyTest2_ = "Read-only Property"
self.descriptorTest = DescriptorTest( "Descriptor property" )
def methodTest( self, testString ):
return "Method test " + testString
def getReadOnlyPropertyTest1( self ):
'''Only works for new-style classes'''
return self.propertyTest1_
readOnlyPropertyTest1 = property( getReadOnlyPropertyTest1 )
@property
def readOnlyPropertyTest2( self ):
'''Only works for new-style classes'''
return self.propertyTest2_
@classmethod
def classMethodTest( cls, testString ):
return "Class method test " + testString
@staticmethod
def staticMethodTest( testString ):
return "Static method test " + testString
class ConstructorTest1( object ):
def __init__( self, value ):
self.constructorTest = "Constructor class test " + value
class ConstructorTest2( object ):
pass
def updateValues( self ):
NewClassTest.classIntTest = NewClassTest.classIntTest + 1
self.noneTest = None
self.boolTest = not self.boolTest
self.intTest = self.intTest + 1
self.longTest = self.longTest + 1
self.floatTest = self.floatTest + 1.0
self.stringTest = "Spam" + repr( self.intTest )
self.unicodeTest = u"Spam" + repr( self.intTest )
def run():
print "~~ Begin test"
print "~~ Python to C++"
oldClassTest = OldClassTest()
reflectiontest.oldStyleConversionTest( oldClassTest )
newClassTest = NewClassTest()
reflectiontest.newStyleConversionTest( object=newClassTest )
print "~~ Passed"
print "~~ End test"
|
Why is it so hard for business leaders to admit they know less than they thought they did about their circumstances and must instead come to grips with the facts in order to confront reality? Before we can answer that question, we must first ask: Why is it that we expect CEOs and others in the executive leadership suite to have all the best answers instead of asking all the best questions?
The answer to these rhetorical questions lies in the immense pressure to justify a CEO’s leadership to a board of directors and the so-called shareholders they represent. I say “so-called” because, at least in our publicly traded corporations, shareholders are often algorithms that couldn’t care less whether or not the company is sound or sustainable, but rather whether the price will go up or down in the next millisecond.
Algorithms aside, we all seem to understand that CEOs who fail to produce profits and growth that outpace their competitors’ will be punished in the stock market and, eventually, will be forced out in favor of one who can. Fortune 500 CEO tenure has been dropping since 2000 and is now down to 4.6 years.
The same study cited the differences between good and bad CEOs were largely driven by unexpected traits. Execution alignment was important, but it was not as important as managing complexity, engaging and inspiring the workforce, and instilling trust in company culture.
In the same Fortune interview last week, Merlo described how his company had to ask hard questions. If they’re a healthcare company, why are they selling tobacco products? Why are they selling painkillers they know will contribute to the opioid epidemic? Whether Merlo is a good example of a teachable business leader can be debated: He was also the CEO with the worst pay gap with employees in the Fortune 500.
Role model or not, the question remains valid: If the CEO isn’t willing to ask tough, self-critical questions, who will?
“Making Business Leaders Teachable” is the theme for Tuesday, April 24 agenda at RECONVERGE:G2 2018. And we think we’ve got an outstanding plan for how to do just that!
Tuesday morning, after breakfast and check-in, we’ll enjoy remarks from one of the most innovative CEOs in the region – someone we’ll introduce you to soon in a future post. This CEO founded a company that created their category almost 40 years ago and continued to ask the tough questions necessary to fulfill a mission in the world that goes far beyond profits or growth.
Next, it’s time to meet our 2018 Mathews Medal winners! Every year since 2012, Aurora WDC has brought two university students with competitively selected essays on the role intelligence has to play in business together with the most innovative intelligence leaders from business to push their learning to another level, on both sides of the podium. The Jim Mathews Award for Intelligence Excellence has become a prestigious and competitive honor for those who achieve it. If you know of university students who would qualify, let them know that essay submissions are now being taken.
After lunch, we’ll have three more such “dynamic duo” sessions on topics related to how intelligence makes business leaders more teachable, concluding our day with a 3-hour reception and dinner featuring our second annual “Presentation Smackdown” where competitors go head-to-head presenting slides they’ve never seen before and must use to argue diametrically opposed hypotheses effectively. Only the bravest presenters should try this in the wild, because the audience will select a winner based solely on how influential (and entertaining) the argument was!
But you’ll miss all of that, if you don’t register and tickets are already going fast!
|
""" Holds configuration class """
from foscambackup.util import helper
from foscambackup.constant import Constant
import foscambackup.util.file_helper as file_helper
class Config:
""" Hold the config options for use in program """
host = ""
port = 0
username = ""
password = ""
model = ""
currently_recording = False
def __str__(self):
return str(self.__dict__)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def get_model_serial(self, read_file):
""" get the model serial from file """
data = read_file.readlines()
data[len(data)-1] = 'model_serial:' + self.model
file_helper.open_write_file(Constant.settings_file, self.write_model_serial, data)
def write_model_serial(self, write_file, args):
""" Write the data to file """
write_file.writelines(args['data'])
def write_model_to_conf(self, model):
""" Retrieves the model_serial folder name and writes to conf """
self.model = model
file_helper.open_readonly_file(Constant.settings_file, self.get_model_serial)
|
I’ll be in the Bay Area for some consulting work Nov 15-19, so I decided it’s time for another Ribbonfarm Field Trip. If you missed the first one (Sausalito Houseboats), I hope you can make this one. We had a lot of fun last time (here’s the post about Field Trip #1, with more pictures).
This time, I thought it would be interesting to visit the Computer History museum in Mountain View and chat over coffee afterwards.
We’ll meet at 1:00 PM on Saturday, November 19. Click here to register (free). You’ll have to buy a ticket to enter the museum itself when you get there ($15 general admission). I’ll buy everyone a round of coffee after we’re done (after all, you guys have been buying me coffees for years now).
I keep meaning to visit each time I am in the area, but something always gets in the way. With the passing of Steve Jobs and an equally important academic figure, John McCarthy, it’s an interesting time to take stock and ponder the future of technology from the perspective of the longer story, now that Act I is sorta symbolically over. I am also reading Neal Stephenson’s Cryptonomicon right now and noodling around with themes for my next book, which will likely have a strong technology angle. So all in all, we have ingredients for an interesting conversation. I’ll try to rope in a couple of gray eminences who’ve survived a couple of boom-bust cycles, to talk history and context at us.
If you register and later need to cancel, let me know. Last time, we had some people bailing at the last minute without telling me, so I didn’t have time to let the waiting-list people know, and we ended up with extra box lunches.
Same as last time, let me know if you need to carpool.
And in case you’re wondering about the mysterious, missing Field Trip #2, that was actually an exploration of the Las Vegas storm drain system a couple of weekends ago with Bay Area reader Laura Wood, who I met on Field Trip #1.
I learned about the extensive storm drain system (hundreds of miles of tunnels under Las Vegas) from another reader, Josh Ellis, one of exactly two readers I appear to have in Las Vegas, and told Laura about them during the first field trip.
I had no more than a casual curiosity at that point, but Laura got interested enough that she hunted down the author of a book about the storm drain system and the homeless people living in them (Matt O’Brien, the book is Beneath the Neon, I am reading it now).
When Laura told me she wanted to come down to Vegas and explore the storm drains, we briefly talked making it a larger group event and roping in more readers from the Bay Area and LA, but ultimately decided it would be too dicey.
So it was just the two of us. We first met up with Matt, got some advice and tips, and then spent several hours over the next two days exploring miles and miles of underground tunnels, filled with fantastic graffiti, garbage, smelly water and a few homeless people.
Later, I met up separately with Matt and Josh over coffee and chatted more about this and that (Josh did the initial explorations and co-authored some articles with Matt, who later explored the storm drains more deeply and wrote the book).
I’ll write a longer post about the storm drains at some point, once I am done with Matt’s book.
Anyway, if you’re up for Field Trip #2, go ahead and register.
Also, if you’re interested in meeting up 1:1 for lunch/dinner/coffee between Nov 15 – Nov 19, email me.
And finally, once again I am in the market for couches. Rather than wearing out my welcome with my gracious hosts from last time (thanks Mark, Jane and Greg) I figured I’d see if there were other potential hosts out there with whom I could stay and explore more Bay Area neighborhoods. I’ll need a place to stay the nights of Nov 15, 16, 17 and 18.
But surely Dennis Ritchie should be on your recently deceased computer scientists list.
He shaped the current computing landscape more than anyone – C & Unix to name just two.
Without Dennis, there would have been no NeXT for Steve to rip off.
Wow that’s so cool that the two of you actually went down the storm drains! I can’t wait to hear more about it.
|
# -*- coding: utf-8 -*-
"""
moe.paste.handlers
~~~~~~~~~~~~~~~~~~
Handlers for a really simple pastebin.
:copyright: 2010 by tipfy.org.
:license: BSD, see LICENSE.txt for more details.
"""
from tipfy import NotFound, request, Response, url_for, redirect_to
from tipfy.ext.i18n import _
from moe.base.handlers import AreaRequestHandler
from moe.paste.models import Paste, PasteForm
from moe.paste.highlighting import highlight
class PasteBaseHandler(AreaRequestHandler):
"""Base class for the pastebin."""
def __init__(self, app, request):
AreaRequestHandler.__init__(self, app, request)
# Set a flag in context for menus.
self.request.context['current_app'] = 'paste'
# Initialize list of breadcrumbs.
self.breadcrumbs = []
def get_breadcrumb(self, endpoint, text, **kwargs):
return (url_for(endpoint, area_name=self.area.name, **kwargs),
text)
def add_breadcrumb(self, endpoint, text, **kwargs):
self.breadcrumbs.append(self.get_breadcrumb(endpoint, text, **kwargs))
def render_response(self, filename, **values):
self.request.context['breadcrumbs'] = [
self.get_breadcrumb('home/index', _('Home')),
self.get_breadcrumb('paste/index', _('Paste'))] + self.breadcrumbs
return super(PasteBaseHandler, self).render_response(filename, **values)
class PasteNewHandler(PasteBaseHandler):
"""Displays a paste form and saves a new paste."""
form = None
def get(self, **kwargs):
context = {
'form': self.form or PasteForm(language=kwargs.pop('language',
'python')),
}
return self.render_response('paste/new.html', **context)
def post(self, **kwargs):
self.form = PasteForm(request.form)
if self.form.validate():
if self.current_user:
user_key = str(self.current_user.key())
else:
user_key = None
language_code = request.form.get('language')
code_raw = request.form.get('code', u'')
code = highlight(code_raw, language_code)
values = {
'area_key': str(self.area.key()),
'user_key': user_key,
'code_raw': code_raw,
'code': code,
'language': language_code,
}
paste = Paste(**values)
paste.put()
self.set_message('success', _('The paste was saved.'), flash=True)
return redirect_to('paste/view', paste_id=paste.id,
area_name=self.area.name)
else:
self.set_form_error(_('Ooops, code is empty! Please post '
'some lines.'))
return self.get()
class PasteViewHandler(PasteBaseHandler):
"""Displays a paste."""
def get(self, **kwargs):
paste_id = kwargs.pop('paste_id', None)
if not paste_id:
raise NotFound()
paste = Paste.get_by_id(paste_id)
if not paste:
raise NotFound()
self.add_breadcrumb('paste/view',
_('Paste #%(paste_id)s', paste_id=paste.id),
paste_id=paste.id)
form = PasteForm(code=paste.code_raw, language=paste.language)
context = {
'paste': paste,
'form': form,
}
return self.render_response('paste/view.html', **context)
class PasteViewRawHandler(PasteBaseHandler):
"""Displays a paste in raw mode, as text."""
def get(self, **kwargs):
paste_id = kwargs.pop('paste_id', None)
if not paste_id:
raise NotFound()
paste = Paste.get_by_id(paste_id)
if not paste:
raise NotFound()
return Response(paste.code_raw)
class PasteListHandler(PasteBaseHandler):
"""Not implemented."""
def get(self, **kwargs):
context = {
}
return self.render_response('paste/new.html', **context)
|
What is the cost of ideas? What normally happens when people come up with bright ideas at work? A manager will typically calculate the cost of implementing it. This cost will then be balanced against the value potential of the idea. This is normally additional income from increased sales or reduced operational costs. The more creative an idea is, the harder it can be to determine the value in monetary terms. Many potentially very exciting ideas are not implemented simply because a manager has decided that to do so would be too costly.
Many managers are excellent at working out the cost of implementing an idea. They often fail, however, to calculate the cost of NOT implementing an idea, which can be far more than the cost of implementing it. The cost of ideas is thus a two-sided coin.
How much does it cost not to implement an idea? Here is a simple example where an idea might lead to cost savings on a production line. The cost of the idea in terms of equipment and labour is USD500,000 and is a one-off cost. As a result of this, the cost of manufacturing each widget that comes off your production line is reduced by USD5.00. Your Sales department tells you that you are currently making 100,000 widgets each year. Sales are expected to rise 10% per year over the 5 year life of the equipment.
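To make the two-sided calculation concrete, here is a minimal sketch in Python using only the figures assumed above (a USD 500,000 one-off cost, a USD 5.00 saving per widget, 100,000 widgets in year one, and 10% annual sales growth over the five-year life of the equipment):
# Minimal sketch of the widget example above; all figures are the stated assumptions.
one_off_cost = 500000        # USD, equipment and labour to implement the idea
saving_per_widget = 5.0      # USD saved on each widget produced
units_first_year = 100000    # widgets produced in year one
annual_growth = 0.10         # expected yearly growth in sales
equipment_life = 5           # years

total_saving = sum(saving_per_widget * units_first_year * (1 + annual_growth) ** year
                   for year in range(equipment_life))
cost_of_not_implementing = total_saving - one_off_cost
print("Savings over five years: USD %.0f" % total_saving)                  # ~3,052,550
print("Net cost of NOT implementing: USD %.0f" % cost_of_not_implementing) # ~2,552,550
Under those assumptions, declining to spend USD 500,000 quietly costs the business roughly USD 2.5 million over the life of the equipment.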
Things are not always this easy though. Imagine that one of your R&D staff has come up with a pen-sized device that can see through solid objects. There are potential applications in medicine, construction and intelligence gathering, to name a few. To get such a device into production might cost, say, USD50,000,000, but how can you predict the sales potential of such new technology? How can you also keep it secret from your competitors until launch? The potential seems huge but you cannot put your finger on it.
We know that ideas do not spring from single sources. It is likely that a competitor will come up with a similar idea at some point. What will they do? Will they develop the idea and create a new product? What will happen, will it be a success? If it is then you lose out big time in terms of cash. But what about your reputation?
The cost of not implementing an idea might be both financial and long-lasting damage to your reputation and brand.
|
import sys
sys.path.append('../../vmdgadgets')
import vmdutil
from vmdutil import vmddef
def replace_controlpoints(cp_all, cp, index):
for i in range(4):
cp_all[i][index] = cp[i]
return cp_all
sine1 = vmdutil.SINE1_CONTROLPOINTS # sin, [1x, 1y, 2x, 2y]
sine2 = vmdutil.SINE2_CONTROLPOINTS # 1 - cos
cp_all = vmddef.BONE_LERP_CONTROLPOINTS # [1x[X,Y,Z,R], 1y[],2x[],2y[]]
replace_controlpoints(cp_all, sine1, 2) # Z: sin
replace_controlpoints(cp_all, sine2, 0) # X: (1 - cos)
interpolation1 = vmddef.bone_controlpoints_to_vmdformat(cp_all)
cp_all = vmddef.BONE_LERP_CONTROLPOINTS
replace_controlpoints(cp_all, sine1, 0) # X: sin
replace_controlpoints(cp_all, sine2, 2) # Z: (1 - cos)
interpolation2 = vmddef.bone_controlpoints_to_vmdformat(cp_all)
bone = vmddef.BONE_SAMPLE
bone_frames = []
initial_frame = bone._replace(position=(30, 0, 0))
bone_frames.append(initial_frame)
# frame 30: X:sine2, Z:sine1, (0, 0, 30)
bone_frames.append(
bone._replace(
frame=30, position=(0, 0, 30), interpolation=interpolation1))
# frame 60: X:sine1, Z:sine2, (-30, 0, 0)
bone_frames.append(
bone._replace(
frame=60, position=(-30, 0, 0), interpolation=interpolation2))
# frame 90 X:sine2, Z:sine1, (0, 0, -30)
bone_frames.append(
bone._replace(
frame=90, position=(0, 0, -30), interpolation=interpolation1))
# frame 120 X:sine1, Z:sine2, (30, 0, 0)
bone_frames.append(
bone._replace(
frame=120, position=(30, 0, 0), interpolation=interpolation2))
vmdout = vmdutil.Vmdio()
vmdout.header = vmdout.header._replace(
model_name='circle_sample'.encode(vmddef.ENCODING))
vmdout.set_frames('bones', bone_frames)
vmdout.store('circle.vmd')
|
Data-driven marketing is simply the act of using data collected through consumer engagements with a website to make marketing decisions like creating relevant products, optimizing your website to match what your visitors want, and then showing them relevant ads and promotions in the future.
What will be your digital marketing business’ primary goal this year and the most effective method for achieving it?
How are you planning to create more success for your online marketing strategy this year and beyond?
Does data drive your decisions or do you rely on hunches and gut feelings?
According to data-driven marketing case studies like the 2017 Digital Marketing Survey, 70% of respondents said their primary goal was web traffic and 68% answered brand awareness.
Almost 70% of the survey respondents said that they must do better planning when it comes to their digital strategy. In addition, 44% of the organizations said they planned on training and improving their knowledge on the subject.
If you’re unsure, know that you’re not alone. Another study found that one-third of B2B marketers say they weren’t clear as to which digital marketing approach had the most positive impact on revenue. Luckily, we recently published an article on 7 actionable data-driven marketing examples you can apply right now to begin making business decisions.
Fast, scalable, secure and intuitive interface.
Enterprise features and pricing plans.
Google Analytics is the trendiest web analytics platform that uses big data technology to track website behavior. It reports visitor activity, such as how users engage with your content, the amount of traffic a specific page gets, average time spent on site, bounce rates and e-commerce sales, and it can even identify your most profitable online marketing channels.
The vast majority of marketing teams across the globe use Google Analytics as the foundation for their data-driven marketing strategy. In fact, Progressive used Google Analytics to improve their mobile app experience, which resulted in $2 billion in written premiums in a single year. According to Marketing Land, about 30–50 million websites use the Google Analytics tools. With more than 12 years in business and being free of charge, Google Analytics continues to be a valuable business tool.
Suppose, for instance, that you are an airplane pilot and web analytics is the cockpit control panel. The dashboard displays information about how the airplane is flying and what adjustments are needed: your mileage, how long you have been flying, the engine's efficiency and how much fuel is left in the tank.
Google Analytics is that dashboard for your website, tracking and reporting on whether it is working efficiently and effectively.
It's a freemium web data analytics service offered by Google. It's completely free for all internet data analysis, with an easy dashboard and simple interface. Its interface also provides many free web research tools for Conversion Rate Optimization (CRO), including a wide layout for B2B data-driven marketers and for small businesses.
It also features an integration with AdWords, with which a digital marketer can easily track landing page quality and conversions.
What are the Best B2B Lead Generation Strategies and tools?
For 70% of B2B marketers, the top challenges are generating more leads and improving the value of those leads.
With a total of 467 million users, LinkedIn is the world's largest social network for working professionals, making it a lucrative source of information for B2B marketers' prospecting and lead generation efforts. However, LinkedIn's limitations, such as its InMail restrictions and lack of access to users' real-time, current contact information, can sometimes lead to frustrating dead ends.
Through its browser plug-in, Scout taps into an extensive real-time database: you pay only for triple-verified, current professional contact information (email, phone and postal) for your prospects, without having to add them as LinkedIn connections.
Once you add the Scout extension to Chrome, you can search and add as many contacts as you want via the “Add” button visible against the prospect's or company's name.
Create and add as many contacts as you want to one or many lists in the Scout dialogue box found at the bottom of the page.
There’s no limit to adding contacts or creating lists. This allows you to organize and segment your audience in the way that works best for you.
Scout is just one part of a set of tools offered by Stirista, a data-driven marketing agency that specializes in creating custom audience segments and executing campaigns via digital, email, and social channels.
Gain full access to the power of Stirista’s 30 million B2B contacts.
Triple-verified contacts save you both time and money.
Email your contacts directly without the need for third-party vendors.
All Scout subscribers get FREE: Stirista Access, the powerful online data management, and marketing tool.
Pricing: Free 14-day trial includes 25 triple-verified contacts.
* All data used by Scout is run through three thorough processes: An SMTP check, an MX lookup and a correction of syntax errors.
Subscription plans are monthly based, so you can subscribe or unsubscribe at any time and at your convenience.
Custom Credit plans are available for those who like to pay as they go. Custom credit quantities are one-time, bulk credit purchases at the standard credit purchase rate (1,000 credits = $10.00). These credits do not expire and do not provide any volume discounts or automatic replenishment.
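For illustration only, here is a rough Python sketch of the kind of checks described above (a syntax check plus an MX lookup). It is not Scout's actual pipeline, it assumes the third-party dnspython package is installed, and it omits the SMTP-level check entirely.
import re
import dns.resolver  # third-party: pip install dnspython (resolve() needs dnspython >= 2.0)

EMAIL_RE = re.compile(r"^[^@\s]+@[^@\s]+\.[^@\s]+$")

def looks_deliverable(email):
    """Very rough check: plausible syntax and a domain that publishes MX records."""
    if not EMAIL_RE.match(email):
        return False  # fails the syntax check
    domain = email.rsplit("@", 1)[1]
    try:
        answers = dns.resolver.resolve(domain, "MX")  # MX lookup
    except Exception:
        return False  # unknown domain, no MX records, or DNS failure
    return len(answers) > 0

print(looks_deliverable("someone@example.com"))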
Do you know how Conversion Rate Optimization (CRO) Tools turn your visitors into leads in B2B Marketing?
B2B Marketers struggle to convert leads. Whoisvisiting is a data-driven marketing CRO tool that increases your ROI by understanding which of your marketing campaign channels is steering the highest number of companies to your website.
Email addresses, telephone numbers, and other contact information are examples of what you can measure with this analytical tool by tracking B2B visitors. The competitive advantage for you is the ability to find out who the ‘real’ visitors are on your site. The visitors become ‘real identities’ instead of numbers, which helps identify their interests and allows you the ability to contact them to seal the deal.
This is one of the fastest growing data-driven marketing trends in 2018 and beyond, especially for B2B and SaaS companies: having the ability to know the actual companies visiting your website, then using that data to find out how your product and service can be relevant to those companies. Consequently, case studies, whitepapers, articles, and brochures can then be created addressing how your products can be used to solve a common problem a particular company (that frequently visits your website) is facing. Armed with these resources, your sales team can begin having meaningful conversations with prospects. From the prospect's point of view, this feels like you know exactly what they are going through, which could lead to a closed deal. That is the power of data-driven marketing.
Pricing: 14 days free trial for any plan.
Have you learned about heat mapping? And which heat mapping tools are most convenient for your website?
A heat map is a two-dimensional graphical representation of data in the form of a map or diagram in which the values of data are illustrated by different colors.
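As a purely illustrative sketch (the data below are synthetic, not real click data), a few lines of Python with numpy and matplotlib show the idea: a 2-D grid of values rendered as colours.
import numpy as np
import matplotlib.pyplot as plt

# Fake "click counts" on a 20 x 30 grid standing in for regions of a page.
clicks = np.random.poisson(lam=3, size=(20, 30))

plt.imshow(clicks, cmap="hot", interpolation="nearest")
plt.colorbar(label="clicks")
plt.title("Synthetic click heat map")
plt.show()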
Crazy Egg has been in business 14 years and serves more than 100,000 customers. This robust heat mapping tool shows you how users scroll and click through your website, as well as calculates pages viewed and clicked, and tabs and links clicked, providing you with the most relevant glimpse of what is and what’s not gaining the attention of your website’s visitors. It’s a great tool for visualizing web activity and analysis data.
Pricing: 30 days free trial for all plans.
The most powerful way to extend your reach to your target audience without relying on paid media is influencer marketing and outreach, because influencers bring ready-made audiences of their own.
BuzzSumo can analyze the key influencers’ ideas for your Content Marketing strategy.
Do you want to figure out which online outlets or influencers are getting the most traction in your space?
Use the “Influencers Search Tools” to search the key influencers in any topic, area or place.
Filter and follow influencers via the “Analyze and Follow Influencers” button.
The “Export Influencers” option lets you export detailed analysis data as a CSV or Excel file.
BuzzSumo offers a search option for finding backlinks. The Backlinks tool illustrates which websites are linking to top content by keyword.
Search options for finding backlinks.
Easy to understand for customer content curation.
Simple to find the best topic for your niche audience.
Pricing: Free for five searches.
Pro $79 – 5 alerts and 5K mentions.
Plus $139 – 10 alerts, 10K mentions and question analyzer.
Large $239 – 30 alerts, 20K mentions, question & Facebook analyzers.
HubSpot started in 2005 and today has more than 30,000 plus customers in 90 countries, as well as 3,400 agency partners. Offering paying customers 24/7 live chat, community support, phone and email support, HubSpot offers a one-stop-shop approach with content marketing management, social media marketing, landing pages, search engine optimization (SEO) and web analytics.
Create personalized landing pages, message-scheduling, social-media analytics, emails and more.
Blogging: Blog SEO recommendations and analytics, integrated social publishing and mobile optimization to attract leads to your site.
Search engine optimization (SEO): An easy way for your prospects to find your website.
Calls-to-Action (CTAs): Without any technical or design skill, you can build smart A/B testing of CTAs and drive leads through the funnel.
Sales hub: Free and paid monthly subscription options.
Starter $50 per month one (1) user included.
Professional $400 per month five (5) users included.
Marketing hub: Free and paid subscription options.
Curata has spent more than 20 years saving time and energy for B2B and B2C businesses of every size, from small companies to large enterprises. Curata allows you to create the right content for the right people at the right time.
Curata's Content Marketing Platform helps optimize the content supply chain and enables insight into content ROI.
The main components of Curata CMP include strategy, production, and analysis.
The editorial calendar and workflow let you streamline content planning and creation.
The analytics function helps you visualize which content is best for social sharing, leads generated and pipeline touched, and pinpoint specific content to influence and increase ROI.
Identify and make the most of your best contributing authors.
24/7 customer service includes knowledge base, online and phone support; and video tutorials.
Integrates with CMS (WordPress, Joomla, Drupal and more), Eloqua, Marketo, MailChimp, social media and Google Analytics.
Pricing: No credit card required during free trial.
Learn how “readability” scores can help your Content Marketing strategy. Here's how the combination of these two tools can help boost your content ROI.
Every time I open my laptop, both Hemingway Editor and Grammarly are my go-to resources. I always run content through both tools to improve grammar and readability. However, they have different goals. Here are the main advantages of each.
When you copy and paste your content into the Hemingway editor, it marks up your writing boldly and clearly with blocks of color that you can hover over for suggestions to improve it. By checking the readability score, you can tell that readers will find your content easy to read if it scores between grade 6 and grade 9.
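For a sense of where such a grade comes from, here is a minimal Python 3 sketch using the standard Flesch-Kincaid grade-level formula with a very crude syllable estimate; Hemingway's own scoring is more sophisticated, so treat this as an approximation only.
import re

def crude_syllables(word):
    # Count groups of vowels as syllables -- a rough approximation.
    return max(1, len(re.findall(r"[aeiouy]+", word.lower())))

def flesch_kincaid_grade(text):
    sentences = max(1, len(re.findall(r"[.!?]+", text)))
    words = re.findall(r"[A-Za-z']+", text)
    n_words = max(1, len(words))
    syllables = sum(crude_syllables(w) for w in words)
    return 0.39 * (n_words / sentences) + 11.8 * (syllables / n_words) - 15.59

sample = "The quick brown fox jumps over the lazy dog. It runs very fast."
print(round(flesch_kincaid_grade(sample), 1))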
Grammarly is a free proofreading, grammar and spell checker.
Grammarly Premium is a paid upgrade that offers its users additional services.
For instance, you could paste 75,000 words of content into the editor function, which analyzes it in seconds.
Applies more than 400 advanced rules before delivering your content's grammar, spelling and punctuation errors, as well as style, sentence structure and citation suggestions. Includes a plagiarism detector.
Technical support via email, Q&A resource, and community support.
$19.99 downloadable app for both Mac and PC. Includes free upgrades.
Social media (12%), email marketing (13%) and SEO (14%) are the three best lead sources for B2B companies. LinkedIn alone generates 80% of B2B marketers' social media leads from its 500 million members. An automated lead generation and sales prospecting tool can save innumerable hours of prospect research and follow-up.
LeadFuze helps B2B companies discover new leads and have more sales conversations automatically.
Click “Search Leads” and filter by industry, employee size, role, keywords, etc.
Fuzebot is LeadFuze's automated lead generation tool that adds leads matching your saved personas daily.
It finds thousands of lead email addresses matching your requirements. Plus, the emails are double-verified.
You can search by individual contact or by importing a CSV list, and use the “Blacklist” to ignore any company or email.
LeadFuze has drip email campaign features that allow you to connect with your end-users personally.
Through cold email automation, you can scale the outbound automated lead generation.
It handles automated email in one platform.
Both the “Sequence” and “Scheduling” features let paid users create emails that are sent to leads automatically.
The “Reporting” features provide comprehensive insight into click-through rates.
Track and report lead interactions, with details on your split-test emails.
LeadFuze has integrations with Hubspot, Close.io, Pipedrive, SalesForce, Zapier, IMAP and Gmail.
Everyone has a major challenge in getting an audience to completely and accurately fill out the forms, especially on mobile, due to the small screen and keyboard size. That’s why LinkedIn created “Lead Gen Forms,” an apex data-driven marketing tool, which allows you to collect high-quality leads instantly. For LinkedIn members, the form will already have information filled in.
To start using Lead Gen Forms, sign in to “Campaign Manager” and create a new Sponsored Content campaign and follow the steps indicated.
Lead Gen Forms can be attached to new Sponsored Content or Sponsored InMail campaigns when you select the campaign objective of “Collect leads using LinkedIn Lead Gen Forms” when creating the campaign workflow.
Once the content or campaign has been created, LinkedIn members who click on your LinkedIn ad or profile can click on a Call-to-Action (CTA), such as downloading an eBook or signing up for your webinar.
When they click the advertisement, they see a form pre-filled with professional information from their LinkedIn profile, including their name, contact info, company, job title, seniority and so on.
Anyone can instantly send you their information with a single click, without having to manually type anything.
The Lead Gen Form data tends to be exact and detailed, as people keep their profiles updated.
When someone submits a form, they automatically see a custom thank you page that connects to your eBooks, websites, or other destination you specify.
Click on “Campaign Manager” to download the lead information.
Manage the lead in the CRM or Marketing automation of your choice.
Analyze reports of ROI, cost per lead and form completion rate.
Note: The minimum CPC or CPM bid for Sponsored Content varies upon the targeted audience.
As a Data Research Analyst at Stirista, I search for leads and rely on LinkedIn and other prospecting tools that offer validated contact data. Since Rapportive was acquired by LinkedIn, the tool now brings only LinkedIn profiles into Gmail.
Which lead generation tools help to verify email addresses or Invalid addresses?
How can you verify someone's identity and see their LinkedIn profile as you reach out to them?
Which tools can turn your Gmail inbox into a powerful LinkedIn lead generation and sales prospecting tool?
Rapportive is an email add-on that displays LinkedIn information about leads right in your inbox, and its social CRM tools let you manage customer data and interactions.
Start by installing the free add-on for Google Chrome or Firefox.
Check your regular email to find out who the most important people in your business are, or enter an email address found through other tools.
The tool shows the full LinkedIn profile, including job title, position, location, and shared connections with common interests.
You can find out whether a person's email address is valid or invalid and easily send emails.
For all users – beginners to advanced.
Buffer is an invaluable, user-friendly online marketing tool for social media scheduling with handy reporting. Anyone can use Buffer regardless of their social media skills or experience. Buffer serves more than 3 million people, focusing on simplicity of design to help them reach their social audiences!
Allows you to schedule social media posts, distribute them and access analytics.
All scheduled posts for Facebook, Twitter, LinkedIn, Pinterest, Google+ and Instagram can easily be viewed in the Content tab.
Using multi-media extensions, you can optimize video and images for different social platforms.
Drag and drop posts to reorder them and avoid scheduling the same post twice.
The Buffer browser extension features a “Power Scheduler” that makes it easier to schedule many social posts across separate times and social accounts.
Perfectly sized images with Pablo.
Effortless content optimization and post analysis, and a smarter way to share on social networks.
Tiered pricing structure for Individuals or for Teams and Agencies.
Oktopost is a social media management platform specifically designed and focused on B2B marketing. With Oktopost, your marketing campaign can reach multiple social media platforms from a single place. Furthermore, it tracks each lead generated and provides analytics to help you understand which networks, profiles, and posts are most effective.
Manage as many campaigns as you like, and see right away the posts and how many clicks and comments they’ve received. Plus, you can see published or unpublished posts.
Manage the sharing of your content on LinkedIn Groups and Facebook pages.
Find out which posts and groups are generating visitors that are most likely to convert into leads.
Know which groups pay more attention.
Easily access an inbound traffic generation report, as well as other marketing KPIs, such as the number of lead conversions and comments on your posts.
Inbound leads generated from campaigns are tracked back to their campaign of origin.
Leads can easily be synchronized with your CRM to capture detailed, per-lead information.
Measure the true ROI of social media.
The Social Inbox scans private LinkedIn groups for comments on your posts and alerts you to unread comments.
Outstanding Integration with Google Analytics, Marketo, Salesforce, Bit.ly, and GoToWebinar.
Digital marketers should know how visual content stimulates online growth and engagement, as well as the power of an attractive image in a social media marketing campaign. With more than 10 million users worldwide, Canva allows you to create quality social media images with an easy ‘drag and drop’ interface you can utilize without having to learn design!
Layouts and templates are prefabricated and customizable.
Create social media banners and posts, covers for documents, advertisement banners, invitations, greeting cards, photo collages, infographics, eBooks, book covers, music album covers, marketing fliers, posters, restaurant menus and more.
Integrate with the stock photo bank.
Easy to customize your own photos.
For teams with 30 or more members, call to set up a customizable plan.
Bitly has been in business 10 years and has millions of users, including more than half of the Fortune 500. This tool shortens URLs, allowing tracking, sharing and managing your favorite links from around the internet. The short URL can be used in SMS, text messages, Twitter, email campaigns and in presentations.
After logging in to Bitly, you can easily shorten and customize links to share on various social media platforms like Twitter. After creating a custom link, you can analyze social media traffic by selecting “View Stats” or by copying and pasting the URL into a new tab.
See the traffic report for your link, visitor locations and referrers. Advanced features of Bitly provide branded domains, audience insights and mobile-friendly links, which optimize marketing efforts and click analytics.
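If you want to create short links programmatically rather than through the web interface, a hedged sketch along the following lines should work. It assumes Bitly's v4 REST API, a personal access token and the requests library; verify the endpoint and response fields against the current Bitly documentation before relying on it.
import requests

BITLY_TOKEN = "YOUR_ACCESS_TOKEN"  # hypothetical placeholder -- substitute your own token

def shorten(long_url):
    resp = requests.post(
        "https://api-ssl.bitly.com/v4/shorten",      # assumed v4 endpoint
        json={"long_url": long_url},
        headers={"Authorization": "Bearer " + BITLY_TOKEN},
    )
    resp.raise_for_status()
    return resp.json()["link"]  # assumed response field for the shortened link

print(shorten("https://www.example.com/a/very/long/url?with=parameters"))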
Excellent customer service via email, phone, and training, as well as training support for vendors.
Create and track custom links and analyses.
Enterprise Pricing: available for small, medium and large organizations.
MailChimp has been in business 16 years and has more than 15 million people and companies as users. It is one of the most favored marketing automation platforms and email marketing service providers, with more than 80% of B2B and B2C marketers using its tools in their email marketing strategy.
MailChimp's e-commerce integration is especially popular and works with WordPress, Magento, Shopify, BigCommerce, WooCommerce, PrestaShop and other platforms.
Customer support via email, live chat, videos, and tutorials.
The Capterra market score is 100 out of 100 points.
Constant Contact is a promising email marketing tool for small businesses and nonprofits. It is used by more than 650,000 people around the world.
Create personal connections and relationships that keep customers coming back by answering questions and building a marketing strategy.
Manage your email lists, contacts, and email templates.
Have access to built-in social media sharing tools, easy tracking and reporting, and an image library.
App plug-ins and integrations for online marketing.
The Capterra market score is 72 out of 100 points.
Customer support via live chat and email, as well as community support.
Additional savings for 6 or 12 months prepayment. Also, special discounts for nonprofit organizations, franchises, and associations.
VerticalResponse has been in business more than 16 years and has more than 1 million satisfied customers. It’s one of the most popular tools that can help you integrate your email marketing with your social media marketing on every device.
You can run email and social media from one account.
The tool is easy to use for beginners – self-service email marketing, online surveys and direct mail service.
Automatically resized templates that are responsive and mobile-friendly.
Create a smart automated campaign by using the marketing automation tools.
Make a great first impression on your website visitors by adding a sign-up form or sharing a link to the form on social media.
Customer support via phone, live chat and email.
The Capterra market score is 71 out of 100 points.
Integrated with some third-party lead generation software, such as ZenDesk, Google Docs, OptinMonster, SalesForce, etc.
Send 4,000 emails per month to up to 300 contacts for free.
Pay-as-you-go and high-volume discounts also available.
Plus, annual and semi-annual discount options available at checkout.
is another simple yet powerful email marketing and automation application that helps businesses run successful digital marketing campaigns.
Enables designers to create, send, manage and track branded emails.
The tool’s technology can help solve complex problems simply.
Using the template builder, create a free HTML email template in less than 60 seconds.
Based on behavior and actions for different contacts, you could create different campaigns by using their ‘drag-and-drop’ builder, visual (customer) journey designer, creative marketing strategies and real-time performance metrics.
To customize every message, the tool connects seamlessly to hundreds of pre-built apps and integrations, including Shopify, SalesForce, and others.
The Capterra market score is 71 out of 100 points.
In Search Engine Optimization (SEO), page loading time refers to the time it takes to fully display a single page on your website (not the full site).
Google PageSpeed Insights is a powerful and free page-speed checker that determines the time to display a page for mobile and desktop. According to a HubSpot survey, the ideal page loading time should be less than 1.5 seconds. If page loading time increases from 0.4s to 0.9s, traffic and return on investment (ROI) drop by 20%.
Google's algorithms take page load time into account when determining search engine rankings.
Enter the page URL on “Google PageSpeed Insights” and see the results both for mobile and desktop user-agent.
Fix the speed problems with its suggestions.
Better manage your website’s bounce rate.
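For automated checks, the same data can be pulled programmatically. The sketch below assumes the public PageSpeed Insights v5 endpoint and the requests library; the exact response fields are an assumption and should be verified against Google's current documentation.
import requests

def pagespeed_performance_score(url, strategy="mobile"):
    resp = requests.get(
        "https://www.googleapis.com/pagespeedonline/v5/runPagespeed",  # assumed v5 endpoint
        params={"url": url, "strategy": strategy},
    )
    resp.raise_for_status()
    data = resp.json()
    # Lighthouse reports the overall performance score on a 0-1 scale (assumed field names).
    return data["lighthouseResult"]["categories"]["performance"]["score"]

print(pagespeed_performance_score("https://www.example.com"))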
Google is important for online marketing and search engine optimization (SEO). Most people rely solely on Google for search engine results pages (SERPs).
It’s an invaluable monitoring system of website performance in the Google search index.
Create sitemaps, robots.txt and other things in the Google Search Console.
By adding the tools, you will be able to analyze all positive or negative data of your website and mobile apps.
Informs you of malware attacks by message. You can also see how and why your site has been affected by Google's Penguin and Panda algorithm updates.
Discover 404 errors, duplicate/missing HTML elements, and pages blocked on your website.
Search traffic, search appearance, crawl data report and technical status updates are included.
Gauges the performance of your keywords.
As you know, links are the revenue generators of the internet, and effective Search Engine Optimization (SEO) relies on them.
Are you looking for link strategies and ideas of your competitors? Do you want inbound links from high page rank sites? What is an Online Marketing tool best for Link Research & Backlink Checking?
One clear answer is Open Site Explorer, a Moz tool that helps you build an SEO-friendly inbound link profile.
Measure your domain rank, which is an SEO metric.
The tool tells you about dofollow and nofollow links.
There are mainly three key factors in SEO: keyword research, content marketing, and link building.
SEMrush is a data-driven SEO keyword research tool that can be a leading competitive weapon in your digital marketing arsenal. Simply enter your domain into the main search box to see your organic search traffic, paid traffic, total backlinks and top organic keywords. You can also find your company's main organic competitors here, and even a competitive positioning map to see how you compare to them.
Find competitors' most profitable keywords.
Link building ideas and strategies.
Rank keyword difficulties and receive helpful suggestions.
Find competitors’ top performing content.
Conduct an SEO audit of the website on which you are currently working.
Generate an SEO report if you are working with clients and need to report back to them.
Export analysis reports as PDF.
Monitor changes in search engine ranking positions.
Customized plans and Enterprise options also available.
Which online marketing tools has your business found most effective? Are there any tools that we have forgotten that you think belong on the list? Do you have any data-driven marketing examples you can share? Share it with us on Facebook or Twitter.
All Pricing: as of Dec. 1, 2017.
Contact us to learn how Stirista’s data, digital advertising, and email campaigns can fuel your company’s growth.
Dipankar joined Stirista in March 2016 as a data research analyst. He has extensive experience in internet data research, blogging and SEO as a freelancer and has done freelance work for Stirista since January 2014. He also is a Silva graduate. In his free time, Dipankar enjoys traveling and learning about affiliate marketing.
|
#
# Copyright (c) 2013 Joshua Hughes <kivhift@gmail.com>
#
import array
import cStringIO
import re
from pu.utils import is_an_integer
class Section(object):
def __init__(self, addr, buffer):
self.start_addr = addr
self.buffer = buffer
self.end_addr = addr + len(buffer) - 1
def __add__(self, other):
if self.end_addr + 1 != other.start_addr:
raise ValueError('Sections are not adjacent!')
return self.__class__(self.start_addr, self.buffer + other.buffer)
def __cmp__(self, other):
ssa = self.start_addr
osa = other.start_addr
if ssa < osa:
return -1
elif ssa > osa:
return 1
else:
return 0
def __len__(self):
return len(self.buffer)
def __str__(self):
ret = cStringIO.StringIO()
ret.write('@{:04x}\n'.format(self.start_addr))
i = 0
for b in self.buffer:
ret.write('{}{:02x}'.format(' ' if i else '', b))
i += 1
if 16 == i:
i = 0
ret.write('\n')
if i:
ret.write('\n')
return ret.getvalue()
class FirmwareImage(object):
def __init__(self, infile = None):
self.section = []
if infile: self.parse(infile)
def __str__(self):
ret = cStringIO.StringIO()
for sec in self.section:
ret.write(str(sec))
ret.write('q\n')
return ret.getvalue()
def __getitem__(self, key):
if is_an_integer(key):
for sec in self.section:
if key >= sec.start_addr and key <= sec.end_addr:
key -= sec.start_addr
return sec.buffer[key]
else:
start = key.start
if start is None:
raise IndexError('Must give start index.')
stop = key.stop
for sec in self.section:
if start >= sec.start_addr and start <= sec.end_addr:
start -= sec.start_addr
if stop is not None:
stop -= sec.start_addr
return sec.buffer[slice(start, stop, key.step)]
raise IndexError('Given index is invalid.')
def merge_sections(self):
self.section.sort()
sec = self.section
i = 0
while i < (len(sec) - 1):
if sec[i].end_addr + 1 == sec[i + 1].start_addr:
sec[i] += sec.pop(i + 1)
continue
i += 1
def parse(self, infile):
quit_re = re.compile('^[qQ]$')
addr_re = re.compile('^@[0-9a-fA-F]{4}$')
bytes_re = re.compile('^[0-9a-fA-F]{2}(\s+[0-9a-fA-F]{2}){15}$')
section = []
addr = None
buf = None
def _add_sec():
if buf is not None:
section.append(Section(addr, buf))
with open(infile, 'rb') as inf:
for i, line in enumerate(inf):
ln = line.strip()
if quit_re.match(ln):
_add_sec()
break
elif addr_re.match(ln):
_add_sec()
addr = int(ln[1:], 16)
buf = array.array('B')
elif bytes_re.match(ln):
buf.extend([int(x, 16) for x in ln.split()])
else:
raise ValueError('Invalid line @ %d: %r' % (i, line))
if not quit_re.match(ln):
raise ValueError('Ran out of file without finding "q".')
self.section = section
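# A minimal usage sketch for the classes above (illustrative only; 'firmware.txt' is a
# hypothetical TI-TXT-style file with '@addr' headers, 16 hex bytes per line and a 'q' footer).
if __name__ == '__main__':
    img = FirmwareImage('firmware.txt')
    img.merge_sections()   # join adjacent sections into contiguous ones
    print(img)             # re-emit the image in the same text format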
|
NISSAN Australia appears to have changed its mind again on the Juke compact SUV and is set to bring the distinctive crossover vehicle here next year.
Only seven months ago, then president Dan Thompson told GoAuto he had finally decided against taking the current-model Juke after considering no fewer than three business plans, but it seems that his replacement, Bill Peffer, has taken a different stance on the vehicle.
Mr Peffer, who was previously Nissan North America marketing communications director, saw first-hand the success of the Pulsar-based Juke in that market and is said to be a fan of the global success story.
The British-built Juke appeared briefly in a 45-second Nissan Australia TV commercial titled “What if” that aired for the first time on Sunday night, which almost certainly means it has been written into the local product plan.
Nissan Australia public affairs manager Jeff Fisher told GoAuto that Mr Peffer had an influence on the change in thinking, along with the vehicle’s global sales success.
“We want to have another look at it and we are having another look,” he said.
Left: The UK Nissan Juke.
Mr Fisher said price negotiations were taking place with Nissan in the UK and Japan, where Juke is also built in right-hand drive.
He indicated that 2013 was the most likely launch timing.
Only last December, Mr Thompson told GoAuto we “won’t be getting this generation of Juke” despite “at least three different cracks at studying” the vehicle for this market.
Juke was designed specifically for the European market and has become something of a youth-oriented icon since it was revealed as a concept at the Geneva motor show in March 2009 and as a production car a year later.
It was released in Europe, Japan and the United States in 2010, and is also sold in New Zealand.
In the first half of this year, the Juke helped Nissan gain 5.0 per cent in the depressed European market and, together with the fellow UK-built Qashqai (which comes here as the Dualis) sold 40,000 in June alone, representing a 23 per cent increase.
US sales have also been strong, rising 52.7 per cent in June over the same month last year to 3101 sales, bringing its first-half total to 18,728, an increase of 7.2 per cent.
Built on the same platform as the forthcoming new-generation Pulsar, the Juke is powered by a range of engines, including a 1.6-litre petrol developing 86kW and 158Nm, a 1.5-litre diesel producing 81kW and 240Nm, and a performance model with a 1.6-litre turbocharged petrol unit pushing out 140kW and 240Nm.
It is sold in both front-wheel-drive and part-time all-wheel-drive forms, with manual and automatic (CVT) transmissions.
It would provide a funky alternative in the growing small-SUV market segment (which rose some 68.0 per cent in the first half of 2012) alongside the more conservative Dualis, sales of which grew 37.5 per cent to overtake the Hyundai ix35 and become the top-selling vehicle in the segment.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import codecs
from morphoconllu import read_conllu
from tools import warn
usage = '%s IN OUT' % os.path.basename(__file__)
VERB_TAGS = set(['VERB', 'AUX'])
def remove_Adv_CASE(word):
# Remove case feature from adverbs. Omorfi is only expected to
# assign the CASE feature value Dis (distributive) to adverbs, and
# only inconsistently. Distributive is not recognized as a Finnish
# case by ISK (http://scripta.kotus.fi/visk/sisallys.php?p=81).
# Decided to remove this altogether, resulting in a consistent
# treatment where no adjective has case.
# https://github.com/TurkuNLP/UniversalFinnish/issues/17
if word.cpostag != 'ADV':
return
fmap = word.feat_map()
if 'CASE' not in fmap:
return
value = fmap['CASE']
if value == 'Dis':
word.remove_feat('CASE', 'Dis')
else:
warn('unexpected CASE value for ADV: ' + value)
def remove_Inf1_CASE_Lat(word):
# Remove case feature with value Lat (lative) from infinitive
# verbs. Omorfi follows a dated analysis where the base form of
# the A-infinitive (Infinitive 1) is termed lative. Lative is not
# recognized by ISK (http://scripta.kotus.fi/visk/sisallys.php?p=81,
# see also http://scripta.kotus.fi/visk/sisallys.php?p=120 Huom 1).
# Decided to remove this case. Note that no information is removed,
# as the Lat value for case fully coincides with Inf1 and no other
# case in Omorfi.
# https://github.com/TurkuNLP/UniversalFinnish/issues/44
fmap = word.feat_map()
if 'CASE' not in fmap:
return
value = fmap['CASE']
if value != 'Lat':
return
if word.cpostag not in VERB_TAGS:
warn('unexpected CPOSTAG with CASE=Lat: ' + word.cpostag)
word.remove_feat('CASE', 'Lat')
def remove_Inf5(word):
# Remove Inf5 feature from verbs. Omorfi generates Inf5 *very*
# rarely (once in TDT) and inconsistently, and the "maisillaan"
# form termed as the "5th infinitive" is not considered as such by
# ISK (http://scripta.kotus.fi/visk/sisallys.php?p=120).
fmap = word.feat_map()
if 'INF' not in fmap:
return
value = fmap['INF']
if value != 'Inf5':
return
if word.cpostag not in VERB_TAGS:
warn('unexpected CPOSTAG with INF=Inf5: ' + word.cpostag)
word.remove_feat('INF', 'Inf5')
remove_funcs = [
remove_Adv_CASE,
remove_Inf1_CASE_Lat,
remove_Inf5,
]
def remove_feats(sentence):
for w in sentence.words():
for remove_func in remove_funcs:
remove_func(w)
def process(inf, outf):
for s in read_conllu(inf):
if not isinstance(s, basestring): # skip comments and sentence breaks
remove_feats(s)
print >> outf, unicode(s)
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) != 3:
print >> sys.stderr, 'Usage:', usage
return 1
infn, outfn = argv[1], argv[2]
with codecs.open(infn, encoding='utf-8') as inf:
with codecs.open(outfn, 'w', encoding='utf-8') as outf:
process(inf, outf)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
The old sign looked like this.
The new sign is "Back-lit" so we can get by with a smaller sign.
County code only allows us 37.5 sf of sign face.
Here is what they came up with, stating that sample 2 would be difficult and have greater costs.
|
#!/usr/bin/env python
import tempfile
import datetime
import logging
import os
import ftputil # external dependency, must be installed via pip or similar
# CONFIGURATION START
# Set this to the hostname of the FTP server
FTP_ADDRESS = "some.host.name.here.com"
FTP_USERNAME = "anonymous" # change these to a user that can upload files!
FTP_PASSWORD = "anonymous@"
# List the folders we are supposed to work with. Remember to make them
# absolute, i.e., have them start with a slash.
FOLDERS = [
"/an/absolute/path",
"/some/other/absolute path",
"/note/that/spaces are not escaped",
"/at least not for windows hosts",
]
# The label we want to give all files. Will be used for naming, e.g., if set
# to "recent", most recent file will be called "most-recent".
TARGET_LABEL = "recent"
# What file types are we working with?
FILE_TYPE = ".jpg"
# Define interesting times of day here. The most recent file for each period
# will be found and uploaded to the server, with a name constructed as:
# TARGET_LABEL-PERIOD(name).FILE_TYPE
# e.g., "recent-morning.jpg".
# Make the list empty if there are no interesting times of day that should
# be dealt with particularly.
# Periods can overlap. This is intentional: if you want to find the most
# recent file overall, make a period that covers the entire day like in
# the example below, and call it "overall". A file can then match both
# a "morning" period and the "overall" period, for example.
PERIODS = [
dict(name="morning", start="04:00", end="09:59"),
dict(name="midday", start="10:00", end="14:59"),
dict(name="overall", start="00:00", end="23:59"),
dict(name="evening", start="15:00", end="22:00")
]
# CONFIGURATION END
class FileInfo(object):
def __init__(self, mtime=0.0, path=None, name=None):
self.mtime = mtime
self.path = path
self.name = name
def download_file(remote_abspath, local_abspath):
"Download the remote file to the local path, both absolute"
with ftputil.FTPHost(FTP_ADDRESS, FTP_USERNAME, FTP_PASSWORD) as ftp:
ftp.download(remote_abspath, local_abspath)
def upload_file(local_abspath, remote_abspath):
"Upload the local file to the remote path, both absolute"
with ftputil.FTPHost(FTP_ADDRESS, FTP_USERNAME, FTP_PASSWORD) as ftp:
ftp.upload(local_abspath, remote_abspath)
def within_period(modification_time, period):
"Checks if the given modification time is within the given period"
start_hour, start_minute = period["start"].split(":")
end_hour, end_minute = period["end"].split(":")
# TODO Can we always assume UTC works here?
mtime = datetime.datetime.utcfromtimestamp(modification_time).time()
start = datetime.time(hour=int(start_hour), minute=int(start_minute))
end = datetime.time(hour=int(end_hour), minute=int(end_minute))
result = start <= mtime and mtime <= end
logging.debug("%s within interval %s -- %s? %s",
str(mtime), period["start"], period["end"],
str(result))
return result
def construct_file_name(period):
"Construct file name for a given period."
return TARGET_LABEL + "-" + period["name"] + FILE_TYPE
def find_newest_files(folder):
"""Return absolute paths of newest files on server.
This function will descend into subdirectories of the folder.
:param folder: The folder on the FTP server where we shall find the
newest file. We will descend into subdirectories of this folder.
:type folder: str
:returns: The path name of the newest file, i.e., the one with the
most recent modification time.
"""
newest_in_period = {period["name"]: FileInfo(name=construct_file_name(period))
for period in PERIODS}
file_names_to_avoid = [construct_file_name(period) for period in PERIODS]
with ftputil.FTPHost(FTP_ADDRESS, FTP_USERNAME, FTP_PASSWORD) as ftp:
for dirpath, dirnames, files in ftp.walk(folder):
for f in [fname for fname in files
if fname.endswith(FILE_TYPE)
and fname not in file_names_to_avoid]:
fullpath_filename = dirpath + "/" + f
statinfo = ftp.stat(fullpath_filename)
mtime = statinfo.st_mtime
logging.debug("%s modified at %f",
fullpath_filename,
mtime)
for period in PERIODS:
if within_period(mtime, period):
nip = newest_in_period[period["name"]]
if mtime > nip.mtime:
nip.path = fullpath_filename
nip.mtime = mtime
newest_files = [fi for fi in newest_in_period.itervalues() if fi.path]
return newest_files
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
for folder in FOLDERS:
temporary_directory = tempfile.mkdtemp()
for fi in find_newest_files(folder):
local_abspath = os.path.join(temporary_directory, fi.name)
logging.info("File under %s (%s) saved temporarily as %s",
folder, fi.path, local_abspath)
download_file(fi.path, local_abspath)
upload_file(local_abspath, folder + "/" + fi.name)
|
Strong Points: Excellent size, length, speed and athleticism for an O-lineman. Flashes as a run blocker and a pass blocker. Strong and explosive.
Weak Points: Too many mental mistakes, such as getting off the ball late, taking the wrong angle, failing to get good position on his opponent, and going one way while the rest of the line goes the other. Inconsistent hand use. Gets tall at times.
The Way We See It: Prince was a three-year starter at right tackle for Ohio State. The Buckeyes tried him at left tackle in the 2017 offseason but moved him back to right tackle when fall practice began last year. Prince is one of the most physically gifted tackles in this draft but doesn't play like it. There are flashes of dominance but also too much inconsistency and myriad mental mistakes. Can he be trusted to play to his talent level? Interviews and workouts will be very important for Prince, who has second-round talent but fourth-round tape. There is obvious upside, but will Prince ever reach it?
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import os
import random
from benchmark import run_benchmark
MAX_ERR_EM = 0.002
DATADIR = os.path.join(
os.path.dirname(__file__), os.path.pardir, 'tests', 'data')
def setup_fonts_to_quadratic_defcon():
from defcon import Font
return [[Font(os.path.join(DATADIR, 'RobotoSubset-Regular.ufo'))],
MAX_ERR_EM]
def main():
run_benchmark(
'ufo_benchmark', 'cu2qu.ufo', 'fonts_to_quadratic',
setup_suffix='defcon', repeat=10)
if __name__ == '__main__':
random.seed(1)
main()
|
DALLAS (August 2, 2018) – Aprima Medical Software, a leading provider of innovative electronic health record (EHR), practice management (PM) and revenue cycle management (RCM) solutions for medical practices, is pleased to announce that Mike Davis, KLAS Arch Collaborative Ambassador, former Managing Vice President of Gartner’s Healthcare Research & Advisory Service and former Executive Vice President of HIMSS Analytics, will be the keynote speaker at the Aprima 2018 User Conference, on August 17-19 at the Gaylord Texan Resort Hotel & Conference Center in Grapevine, Texas.
In Davis’s August 17th presentation, he will share key learnings from a comprehensive study of EHR best practices by the Arch Collaborative from KLAS Research, a leading independent research firm. Davis will reveal findings based on 600,000 data points collected from over 50,000 EHR users, including 20,000 physicians, that User Conference attendees can use to help drive their practices’ improved success.
During the keynote address, Aaron Gleave, KLAS Clinical Research Director, will speak to the rigorous process of becoming Best in KLAS and the details of how Aprima won the 2018 Best in KLAS award in the Small Practice Ambulatory EMR/PM category (1-10 physicians).
Participants in this year’s Aprima User Conference can earn up to 15.5 Continuing Medical Education (CME) credits. They can also attend pre-conference Boot Camps on how to optimize their use of Aprima. Additionally, Diamond Sponsor Surescripts will be hosting a special Boot Camp that will discuss their capabilities that have been integrated in Aprima to help inform care decisions and improve outcomes at the point of care.
There will also be special sessions on MIPS, care plan oversight, chronic care management, cyber security, protecting profits, and managing a practice’s online reputation, among other topics. In addition, exhibitors at the User Conference will display a variety of products that extend Aprima’s capabilities.
Click here to learn more about Aprima and the 2018 Aprima User Conference.
|
""" Implementation of the Master-Slave NSGA-II Genetic Algorithm using JSPEval
to evaluate the individuals. The fitness function is evaluated in parallel by
the Slave processors.
"""
import random
import time
import numpy as np
from mpi4py import MPI
from deap import creator, base, tools, algorithms
from deap110 import emo
from JSPEval.jspsolution import JspSolution
from JSPEval.jspmodel import JspModel
from JSPEval.jspeval import JspEvaluator
import params
import operators
import output
# --- Setup ---
# MPI environment
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
print(size)
# read parameters
term_m, term_v, pop_size, f_out, f_model, _, _,\
mut_prob, mut_eta, xover_prob, xover_eta = params.get()
# start multiple runs
start = time.time()
# -- setup algorithm --
# init evaluator
model = JspModel(f_model)
evaluator = JspEvaluator(model)
# init GA
fitness_size = evaluator.metrics_count()
weights = tuple([-1 for _ in range(fitness_size)])
creator.create("FitnessMin", base.Fitness, weights=weights)
creator.create("Individual", JspSolution, fitness=creator.FitnessMin)
toolbox = base.Toolbox()
toolbox.register("values",
tools.initRepeat,
list,
random.random,
model.solution_length())
toolbox.register("individual", # alias
operators.init_individual, # generator function
creator.Individual, # individual class
model, # model to use
toolbox.values) # value generator
toolbox.register("population",
tools.initRepeat,
list,
toolbox.individual)
toolbox.register("mate", operators.crossover, eta=xover_eta)
toolbox.register("mutate", operators.mutation, indpb=mut_prob, eta=mut_eta)
toolbox.register("select", tools.selNSGA2)
# init first population
population = toolbox.population(n=pop_size)
fits = map(lambda x: operators.calc_fitness(x, evaluator), population)
for fit, i_pop in zip(fits, population):
i_pop.fitness.values = fit
# --- main GA loop ---
gen = 0
terminate = False
term_reqs = []
for node in range(size):
term_reqs.append(comm.irecv(source=node, tag=0))
while not terminate:
gen += 1
# -- execute genetic operators --
# selection
emo.assignCrowdingDist(population)
offspring = tools.selTournamentDCD(population, len(population))
# crossover and mutation
offspring = algorithms.varAnd(
offspring,
toolbox,
cxpb=xover_prob,
mutpb=1.0) # is taken care of by mutation operator
# fitness calculation
fits = map(
lambda x: operators.calc_fitness(x, evaluator),
offspring)
# -- select next population --
# assign fitness
for fit, i_off in zip(fits, offspring):
i_off.fitness.values = fit
# selection
offspring.extend(population)
population = toolbox.select(
offspring,
len(population))
terminate = operators.termination(term_m, term_v, gen, population)
# send a termination signal to all others
# needed for makespan termination
if terminate:
print('rank: {} termination, sending signal'.format(rank))
for node in range(size):
comm.isend(True, node, tag=0)
# test for termination of others
_, node_term, _ = MPI.Request.testany(term_reqs)
if node_term:
print('rank: {}, termination signal received'.format(rank))
terminate = terminate | node_term
# --- process results ---
# collect results
sol_values = np.empty([pop_size, model.solution_length()])
fit_values = np.empty([pop_size, fitness_size])
for i, ind in zip(range(pop_size), population):
sol_values[i] = ind.get_values()
fit_values[i] = ind.fitness.values
sol_all = None
fit_all = None
if rank == 0:
sol_all = np.empty([pop_size * size, model.solution_length()])
fit_all = np.empty([pop_size * size, fitness_size])
comm.Gather(sol_values, sol_all, root=0)
comm.Gather(fit_values, fit_all, root=0)
if rank == 0:
all_pop = toolbox.population(n=pop_size * size)
for i, ind in zip(range(pop_size * size), all_pop):
ind.set_values(sol_all[i])
ind.fitness.values = fit_all[i]
duration = time.time() - start
output.write_pareto_front(all_pop, f_out)
with open('{}.time'.format(f_out), 'a') as myfile:
myfile.write('{}\n'.format(duration))
|
With a simple retro-style design and a sense of art, this functional bookcase offers an industrial aesthetic feel and is great for home or office organizing and decor!
Its stylish, clean-lined design adds a refined touch to any space, making it the perfect addition to all kinds of retro-style decoration, restaurants, cafes, offices and other places.
Multiple units can be placed next to each other to create an elegant storage wall.
SATISFACTION GUARANTEE: We support 30-day free return and 12-month warranty.
★ Complement any room: Stylish and clean-lined design adds a refined touch to any space, making it the perfect addition to all kinds of retro-style decoration, restaurants, cafes, offices and other places.
★ Strong construction, built to last: A heavy-duty metal frame combined with thick MDF boards (not solid wood) makes the shelves super stable. The weight capacity of each shelf is up to 70 lbs.
★ Easy to clean and assemble: Necessary hardware and instructions are provided; it only takes a few minutes to assemble effortlessly.
|
import cis
import numpy as np
files = ["../../resources/WorkshopData2016/AerosolCCI/20080411002335-ESACCI-L2P_AEROSOL-AER_PRODUCTS-AATSR-ENVISAT-ORAC_31962-fv03.04.nc",
"../../resources/WorkshopData2016/AerosolCCI/20080411020411-ESACCI-L2P_AEROSOL-AER_PRODUCTS-AATSR-ENVISAT-ORAC_31963-fv03.04.nc",
"../../resources/WorkshopData2016/AerosolCCI/20080411034447-ESACCI-L2P_AEROSOL-AER_PRODUCTS-AATSR-ENVISAT-ORAC_31964-fv03.04.nc",
"../../resources/WorkshopData2016/AerosolCCI/20080411052523-ESACCI-L2P_AEROSOL-AER_PRODUCTS-AATSR-ENVISAT-ORAC_31965-fv03.04.nc",
"../../resources/WorkshopData2016/AerosolCCI/20080411070559-ESACCI-L2P_AEROSOL-AER_PRODUCTS-AATSR-ENVISAT-ORAC_31966-fv03.04.nc"]
def subset_africa(ungridded_data):
northern_africa_lat_bounds = -20, 50
northern_africa_lon_bounds = 0, 40
southern_africa_lat_bounds = -40, 0
southern_africa_lon_bounds = 10, 50
africa_points = np.zeros(ungridded_data.shape, dtype=np.bool)
for i, d in enumerate(ungridded_data.data):
if ((northern_africa_lat_bounds[0] < ungridded_data.lat.points[i] < northern_africa_lat_bounds[1]) and
(northern_africa_lon_bounds[0] < ungridded_data.lon.points[i] < northern_africa_lon_bounds[1])) or \
((southern_africa_lat_bounds[0] < ungridded_data.lat.points[i] < southern_africa_lat_bounds[1]) and
(southern_africa_lon_bounds[0] < ungridded_data.lon.points[i] < southern_africa_lon_bounds[1])):
africa_points[i] = True
return ungridded_data[africa_points]
def subset_aerosol_cci_over_africa():
from subset_by_region.utils import stack_data_list
subsetted_data = []
for f in files:
d = cis.read_data(f, "AOD550")
subsetted_data.append(subset_africa(d))
subset = stack_data_list(subsetted_data)
return subset
if __name__ == '__main__':
import matplotlib.pyplot as plt
subset = subset_aerosol_cci_over_africa()
subset.plot(xaxis='longitude', yaxis='latitude')
plt.show()
|
Welcome to The Mark Yaletown! Suite 3008 has absolute ZERO wasted space! Bright South Facing Suite with False Creek Views in every room. Private Collection with endless upgrades including Control4, SUBZERO Integrated fridge, Miele Gas Stove Top, Miele dishwasher, Faber hood fan, soft closing designer cabinets. Spa inspired washrooms with Nuheat flooring, marble countertops, designer tile & fixtures. Automatic Roller Blinds, NEST heating & cooling & 45 SF Balcony and 2 side by side parking & 1 storage room w/2 separate storage units. Enjoy 24 hour Concierge, Huge Fitness Centre w/Yoga studio, Sauna, Steam, Outdoor Pool/Hot tub, Party Rooms, 2 Guest Suites! Sorry, Showings Only during Open Houses! Open Sun, May 7th 2-5pm.
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 3 16:40:58 2014
@author: cokelaer
"""
from .midas import XMIDAS
class MultiMIDAS(object):
"""Data structure to store multiple instances of MIDAS files
You can read a MIDAS file that contains several cell lines:
and access the MIDAS files using their cell line name
.. doctest::
>>> mm = MultiMIDAS(cnodata("EGFR-ErbB_PCB2009.csv"))
>>> mm.cellLines
['HepG2', 'PriHu']
>>> mm["HepG2"].namesCues
['TGFa', 'MEK12', 'p38', 'PI3K', 'mTORrap', 'GSK3', 'JNK']
where the list of cell line names is available in the :attr:`cellLines`
attribute.
Or you can start from an empty list and add instance later on using :meth:`addMIDAS`
method.
"""
def __init__(self, filename=None):
""".. rubric:: constructor
:param str filename: a valid MIDAS file (optional)
"""
self._midasList = []
self._names = []
if filename:
self.readMIDAS(filename)
def addMIDAS(self, midas):
"""Add an existing MIDAS instance to the list of MIDAS instances
.. doctest::
>>> from cellnopt.core import *
>>> m = MIDASReader(cnodata("MD-ToyPB.csv"))
>>> mm = MultiMIDAS()
>>> mm.addMIDAS(m)
"""
if midas.celltypeName not in self._names:
self._midasList.append(midas)
self._names.append(midas.celltypeName)
else:
raise ValueError("midsa with same celltype already in the list")
def readMIDAS(self, filename):
"""read MIDAS file and extract individual cellType/cellLine
This function reads the MIDAS and identifies the cellLines. Then, it
creates a MIDAS instance for each cellLines and add the MIDAS instance to the
:attr:`_midasList`. The MIDAS file can then be retrieved using their
cellLine name, which list is stored in :attr:`cellLines`.
:param str filename: a valid MIDAS file containing any number of cellLines.
"""
raise NotImplementedError
m = XMIDAS(filename)
self.addMIDAS(m)

    def _get_cellLines(self):
        names = [x.celltypeName for x in self._midasList]
        return names
    cellLines = property(_get_cellLines,
                         doc="return the names of all cell lines, which are the MIDAS instance identifiers")

    def __getitem__(self, name):
        index = self.cellLines.index(name)
        return self._midasList[index]

    def plot(self):
        """Call the plot() method of each MIDAS instance in a separate figure

        More sophisticated plots to easily compare cell lines could be
        implemented.
        """
        from pylab import figure, clf
        for i, m in enumerate(self._midasList):
            figure(i + 1)
            clf()
            m.plot()
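
A short end-to-end sketch of the intended workflow, assuming that cellnopt.core exports XMIDAS and the cnodata helper used in the doctests above, and that each XMIDAS instance exposes the celltypeName attribute and plot() method this class relies on (none of this is verified here):

from cellnopt.core import XMIDAS, cnodata

mm = MultiMIDAS()
mm.addMIDAS(XMIDAS(cnodata("MD-ToyPB.csv")))   # each instance contributes one cell line

print(mm.cellLines)            # names of the registered cell lines
mm[mm.cellLines[0]].plot()     # access a single MIDAS instance by cell line name
mm.plot()                      # or plot every cell line in its own figure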
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import pytest
import numpy as np
from numpy.testing import (
    assert_almost_equal,
)

from MDAnalysisTests.datafiles import MMTF, MMTF_gz, MMTF_skinny2
from MDAnalysis.coordinates.MMTF import MMTFReader


class TestMMTFReader(object):
    @pytest.fixture(scope='class')
    def r(self):
        return MMTFReader(MMTF)

    def test_read_frame_size(self, r):
        assert r.ts.n_atoms == 512

    def test_read_positions(self, r):
        assert_almost_equal(r.ts.positions[0],
                            np.array([-0.798, 12.632, 23.231]),
                            decimal=4)
        assert_almost_equal(r.ts.positions[-1],
                            np.array([10.677, 15.517, 11.1]),
                            decimal=4)

    def test_velocities(self, r):
        assert not r.ts.has_velocities

    def test_forces(self, r):
        assert not r.ts.has_forces

    def test_len(self, r):
        # should be single frame
        assert len(r) == 1


class TestMMTFReaderGZ(object):
    @pytest.fixture(scope='class')
    def r(self):
        return MMTFReader(MMTF_gz)

    def test_read_frame_size(self, r):
        assert r.ts.n_atoms == 1140

    def test_read_positions(self, r):
        assert_almost_equal(r.ts.positions[0],
                            np.array([38.428, 16.440, 28.841]),
                            decimal=4)
        assert_almost_equal(r.ts.positions[-1],
                            np.array([36.684, 27.024, 20.468]),
                            decimal=4)

    def test_velocities(self, r):
        assert not r.ts.has_velocities

    def test_forces(self, r):
        assert not r.ts.has_forces

    def test_len(self, r):
        # should be single frame
        assert len(r) == 1


def test_dimensionless():
    r = MMTFReader(MMTF_skinny2)
    assert r.ts.dimensions is None
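
The tests above drive MMTFReader directly; in everyday use the reader is normally reached through a Universe. A minimal sketch, assuming an MDAnalysis version that still ships MMTF support (it has been deprecated in recent releases) and reusing the MMTF test file from MDAnalysisTests:

import MDAnalysis as mda
from MDAnalysisTests.datafiles import MMTF

# The .mmtf extension is detected automatically, so the one file serves as
# both topology and (single-frame) coordinates.
u = mda.Universe(MMTF)
print(u.atoms.n_atoms)         # 512, matching test_read_frame_size above
print(u.atoms.positions[0])    # first atom position, as checked in the tests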
|
Seeking out professional help and treatment at an alcohol rehab is most often what it takes to finally break free of such quagmires. A look at the services offered by one leading Women's Addiction Treatment Program in Arizona will reveal that there is an option suitable to every situation.
Spiritual support. It is ultimately up to each individual addict to determine whether any level of recovery can be sustained and preserved for life. Unfortunately, many who complete inpatient alcohol rehab end up falling back into former habits of substance abuse even after having initially made impressive progress. For quite a few who have endured such disappointments, discovering or building on religious faith ends up being the most important subsequent step. As a result, Christian rehab for women has become an increasingly popular option.
|