text
stringlengths 29
850k
|
|---|
# Functions/classes for WCSAxes related to APE14 WCSes
import numpy as np
from astropy.coordinates import SkyCoord, ICRS, BaseCoordinateFrame
from astropy import units as u
from astropy.wcs import WCS
from astropy.wcs.utils import local_partial_pixel_derivatives
from astropy.wcs.wcsapi import SlicedLowLevelWCS
from .frame import RectangularFrame, EllipticalFrame, RectangularFrame1D
from .transforms import CurvedTransform
__all__ = ['transform_coord_meta_from_wcs', 'WCSWorld2PixelTransform',
'WCSPixel2WorldTransform']
# Identity 2-d FITS WCS used as a default/fallback pixel<->world mapping:
# generic X/Y axes, reference value 0 at pixel (1, 1), unit pixel scale.
IDENTITY = WCS(naxis=2)
IDENTITY.wcs.ctype = ["X", "Y"]
IDENTITY.wcs.crval = [0., 0.]
IDENTITY.wcs.crpix = [1., 1.]
IDENTITY.wcs.cdelt = [1., 1.]
def transform_coord_meta_from_wcs(wcs, frame_class, slices=None):
    """
    Build coordinate metadata and the pixel->world transform for a WCS.

    Parameters
    ----------
    wcs : APE 14 low-level WCS object
        The WCS to extract coordinate metadata from.
    frame_class : type
        Frame class (``RectangularFrame``, ``RectangularFrame1D``,
        ``EllipticalFrame`` or other) used to choose default tick/label
        spine positions.
    slices : iterable of str/int, optional
        Required when the WCS has more than two pixel dimensions; must
        contain ``'x'`` (and optionally ``'y'``) to select the plotted axes.

    Returns
    -------
    transform : `WCSPixel2WorldTransform`
        Transform from (sliced) pixel to world coordinates.
    coord_meta : dict
        Per-world-axis metadata (name, type, wrap, unit, visibility,
        format unit and default spine positions).
    """
    if slices is not None:
        slices = tuple(slices)

    if wcs.pixel_n_dim > 2:
        if slices is None:
            raise ValueError("WCS has more than 2 pixel dimensions, so "
                             "'slices' should be set")
        elif len(slices) != wcs.pixel_n_dim:
            raise ValueError("'slices' should have as many elements as WCS "
                             "has pixel dimensions (should be {})"
                             .format(wcs.pixel_n_dim))

    # FITS-WCS (possibly sliced) gets CTYPE-based name aliases below.
    is_fits_wcs = isinstance(wcs, WCS) or (isinstance(wcs, SlicedLowLevelWCS) and isinstance(wcs._wcs, WCS))

    coord_meta = {}
    coord_meta['name'] = []
    coord_meta['type'] = []
    coord_meta['wrap'] = []
    coord_meta['unit'] = []
    coord_meta['visible'] = []
    coord_meta['format_unit'] = []

    for idx in range(wcs.world_n_dim):
        axis_type = wcs.world_axis_physical_types[idx]
        axis_unit = u.Unit(wcs.world_axis_units[idx])
        coord_wrap = None
        format_unit = axis_unit
        coord_type = 'scalar'

        if axis_type is not None:
            axis_type_split = axis_type.split('.')

            # Solar physical types get special wrap angles and format units.
            if "pos.helioprojective.lon" in axis_type:
                coord_wrap = 180.
                format_unit = u.arcsec
                coord_type = "longitude"
            elif "pos.helioprojective.lat" in axis_type:
                format_unit = u.arcsec
                coord_type = "latitude"
            elif "pos.heliographic.stonyhurst.lon" in axis_type:
                coord_wrap = 180.
                format_unit = u.deg
                coord_type = "longitude"
            elif "pos.heliographic.stonyhurst.lat" in axis_type:
                format_unit = u.deg
                coord_type = "latitude"
            elif "pos.heliographic.carrington.lon" in axis_type:
                coord_wrap = 360.
                format_unit = u.deg
                coord_type = "longitude"
            elif "pos.heliographic.carrington.lat" in axis_type:
                format_unit = u.deg
                coord_type = "latitude"
            elif "pos" in axis_type_split:
                if "lon" in axis_type_split:
                    coord_type = "longitude"
                elif "lat" in axis_type_split:
                    coord_type = "latitude"
                elif "ra" in axis_type_split:
                    coord_type = "longitude"
                    format_unit = u.hourangle
                elif "dec" in axis_type_split:
                    coord_type = "latitude"
                elif "alt" in axis_type_split:
                    # BUG FIX: altitude is an elevation angle, i.e.
                    # latitude-like (was mislabelled as "longitude").
                    coord_type = "latitude"
                elif "az" in axis_type_split:
                    # BUG FIX: azimuth wraps around the horizon, i.e.
                    # longitude-like (was mislabelled as "latitude").
                    coord_type = "longitude"
                elif "long" in axis_type_split:
                    coord_type = "longitude"

        coord_meta['type'].append(coord_type)
        coord_meta['wrap'].append(coord_wrap)
        coord_meta['format_unit'].append(format_unit)
        coord_meta['unit'].append(axis_unit)

        # For FITS-WCS, for backward-compatibility, we need to make sure that we
        # provide aliases based on CTYPE for the name.
        if is_fits_wcs:
            name = []
            if isinstance(wcs, WCS):
                name.append(wcs.wcs.ctype[idx].lower())
                name.append(wcs.wcs.ctype[idx][:4].replace('-', '').lower())
            elif isinstance(wcs, SlicedLowLevelWCS):
                name.append(wcs._wcs.wcs.ctype[wcs._world_keep[idx]].lower())
                name.append(wcs._wcs.wcs.ctype[wcs._world_keep[idx]][:4].replace('-', '').lower())
            # Collapse the two CTYPE aliases if they are identical.
            if name[0] == name[1]:
                name = name[0:1]
            if axis_type:
                if axis_type not in name:
                    name.insert(0, axis_type)
            if wcs.world_axis_names and wcs.world_axis_names[idx]:
                if wcs.world_axis_names[idx] not in name:
                    name.append(wcs.world_axis_names[idx])
            name = tuple(name) if len(name) > 1 else name[0]
        else:
            name = axis_type or ''
            if wcs.world_axis_names:
                name = (name, wcs.world_axis_names[idx]) if wcs.world_axis_names[idx] else name

        coord_meta['name'].append(name)

    coord_meta['default_axislabel_position'] = [''] * wcs.world_n_dim
    coord_meta['default_ticklabel_position'] = [''] * wcs.world_n_dim
    coord_meta['default_ticks_position'] = [''] * wcs.world_n_dim

    # If the world axis has a name use it, else display the world axis physical type.
    fallback_labels = [name[0] if isinstance(name, (list, tuple)) else name for name in coord_meta['name']]
    coord_meta['default_axis_label'] = [wcs.world_axis_names[i] or fallback_label for i, fallback_label in enumerate(fallback_labels)]

    transform_wcs, invert_xy, world_map = apply_slices(wcs, slices)
    transform = WCSPixel2WorldTransform(transform_wcs, invert_xy=invert_xy)

    for i in range(len(coord_meta['type'])):
        coord_meta['visible'].append(i in world_map)

    inv_all_corr = [False] * wcs.world_n_dim
    m = transform_wcs.axis_correlation_matrix.copy()
    if invert_xy:
        inv_all_corr = np.all(m, axis=1)
        m = m[:, ::-1]

    if frame_class is RectangularFrame:
        for i, spine_name in enumerate('bltr'):
            pos = np.nonzero(m[:, i % 2])[0]
            # If all the axes we have are correlated with each other and we
            # have inverted the axes, then we need to reverse the index so we
            # put the 'y' on the left.
            if inv_all_corr[i % 2]:
                pos = pos[::-1]
            if len(pos) > 0:
                index = world_map[pos[0]]
                coord_meta['default_axislabel_position'][index] = spine_name
                coord_meta['default_ticklabel_position'][index] = spine_name
                coord_meta['default_ticks_position'][index] = spine_name
                # Zero out this row so each world axis claims only one spine.
                m[pos[0], :] = 0

        # In the special and common case where the frame is rectangular and
        # we are dealing with 2-d WCS (after slicing), we show all ticks on
        # all axes for backward-compatibility.
        if len(world_map) == 2:
            for index in world_map:
                coord_meta['default_ticks_position'][index] = 'bltr'

    elif frame_class is RectangularFrame1D:
        derivs = np.abs(local_partial_pixel_derivatives(transform_wcs, *[0]*transform_wcs.pixel_n_dim,
                                                        normalize_by_world=False))[:, 0]

        for i, spine_name in enumerate('bt'):
            # Here we are iterating over the correlated axes in world axis order.
            # We want to sort the correlated axes by their partial derivatives,
            # so we put the most rapidly changing world axis on the bottom.
            pos = np.nonzero(m[:, 0])[0]
            order = np.argsort(derivs[pos])[::-1]  # Sort largest to smallest
            pos = pos[order]
            if len(pos) > 0:
                index = world_map[pos[0]]
                coord_meta['default_axislabel_position'][index] = spine_name
                coord_meta['default_ticklabel_position'][index] = spine_name
                coord_meta['default_ticks_position'][index] = spine_name
                m[pos[0], :] = 0

        # In the special and common case where the frame is rectangular and
        # we are dealing with 2-d WCS (after slicing), we show all ticks on
        # all axes for backward-compatibility.
        if len(world_map) == 1:
            for index in world_map:
                coord_meta['default_ticks_position'][index] = 'bt'

    elif frame_class is EllipticalFrame:
        if 'longitude' in coord_meta['type']:
            lon_idx = coord_meta['type'].index('longitude')
            coord_meta['default_axislabel_position'][lon_idx] = 'h'
            coord_meta['default_ticklabel_position'][lon_idx] = 'h'
            coord_meta['default_ticks_position'][lon_idx] = 'h'

        if 'latitude' in coord_meta['type']:
            lat_idx = coord_meta['type'].index('latitude')
            coord_meta['default_axislabel_position'][lat_idx] = 'c'
            coord_meta['default_ticklabel_position'][lat_idx] = 'c'
            coord_meta['default_ticks_position'][lat_idx] = 'c'

    else:
        # Unknown frame class: put every visible axis on all of its spines.
        for index in range(len(coord_meta['type'])):
            if index in world_map:
                coord_meta['default_axislabel_position'][index] = frame_class.spine_names
                coord_meta['default_ticklabel_position'][index] = frame_class.spine_names
                coord_meta['default_ticks_position'][index] = frame_class.spine_names

    return transform, coord_meta
def apply_slices(wcs, slices):
    """
    Take the input WCS and slices and return a sliced WCS for the transform
    plus a mapping of world axes in the sliced WCS to the input WCS.

    Returns ``(transform_wcs, invert_xy, world_map)`` where ``invert_xy``
    flags that 'x' appears after 'y' in ``slices``.
    """
    world_keep = (list(wcs._world_keep) if isinstance(wcs, SlicedLowLevelWCS)
                  else list(range(wcs.world_n_dim)))

    # Defaults: no slicing applied, identity world-axis mapping.
    transform_wcs = wcs
    invert_xy = False
    world_map = list(range(wcs.world_n_dim))

    if slices is not None:
        full_slices = list(slices)
        full_slices[full_slices.index('x')] = slice(None)
        if 'y' in slices:
            full_slices[full_slices.index('y')] = slice(None)
            invert_xy = slices.index('x') > slices.index('y')
        # slices is given in numpy order, SlicedLowLevelWCS wants pixel order.
        transform_wcs = SlicedLowLevelWCS(wcs, full_slices[::-1])
        world_map = tuple(world_keep.index(i) for i in transform_wcs._world_keep)

    return transform_wcs, invert_xy, world_map
def wcsapi_to_celestial_frame(wcs):
    """
    Return the first celestial frame advertised by *wcs*'s world axis
    object classes, or None when no celestial component is found.
    """
    for entry in wcs.world_axis_object_classes.values():
        klass, kwargs = entry[0], entry[2]
        if issubclass(klass, SkyCoord):
            # SkyCoord components default to ICRS when no frame is given.
            return kwargs.get('frame', ICRS())
        if issubclass(klass, BaseCoordinateFrame):
            return klass(**kwargs)
    return None
class WCSWorld2PixelTransform(CurvedTransform):
    """
    WCS transformation from world to pixel coordinates.

    Parameters
    ----------
    wcs : APE 14 low-level WCS object
        The WCS to transform through; at most 2 pixel dimensions.
    invert_xy : bool, optional
        If True, swap the x/y order of the returned pixel coordinates.
    """

    has_inverse = True
    frame_in = None

    def __init__(self, wcs, invert_xy=False):
        super().__init__()
        if wcs.pixel_n_dim > 2:
            # Fixed malformed operator in message (was '=<').
            raise ValueError('Only pixel_n_dim <= 2 is supported')
        self.wcs = wcs
        self.invert_xy = invert_xy
        self.frame_in = wcsapi_to_celestial_frame(wcs)

    def __eq__(self, other):
        # Identity comparison on wcs is deliberate: WCS objects are mutable.
        return (isinstance(other, type(self)) and self.wcs is other.wcs and
                self.invert_xy == other.invert_xy)

    @property
    def input_dims(self):
        return self.wcs.world_n_dim

    def transform(self, world):
        """Transform an (n, world_n_dim) array of world values to (n, 2) pixels."""
        # Convert to a list of arrays, one per world dimension
        world = list(world.T)

        if len(world) != self.wcs.world_n_dim:
            raise ValueError(f"Expected {self.wcs.world_n_dim} world coordinates, got {len(world)} ")

        if len(world[0]) == 0:
            # Short-circuit empty input: world_to_pixel_values may not
            # handle zero-length arrays.
            pixel = np.zeros((0, 2))
        else:
            pixel = self.wcs.world_to_pixel_values(*world)

        if self.invert_xy:
            pixel = pixel[::-1]

        pixel = np.array(pixel).T

        return pixel

    transform_non_affine = transform

    def inverted(self):
        """
        Return the inverse of the transform
        """
        return WCSPixel2WorldTransform(self.wcs, invert_xy=self.invert_xy)
class WCSPixel2WorldTransform(CurvedTransform):
    """
    WCS transformation from pixel to world coordinates.

    Parameters
    ----------
    wcs : APE 14 low-level WCS object
        The WCS to transform through; at most 2 pixel dimensions.
    invert_xy : bool, optional
        If True, swap the x/y order of the incoming pixel coordinates.
    """

    has_inverse = True

    def __init__(self, wcs, invert_xy=False):
        super().__init__()
        if wcs.pixel_n_dim > 2:
            # Fixed malformed operator in message (was '=<').
            raise ValueError('Only pixel_n_dim <= 2 is supported')
        self.wcs = wcs
        self.invert_xy = invert_xy
        self.frame_out = wcsapi_to_celestial_frame(wcs)

    def __eq__(self, other):
        # Identity comparison on wcs is deliberate: WCS objects are mutable.
        return (isinstance(other, type(self)) and self.wcs is other.wcs and
                self.invert_xy == other.invert_xy)

    @property
    def output_dims(self):
        return self.wcs.world_n_dim

    def transform(self, pixel):
        """Transform an (n, pixel_n_dim) array of pixels to (n, world_n_dim) world values."""
        # Convert to a list of arrays, one per pixel dimension
        pixel = list(pixel.T)

        if len(pixel) != self.wcs.pixel_n_dim:
            # Fixed message: this checks *pixel* coordinates, not world.
            raise ValueError(f"Expected {self.wcs.pixel_n_dim} pixel coordinates, got {len(pixel)}")

        if self.invert_xy:
            pixel = pixel[::-1]

        if len(pixel[0]) == 0:
            # Short-circuit empty input: pixel_to_world_values may not
            # handle zero-length arrays.
            world = np.zeros((0, self.wcs.world_n_dim))
        else:
            world = self.wcs.pixel_to_world_values(*pixel)

        if self.wcs.world_n_dim == 1:
            # Normalize the scalar-axis case so the transpose below works.
            world = [world]

        world = np.array(world).T

        return world

    transform_non_affine = transform

    def inverted(self):
        """
        Return the inverse of the transform
        """
        return WCSWorld2PixelTransform(self.wcs, invert_xy=self.invert_xy)
|
Our A-Frame signs are not like the others. Don't be fooled by "their" lower price. Others tack the track directly to these signs, making them impossible to use for anything else. All of our A-Frame signs are fitted with 1/8" white PVC panels, which carry the tracks and your company logo. These PVC attachments can be removed so you can apply other signs to these frames. All of our PVC attachments are mounted with screws, not tape.
Be sure to upload your logo!
|
## Imitation Compiler
#
# @filename Imitation_Compiler.py
# @author Ben Mariano
# @date 5/9/2017
# Library Imports
import operator
import functools
import string
import random
# Local Imports
import Token
from Lexer import *
import Parser
from NodeVisitor import NodeVisitor
## Imitation Compiler
#
# @brief Compiles the second of the two required python scripts. This
# script traverses the CO-PCT tree in reverse using PyHop.
class Imitation_Compiler(NodeVisitor):
## Constructor
#
# @param parser Parser that will provide the AST to be compiled
def __init__(self, parser):
    """Create a compiler over *parser*, which supplies the AST to compile."""
    ## @var parser
    #  Converts input into AST
    self.parser = parser
    ## @var methods_dict
    #  Dictionary of methods where key is method name and
    #  value is a 3-tuple (list of arguments, cond, returns)
    self.methods_dict = {}
    ## @var intention
    #  Current intention for access from conditional check
    self.intention = None
    ## @var method_var_equivs
    #  Variable equivalents per method. This keeps track of which
    #  variables must be equal based on conditionals.
    self.method_var_equivs = {}
## Visit Literal
#
# @brief Returns a tuple of the string 'LITERAL' and the literal
# value
#
# @param node AST instance to be evaluated
#
# @retval (String, String) tuple of the form 'LITERAL', literal_value
def visit_Literal(self, node):
    """Tag the visited literal as a ('LITERAL', value-string) pair."""
    value = self.visit(node.name)
    return 'LITERAL', str(value)
## Visit Boolean
#
# @brief Returns a four-tuple of the form 'UNIT', e1, comp, e2 where
# e1 and e2 are tuples representing either literals, variables
# or keyword phrases
#
# @param node AST instance to be evaluated
#
# @retval (String, Tuple, String, Tuple) tuple of the form 'UNIT', expr1, op, expr2
def visit_Boolean(self, node):
    """Tuplize a comparison node as ('UNIT', (lhs, op-string, rhs)).

    Returns None for unrecognized comparison token types.
    """
    # Dispatch table mapping comparison token types to operator strings.
    comparators = {
        EQUALS: "==",
        LESSTHAN: "<",
        GREATERTHAN: ">",
        GREATEREQUAL: ">=",
        LESSEQUAL: "<=",
        NOTEQUAL: "!=",
    }
    token_type = node.op.type
    if token_type == PYTHON:
        # Raw python conditions carry no right-hand side.
        return 'UNIT', (self.visit(node.e1), "PYTHON", None)
    if token_type in comparators:
        return 'UNIT', (self.visit(node.e1), comparators[token_type], self.visit(node.e2))
## Visit Boolean Expression
#
# @brief Returns a three tuple of the form b_left, op, b_right
#
# @param node AST instance to be evaluated
#
# @retval (Tuple, String, Tuple) tuple of the form BooleanExpr, op, BooleanExpr
def visit_BoolExpr(self, node):
    """Tuplize a boolean expression as (bound, left, 'and'|'or', right).

    Without a connective, the left sub-expression is returned directly.
    """
    if not node.op:
        return self.visit(node.left)
    if node.op.type == AND:
        return node.bound, self.visit(node.left), "and", self.visit(node.right)
    if node.op.type == OR:
        return node.bound, self.visit(node.left), "or", self.visit(node.right)
## Visit Arguments
#
# @brief Returns a list of strings representing the arguments
#
# @param node AST instance to be evaluated
#
# @retval String List list of the arguments as strings
def visit_Args(self, node):
    """Return the visited argument children as a list of strings."""
    return [self.visit(child) for child in node.children]
## Visit Action
#
# @brief Returns a tuple of the action_name, action_args
#
# @param node AST instance to be evaluated
#
# @retval (String, String List) tuple of the form action_name, action_args
def visit_Act(self, node):
    """Return an (action_name, action_args) pair for one action node."""
    name = self.visit(node.var)
    arguments = self.visit(node.args)
    return (name, arguments)
## Visit Actions
#
# @brief Returns a list of strings representing the actions
#
# @param node AST instance to be evaluated
#
# @retval (String, String List) List list of action tuples with action_name, action_args
def visit_Acts(self, node):
    """Return the visited action children as a list of act tuples."""
    return [self.visit(child) for child in node.children]
## Visit Caus
#
# @brief Returns the name of the intention
#
# @param node AST instance to be evaluated
#
# @retval String string representing the intention
def visit_Caus(self, node):
    """Compile one causal relation (actions => intention).

    Builds the Python return statement for the intention's method, records
    variable equivalences for CONT (variable-length argument) keywords, and
    registers/updates the entry in ``self.methods_dict``. Returns the
    intention's name so visit_Stmt can stash it on ``self.intention``.
    """
    # acts = the right-side of the causal statement. Represents
    #        the actions that cause the 'intention'
    # act  = the left-side of the causal statement. Represents
    #        the 'intention' caused by the actions
    acts = self.visit(node.acts)
    act = self.visit(node.act)

    # defines fold-left function
    foldl = lambda func, acc, xs: functools.reduce(func, xs, acc)
    # isolates and formats the names of the acts in order to be
    # used in the if statement
    # NOTE(review): act_names is computed but never used below.
    act_names = foldl(operator.add, '', map(lambda x: '\''+x[0]+'\',', acts))

    intention_Args = act[1]
    act_name = act[0]

    # Start a fresh variable-equivalence dict for this occurrence of the
    # intention; `length` indexes it as the most recent entry.
    if not act_name in self.method_var_equivs:
        self.method_var_equivs[act_name] = []
    self.method_var_equivs[act_name].append({})
    length = len(self.method_var_equivs[act_name])

    # return statement
    # defines return value as variable then returns this variable
    ret = '__ret_val = ['
    # iterate through each action adding it and its
    # argument placeholders to the return string
    for action in acts:
        ret += '(\''+action[0]+'\','
        # iterate through each argument to an action
        for a in range(0, len(action[1])):
            arg = action[1][a]
            # Handle the special case of the CONT keyword: 'CONT<k>' refers
            # back k positions to an earlier argument of the same action.
            if arg[:4] == 'CONT':
                tmp_dict = {}
                index = a - int(arg[4:])
                prev_arg = action[1][index]
                # adjust arg name to avoid collisions
                arg = arg + "-" + prev_arg
                tmp_dict[arg] = prev_arg
                self.method_var_equivs[act_name][length-1].update(tmp_dict)
            # use hashtag notation to indicate arg_name to be replaced
            ret += '#'+arg + ','
        # drop trailing comma, close the action tuple
        ret = ret[:len(ret)-1] + ')'
        ret += ','
    # add final bracket to return array (dropping the last comma)
    ret = ret[:len(ret)-1] + ']\n'
    # add return statement with the return value
    ret += 'return __ret_val\n'

    # Check if method has already been defined
    if act_name in self.methods_dict:
        args, conds, rets = self.methods_dict[act_name]
        # Literal arguments show up as tuples/lists rather than plain strings.
        hasLiteral = False
        for arg in intention_Args:
            if isinstance(arg, (tuple, list)):
                hasLiteral = True
        origHasLiteral = False
        for arg in args:
            if isinstance(arg, (tuple, list)):
                origHasLiteral = True
        added_star = False
        # Check if you have to change parameters (add final *) — i.e. the
        # new occurrence supplies more/placeholder-free args than the old.
        if 'NONE' not in intention_Args and not hasLiteral:
            if 'NONE' in args or origHasLiteral or len(intention_Args) > len(args):
                added_star = True
                prev_arg = ''
                index = -1
                # iterate through intention args
                for a in range(0, len(intention_Args)):
                    arg = intention_Args[a]
                    # handle CONT keyword
                    if arg[:4] == 'CONT':
                        # Get argument referenced by CONT number
                        index = a - int(arg[4:])
                        prev_arg = args[index]
                        # iterate through intention args coming after the first
                        # item referenced by the CONT
                        tmp_dict = {}
                        for i in range(index, len(intention_Args)-1):
                            prev_arg_2 = intention_Args[i]
                            new_index = i - index
                            # add in new mapping for each of the args in CONT list
                            tmp_dict[prev_arg_2] = prev_arg+'['+str(new_index)+']'
                        # Map first arg in CONT list
                        tmp_dict[prev_arg] = prev_arg +'[0]'
                        # Propagate the mapping to every recorded occurrence.
                        for i in range(0, length):
                            self.method_var_equivs[act_name][i].update(tmp_dict)
                        # Use star notation to indicate that the method argument
                        # needs a star to mark a variable length tuple in Python
                        prev_arg = '*' + prev_arg
                adjusted_args = args
                if index > -1:
                    adjusted_args[index] = prev_arg
                else:
                    adjusted_args = intention_Args
                self.methods_dict[act_name] = (adjusted_args, conds, rets)
        if not added_star:
            prev_arg = ''
            index = -1
            # Same CONT expansion, but driven by the previously recorded args.
            for a in range(0, len(args)):
                arg = args[a]
                # handle CONT keyword
                if arg[:4] == 'CONT':
                    # Get argument referenced by CONT number
                    index = a - int(arg[4:])
                    prev_arg = args[index]
                    tmp_dict = {}
                    for i in range(index, len(args)-1):
                        prev_arg_2 = args[i]
                        new_index = i - index
                        # add in new mapping for each of the args in CONT list
                        tmp_dict[prev_arg_2] = prev_arg+'['+str(new_index)+']'
                    # Map first arg in CONT list
                    tmp_dict[prev_arg] = prev_arg +'[0]'
                    self.method_var_equivs[act_name][length-1].update(tmp_dict)
                    prev_arg = '*' + prev_arg
        # Update Methods Dict with this occurrence's return statement
        self.methods_dict[act_name][2].append(ret)
    else:
        self.methods_dict[act_name] = (intention_Args, [], [ret])
    return act_name
## Visit No Conditional
#
# @brief Return None when there is no conditional
#
# @param node AST instance to be evaluated
#
# @retval none
def visit_NoCond(self, node):
    """Record the absence of a conditional for the current intention."""
    # A None entry in the conditions list marks an unconditional rule.
    conditions = self.methods_dict[self.intention][1]
    conditions.append(None)
    return None
## Listify Boolean Expression
#
# @brief Converts a boolean expression in the tuplized form (see
# visit_BoolExpr return) into a list of the form [a,b,c,...]
# where a,b,c,... are conjunctions. The commas represent disjunctions.
# Parsing the boolean expressions in this matter allows us to
# properly evaluate 'or' expressions.
#
# @param cond post evaluated condition to be redistributed
#
# @retval (Tuple List) List list of boolean AND expressions, where the members of the list of assumed to be ORED together
def listify_BoolExpr(self, cond):
    """Convert a tuplized boolean expression into disjunctive-normal form.

    Returns a list of lists: the outer list members are OR-ed together,
    each inner list is a conjunction of 'UNIT' tuples. See visit_BoolExpr
    for the input tuple format.
    """
    new_conds = []
    if not cond:
        return []
    if cond[0] == 'UNIT':
        # Return single statement as is, nested in two lists
        new_conds.append([cond])
    else:
        # Check if the first value in the tuple is a boolean (the `bound`
        # flag from visit_BoolExpr); if so, evaluate appropriately.
        if isinstance(cond[0], bool):
            # If the boolean is surrounded by parentheses
            # evaluate it as a whole
            if (cond[0]):
                return self.listify_BoolExpr(cond[1:])
            else:
                # otherwise just cut off the first tuple val
                cond = cond[1:]
        # left = evaluate the left-most value (this language is
        # right associative by default)
        left = self.listify_BoolExpr(cond[0])
        # Evaluate the rest of the conditions if there are any
        if len(cond) > 1:
            op = cond[1]
            # NOTE(review): the remainder is wrapped in a list so the
            # recursive call sees it as a single compound expression.
            right = self.listify_BoolExpr([cond[2:]])
            if (op == 'and'):
                # iterate through each list and append the concatenation
                # of each sublist of left and right
                # i.e. if left = [[a],[b]] and right = [[c],[d]]
                # output = [[a,c],[a,d],[b,c],[b,d]]
                for a in left:
                    for b in right:
                        new_conds.append(a+b)
            elif (op == 'or'):
                # for or just concatenate the lists
                new_conds = left+right
        else:
            new_conds = left
    return new_conds
## Traverse Boolean Expression
#
# @brief Recursively descend Boolean Expression and appropriately print it out
#
# @param node post evaluated and listified conditions to be compiled
#
# @rtype: (String, String) return an if statment and body in Python representing a conditional
def traverse_BoolExpr(self, cond):
    """Recursively compile a listified boolean expression to Python text.

    Returns a (body, if_stmt) pair: `body` holds supporting statements
    (e.g. local definitions), `if_stmt` is a single-line conditional.
    """
    # if cond[1] is one of the comparative operators then we
    # know there is only one boolean statement (no && or ||)
    if not cond:
        return '', ''
    body = ''
    if_stmt = ''
    tab = '    '
    # if cond[1] in comps:
    if cond[0] == 'UNIT':
        body, if_stmt = self.compile_bool(cond[1])
        # Only add in tabs for body if there is a body
    else:
        # op = the previous operand (either && or ||). It
        #      starts as 'if' for convenience sake as you'll
        #      see in the code below
        op = 'if'
        if isinstance(cond[0], bool):
            # A leading bool marks a parenthesised group: wrap the
            # compiled condition in parentheses via string surgery.
            body, if_stmt = self.traverse_BoolExpr(cond[1:])
            if_stmt = if_stmt.replace('if ', 'if (')
            if_stmt = if_stmt.replace(':\n', '):\n')
        else:
            body, if_stmt = self.traverse_BoolExpr(cond[0])
        body2 = if_stmt2 = ''
        if len(cond) > 1:
            op = cond[1]
            body2, if_stmt2 = self.traverse_BoolExpr(cond[2:])
        # Only add in tabs if the new addition to body
        # is not empty
        if body2 != '':
            # body += 2*tab+body2
            body += body2
        # Replace the ending colon and newline character with a
        # space for the previous if statement. Replace the 'if'
        # from the new if statement with the appropriate operand
        # (either 'and' or 'or'). Doing this allows us to have
        # the whole conditional on one line, avoiding tabbing issues
        if if_stmt2 != '':
            if_stmt = if_stmt.replace(':\n',' ')+if_stmt2.replace('if', op)
    return body, if_stmt
## Develop And Expression
#
# @brief Takes in a list of boolean expressions and returns the 'AND'
# tuple of each element. The input is the same form as the output
# of the listify_BoolExpr function.
#
# @param exprList list of python conditions to be anded
#
# @retval Tuple Tuple of the same form as visit_BoolExpr to be compiled by compile_boolean
def develop_and_expr(self, exprList):
    """Fold a list of boolean units into right-nested 'and' tuples.

    The output mirrors the visit_BoolExpr tuple shape so it can be fed
    straight into traverse_BoolExpr/compile_bool.
    """
    if not exprList:
        return None
    head, tail = exprList[0], exprList[1:]
    if not tail:
        return head
    # Leading False marks an unparenthesised conjunction for the traverser.
    return False, head, 'and', self.develop_and_expr(tail)
## Visit Conditional
#
# @brief Return the result of evaluating the boolean expression
#
# @param node AST instance to be evaluated
#
# @retval String Python code that represents the conditionals
def visit_Cond(self, node):
    """Compile the conditional of a rule and register it for the intention.

    Listifies the boolean expression into OR-ed conjunctions, compiles each
    conjunction to Python, and pairs every compiled condition with a copy
    of the intention's most recent return statement in methods_dict.
    """
    result = ''
    boolean = self.visit(node.boolean)
    bools_listified = self.listify_BoolExpr(boolean)
    bool_list = []
    for a in range(0, len(bools_listified)):
        and_expr = bools_listified[a]
        bool_list.append(self.develop_and_expr(and_expr))
        # Each OR branch (except the last) gets its own fresh
        # variable-equivalence dict for the current intention.
        if not a == len(bools_listified) - 1:
            self.method_var_equivs[self.intention].append({})
    # Comparative Operators in Custom Language
    # NOTE(review): comps and paren are currently unused here.
    comps = ['==', '<', '>', '<=', '>=']
    # body    = Additional things added to the body of if_stmt.
    #           This could include calls to lookup_type or
    #           defining local variables
    # if_stmt = Handles conditional relationship rules. For
    #           example, this if statement would include
    #           checking the type of an object
    if_stmt = ''
    body = ''
    paren = ''
    copy_ret = ''
    # Evaluate each bool from bool_list and add it to the methods_dict
    # along with a copy of the appropriate ret_val
    if len(bool_list) > 0:
        if len(self.methods_dict[self.intention][2]) > 0:
            # Pop the latest return statement so it can be re-added once
            # per OR branch below.
            copy_ret = self.methods_dict[self.intention][2][len(self.methods_dict[self.intention][2])-1]
            self.methods_dict[self.intention][2].pop()
        for bool2 in bool_list:
            body, if_stmt = self.traverse_BoolExpr(bool2)
            result = body + if_stmt
            self.methods_dict[self.intention][1].append(result)
            self.methods_dict[self.intention][2].append(copy_ret)
        result += body + if_stmt
    return result
## Handle Type Keyword
#
# @brief Returns a string representing the updated if statement for
# the type keyword
#
# @param expr that is the name of the argument to TYPE
# @param arg_num integer that tells if the TYPE keyword is left or right of equals comparator
# @param if_stmt previous code from if statement that must be added to and returned
# @param pos boolean representing whether the comparator was '=' or '!='
#
# @retval String if statement representing the TYPE conditional
def handle_Type(self, expr, arg_num, if_stmt, pos):
    """Extend *if_stmt* with a TYPE-keyword check against state.objs.

    arg_num 1 means TYPE appeared left of the comparator (opens the
    condition); arg_num 2 means right of it (closes the condition).
    *pos* selects positive ('==') versus negated ('not ... ==') checks.
    List/tuple expressions are returned untouched.
    """
    if isinstance(expr, (list, tuple)):
        return if_stmt
    if arg_num == 1:
        var_name = '#' + expr
        opener = 'if ' if pos else 'if not '
        return if_stmt + opener + 'state.objs[' + var_name + '][0] == '
    if arg_num == 2:
        var_name = '#' + expr
        return if_stmt + 'state.objs[' + var_name + '][0]:\n'
    raise Exception('There can only be one expression on either side of an equality comparator!')
## Compile Boolean Statement
#
# @brief Returns a tuple (body, if statement) that represents the
# required additions to the output to successfully match the
# conditional.
#
# @param cond post-evaluated conditional in Tuple form to be compiled
#
# @retval (String, String) if statement and body in Python that represent a conditional
def compile_bool(self, cond):
    """Compile one 'UNIT' comparison tuple into Python (body, if_stmt) text.

    Handles the ALL and TYPE keywords, float/string literals and variable
    equivalences; mutates self.method_var_equivs for the current intention
    as a side effect.
    """
    # body    = Additional things added to the body of if_stmt.
    #           This could include calls to lookup_type or
    #           defining local variables
    # if_stmt = Handles conditional relationship rules. For
    #           example, this if statement would include
    #           checking the type of an object
    body = ''
    if_stmt = ''
    # expr1 = left side of comparison. Could be a variable,
    #         literal, or keyword phrase like TYPE(obj)
    # comp  = comparison operator (should be '==')
    # expr2 = right side of comparison. Could be a variable,
    #         literal, or keyword phrase like ALL(type)
    expr1 = cond[0]
    comp = cond[1]
    expr2 = cond[2]
    # Retrieve the intention arguments from the dictionary
    # NOTE: It is known at this point that there is an entry
    #       for self.intention in the dictionary
    intention_Args = self.methods_dict[self.intention][0]
    length = len(self.method_var_equivs[self.intention])
    # Check comparator type
    if comp == '==':
        # Evaluate All Keyword
        if expr1[0] == 'ALL':
            # define body statement: collect every object id whose recorded
            # type matches expr1[1]
            obj_id = expr1[1]+'_id'
            body += 'all_'+expr1[1]+' = ['+obj_id+' for '
            body += obj_id+' in state.objs if state.objs['+obj_id
            body += '][0]==\''+expr1[1]+'\']\n'
            # add this if statement to preserve appropriate tabbing
            if_stmt += 'if True:\n'
            # items in second expression list
            # NOTE: expr2 must be a list
            for a in range(0, len(expr2)):
                arg = expr2[a]
                # Literal entries (tuples/lists) are skipped entirely.
                if isinstance(arg, (list, tuple)):
                    pass
                else:
                    # Handle CONT keyword: map the variable-length tail onto
                    # a slice of the collected all_<type> list.
                    if arg[:4] == 'CONT':
                        cont_offset = int(arg[4:])
                        prev_arg = expr2[a-cont_offset]
                        # alter arg name to avoid namespace collision
                        arg = arg + '-' + prev_arg
                        self.method_var_equivs[self.intention][length-1][arg] = ')+tuple(all_'+expr1[1]+'['+str(a-cont_offset)+':]'
                    else:
                        self.method_var_equivs[self.intention][length-1][arg] = 'all_'+expr1[1]+'['+str(a)+']'
        # evaluate TYPE keyword
        elif expr1[0] == 'TYPE':
            if_stmt = self.handle_Type(expr1[1], 1, if_stmt, True)
            # the second expression is known to be either a literal
            # or another TYPE expression
            if if_stmt == '':
                return body, 'if True:\n'
            if expr2[0] == 'TYPE':
                if_stmt_hold = if_stmt;
                if_stmt = self.handle_Type(expr2[1], 2, if_stmt, True)
                # If handle_Type changed nothing, fall back to a no-op check.
                if if_stmt == if_stmt_hold:
                    return body, 'if True:\n'
            else:
                if_stmt += '\''+expr2+'\':\n'
        # Handle variable/literal comparison
        else:
            if_stmt += 'if True:\n'
            # var1 and var2 could be either variables or literals
            var1 = ''
            var2 = ''
            isVar1Lit = False
            isVar2Lit = False
            isVar1Flt = False
            isVar2Flt = False
            # Float detection by attempted conversion (EAFP).
            try:
                float(expr1)
                var1 = str(expr1)
                isVar1Flt = True
            except:
                pass
            try:
                float(expr2)
                var2 = str(expr2)
                isVar2Flt = True
            except:
                pass
            # Add quotes around literals and determine which vars
            # are literals
            if not isVar1Flt:
                if expr1[0] == 'LITERAL':
                    var1 = '\''+str(expr1[1])+'\''
                    isVar1Lit = True
                else:
                    var1 = expr1
            if not isVar2Flt:
                if expr2[0] == 'LITERAL':
                    var2 = '\''+str(expr2[1])+'\''
                    isVar2Lit = True
                else:
                    var2 = expr2
            if isVar1Lit and isVar2Lit:
                raise Exception('Comparing '+var1+' and '+var2+' which are both String literals!')
            # They are both variables
            elif isVar1Flt and isVar2Flt:
                raise Exception('Comparing '+var1+' and '+var2+' which are both Floats!')
            elif not isVar1Lit and not isVar2Lit and not isVar1Flt and not isVar2Flt:
                var1_star = '*'+var1
                var2_star = '*'+var2
                real_var = ''
                temp_var = ''
                # The 'real_var' is the one present in the intention, i.e.
                # the method args. References to the 'temp_var' should
                # be replaced with the 'real_var'. Make sure to also check
                # for the starred variables and check for equivalents in the
                # method_var_equivs dictionary.
                if var1 in intention_Args:
                    real_var = var1
                    temp_var = var2
                elif var1_star in intention_Args:
                    # The star always refers to the 0 index
                    real_var = var1 + '[0]'
                    temp_var = var2
                elif var2 in intention_Args:
                    real_var = var2
                    temp_var = var1
                elif var2_star in intention_Args:
                    # The star always refers to the 0 index
                    real_var = var2 + '[0]'
                    temp_var = var1
                elif self.intention in self.method_var_equivs:
                    if var1 in self.method_var_equivs[self.intention][length-1]:
                        real_var = self.method_var_equivs[self.intention][length-1][var1]
                        temp_var = var2
                    elif var2 in self.method_var_equivs[self.intention][length-1]:
                        real_var = self.method_var_equivs[self.intention][length-1][var2]
                        temp_var = var1
                    else:
                        return body, if_stmt
                else:
                    return body, if_stmt
                    # raise Exception('Variables '+var1+','+var2+' were not found!')
                tmp_dict = {}
                tmp_dict[temp_var] = real_var
                self.method_var_equivs[self.intention][length-1].update(tmp_dict)
            # one variable is literal, one isn't
            else:
                lit_var = ''
                real_var = ''
                # determine which is the literal and assign locals
                # appropriately
                if isVar1Lit or isVar1Flt:
                    lit_var = var1
                    real_var = var2
                else:
                    lit_var = var2
                    real_var = var1
                tmp_dict = {}
                tmp_dict[real_var] = lit_var
                self.method_var_equivs[self.intention][length-1].update(tmp_dict)
    elif comp == '!=':
        # Evaluate All Keyword
        if expr1[0] == 'ALL':
            if_stmt += 'if True:\n'
        # evaluate TYPE keyword
        elif expr1[0] == 'TYPE':
            if_stmt = self.handle_Type(expr1[1], 1, if_stmt, False)
            # the second expression is known to be either a literal
            # or another TYPE expression
            if if_stmt == '':
                return body, 'if True:\n'
            if expr2[0] == 'TYPE':
                if_stmt_hold = if_stmt;
                if_stmt = self.handle_Type(expr2[1], 2, if_stmt, False)
                if if_stmt == if_stmt_hold:
                    return body, 'if True:\n'
            else:
                if_stmt += '\''+expr2+'\':\n'
        # Handle variable/literal comparison
        else:
            if_stmt += 'if True:\n'
    elif comp == 'PYTHON':
        # Raw python conditions are passed through unconditionally.
        if_stmt += 'if True:\n'
    else:
        raise Exception('\''+str(comp)+'\' comparator currently not supported')
    return body, if_stmt
## Visit Statement
#
# @brief Evaluates a causal relation and a conditional. Returns None.
#
# @param node AST instance to be evaluated
#
# @retval none
def visit_Stmt(self, node):
# if_stmt = initial if statement string that differentiates
# which rule is being applied
# gadd = g.add statement string that adds the result of the
# rule to the final set
# arg_indices = dictionary containing the i,j indices in the 2d
# arguments array for each of the arguments of the
# actions
intention = self.visit(node.caus)
self.intention = intention
# cond = tuple representing the conditions under which the rule
# holds. See visit_BoolExpr for more insight into the
# formatting here
cond = self.visit(node.cond)
# self.methods_dict[intention][1].append(cond)
return None
    ## Visit Statements
    #
    # @brief Compile all statements and concatenate results.
    #        Pass 1 visits every child statement (populating
    #        self.methods_dict and self.method_var_equivs as side
    #        effects); pass 2 rewrites CONT placeholders; pass 3
    #        substitutes variable equivalences into conditions and
    #        returns; pass 4 emits one Python function per intention
    #        plus its pyhop.declare_methods registration.
    #
    # @param node AST instance to be evaluated
    #
    # @retval String valid Python code output of the program
    def visit_Stmts(self, node):
        result = ''
        # Pass 1: visiting children fills the bookkeeping dictionaries.
        for child in node.children:
            self.visit(child)
        # Define standard tab
        tab = ' '
        # Pass 2: iterate through intentions in the method_var_equivs
        for intent in self.method_var_equivs:
            intent_list = self.method_var_equivs[intent]
            for i in range(0, len(intent_list)):
                int_dict = intent_list[i]
                # iterate through the variables in the method_var_equivs at a
                # given intention
                for var in int_dict:
                    # Only make changes if one of the vars is CONT
                    if 'CONT' in var:
                        # Check for/update value mapped to CONT and update it
                        if int_dict[var] in self.method_var_equivs[intent][i]:
                            old_val = self.method_var_equivs[intent][i][int_dict[var]]
                            # second-to-last character is assumed to be the
                            # single-digit index inside a trailing '[k]'
                            old_index = old_val[len(old_val)-2]
                            new_index = int(old_index)+1
                            # CONT variable names look like 'CONT<k>-...';
                            # the offset sits between position 4 and '-'
                            cont_offset = int(var[4:var.find('-')])
                            new_index = str(new_index - cont_offset)
                            # new_val = ')+tuple('+old_val.replace(old_index+']', new_index+':]')
                            # Turn the single-element access into an open
                            # slice so the rest of the tuple is captured.
                            new_val = ')+tuple('+old_val.replace(']', ':]')
                            self.method_var_equivs[intent][i][var] = new_val
        # Pass 3a: iterate through each intention in the methods dictionary
        for intention in self.methods_dict:
            args = self.methods_dict[intention][0]
            conds = self.methods_dict[intention][1]
            rets = self.methods_dict[intention][2]
            # Iterate through the conditions
            for c in range(0, len(conds)):
                cond = conds[c]
                # Replace all variables with dictionary equivalent if it exists
                # ('#name' is the placeholder syntax used by earlier passes)
                if cond:
                    if intention in self.method_var_equivs:
                        for var in self.method_var_equivs[intention][c]:
                            cond = cond.replace('#'+var, self.method_var_equivs[intention][c][var])
                # Remove remaining unnecessary hashtags
                if cond:
                    cond = cond.replace('#', '')
                conds[c] = cond
            # Pass 3b: iterate through the return statements
            for r in range(0, len(rets)):
                ret = rets[r]
                # Replace all variables with their dictionary equivalents
                if intention in self.method_var_equivs:
                    int_dict = self.method_var_equivs[intention][r]
                    for var in int_dict:
                        # Handle CONT keyword: splice out the argument that
                        # the CONT placeholder absorbs into its slice
                        if 'CONT' in var and var in ret:
                            cont_offset = int(var[4:var.find('-')]) + 1
                            temp_ret = ret
                            temp_ret = temp_ret.split('#'+var)
                            # scan backwards from the placeholder
                            temp_ret = temp_ret[0][::-1]
                            index = -1
                            # Find index of ',' which denotes the end
                            # of the argument in question
                            for i in range(0, len(temp_ret)):
                                c = temp_ret[i]
                                if c == ',':
                                    index = i
                                    cont_offset -= 1
                                    if cont_offset == 0:
                                        break
                            var_index = ret.find('#'+var)
                            ret = ret[0:var_index-index]+ret[var_index:]
                        ret = ret.replace('#'+var, self.method_var_equivs[intention][r][var])
                # Remove unnecessary hashtags
                if ret:
                    ret = ret.replace('#', '')
                rets[r] = ret
        # Pass 4: iterate through the now updated methods dictionary and
        # emit one Python function definition per intention
        for intention in self.methods_dict:
            args = self.methods_dict[intention][0]
            conds = self.methods_dict[intention][1]
            rets = self.methods_dict[intention][2]
            # Build method declaration string
            method_dec = 'def '
            # python functions cannot have hyphens :(
            intention_no_hyphen = intention.replace('-', '_')
            method_dec += intention_no_hyphen
            method_dec += '(state'
            objs_conv = ''
            # Iterate through method args and print
            for arg in args:
                if isinstance(arg, (list, tuple)):
                    if arg[0] == 'PYTHON':
                        arg = arg[1]
                    else:
                        raise Exception('Must define intention at least once without literal argument \''+str(arg[1])+'\'')
                if arg == 'NONE':
                    raise Exception('No full argument list for intention '+str(intention) + ' defined')
                method_dec += ', '+arg
                # identify the presence of the * in the args
                # if it's there define the flatten call which will
                # convert the multiple layer tuple into a single
                # layer array/tuple
                if arg[0] == '*':
                    objs_conv = tab+arg[1:]+' = flatten('+arg[1:]+')\n'
            method_dec += '):\n'
            # register the generated method with pyhop under the
            # original (hyphenated) intention name
            pyhop_stmt = 'pyhop.declare_methods(\''+intention+'\','
            pyhop_stmt += intention_no_hyphen+')\n'
            result += method_dec
            result += objs_conv
            # Reduction check, includes all of the conditional obligations
            # required when there are multiple reductions
            red_check = ''
            # tabbing for the first return
            ret1_tabs = tab
            # tabbing for the second return
            ret2_tabs = tab
            # Check if there are multiple possible reductions
            if len(rets) > 1:
                # This adds in a check which reduction should be used by creating a
                # a comparitive statement that checks if the arguments in the return
                # contains all the arguments passed into the function
                ret2_tabs += tab
                red_check = 2*tab+'__all_args = []\n'
                red_check += 2*tab+'for __action in __ret_val:\n'
                red_check += 3*tab+'for __arg in __action:\n'
                red_check += 4*tab+'__all_args.append(__arg)\n'
                red_check += 2*tab+'__all_intention_args = ['
                for arg in args:
                    if arg[0] == '*':
                        red_check += '[__obj for __obj in '+arg[1:]+']'
                    else:
                        red_check += '['+arg + '],'
                red_check += ']\n'
                red_check += 2*tab+'__all_intention_args = flatten(__all_intention_args)\n'
                red_check += 2*tab+'__all_args = flatten(__all_args)\n'
                red_check += 2*tab+'if set(__all_intention_args).issubset(set(__all_args)):\n'
            # Iterate through return statements
            for i in range(0, len(rets)):
                ret = rets[i]
                cond = conds[i]
                ret1_temp = ret1_tabs
                ret2_temp = ret2_tabs
                # adjust tabbing for condition and add it in
                if cond:
                    if 'if' in cond and ':\n' in cond:
                        ret1_temp += tab
                        ret2_temp += tab
                    num_newlines = cond.count('\n')
                    result += tab + cond.replace('\n', '\n'+tab, num_newlines-1)
                # the return template is expected to contain two lines:
                # the assignment of __ret_val and the actual return
                ret_lines = ret.split('\n')
                # add actual returns, split in case there are two possible returns
                result += ret1_temp + ret_lines[0] + '\n'
                result += red_check
                result += ret2_temp + ret_lines[1] + '\n'
            result += pyhop_stmt
        return result
## Visit Variable
#
# @brief Return a string representing the variable value/name
#
# @param node AST instance to be evaluated
#
# @retval String string representation of the variable value
def visit_Var(self, node):
return str(node.value)
## ID Generator
#
# @brief Randomly generates a variable id. Developed from:
# https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python
#
# @param node AST instance to be evaluated
#
# @retval String random string of length 'size' made of characters 'chars'
def id_generator(self, size=10, chars=string.ascii_uppercase):
return ''.join(random.choice(chars) for _ in range(size))
## Visit State
#
# @brief Return a string representing the variable corresponding
# to the State keyword
#
# @param node AST instance to be evaluated
#
# @retval (String, String) tuple of form 'STATE', state_arg
def visit_State(self, node):
return 'STATE', self.id_generator()
## Visit Python
#
# @brief Return a string representing the Python code to be inlined
#
# @param node AST instance to be evaluated
#
# @retval (String, String) tuple of the form 'PYTHON', python_arg
def visit_Python(self, node):
return 'PYTHON', node.code
## Visit Digit
#
# @brief Returns a string representing a digit
#
# @param node AST instance to be evaluated
#
# @retval String string representation of a digit
def visit_Digit(self, node):
return str(node.value)
## Visit Integer
#
# @brief Returns a string representing a full integer, which is a
# string of concatenated digits
#
# @param node AST instance to be evaluated
#
# @retval String string representation of an integer
def visit_Int(self, node):
result = ''
# is int negative
if not node.sign:
result += '-'
for digit in node.digits:
result += self.visit(digit)
return result
## Visit Float
#
# @brief Returns a float string which is two integers separated by
# a dot
#
# @param node AST instance to be evaluated
#
# @retval String string representation of a float
def visit_Flt(self, node):
result = ''
# is float negative
if not node.sign:
result += '-'
result += self.visit(node.left) + '.' + self.visit(node.right)
return result
## Visit ALL
#
# @brief Returns a tuple of the form ('All', argument)
#
# @param node AST instance to be evaluated
#
# @retval (String, String) tuple of the form 'ALL', all_args
def visit_All(self, node):
return ALL, self.visit(node.arg)
## Visit Type
#
# @brief Returns a tuple of the form ('TYPE', argument)
#
# @param node AST instance to be evaluated
#
# @retval (String, String) tuple of the form 'TYPE', type_arg
def visit_Type(self, node):
return TYPE, self.visit(node.arg)
## Visit NoOp
#
# @brief Returns the empty string
#
# @param node AST instance to be evaluated
#
# @retval String empty string
def visit_NoOp(self, node):
return ''
## Interpret
#
# @brief Actually compile the statement. Returns a string of the final
# program code to be written to file
#
# @retval String final python code to be added to template files
def interpret(self):
tree = self.parser.parse()
return self.visit(tree)
|
Develop a custom project management application to facilitate and track the implementation of The Circuit Regional Trail System.
EPD is developing a project management tool for the Pennsylvania Environmental Council to facilitate implementation of The Circuit Trails, an 800-mile trail network in and around Philadelphia. A major roadblock to realizing trail plans is the complexity inherent in coordinating efforts between a wide range of organizations across different jurisdictions. The Pipeline Manager will empower over 100 organizations across 10 counties by ensuring that their effort is focused on moving projects forward efficiently.
The Pipeline Manager will serve as a central hub for maintaining information on each project. The relational database structure generates a unique dashboard for each organization that displays action items and allows for streamlined coordination between multiple organizations involved in a single project. Regional stakeholders can view the status of the entire system and auto-generate summary analytics to track progress and support grant writing.
|
from flask import Flask
from flask import jsonify
from bs4 import BeautifulSoup
import urllib
import re
import json
import unicodedata
app = Flask(__name__)
def remove_brackets(text):
    """Strip bracketed references and parenthesised asides from text.

    Removes every non-greedy ``[...]`` span (e.g. Wikipedia citation
    markers such as ``[1]``) and then every non-greedy ``(...)`` span.
    Nested or multi-line spans are not handled (``.`` does not match
    newlines).

    :param text: string scraped from a Wikipedia infobox cell.
    :return: the string with bracketed/parenthesised spans removed.
    """
    # Raw string literals: '\[' in a plain string is an invalid escape
    # and raises a warning (eventually an error) on modern Pythons.
    ret = re.sub(r'\[.+?\]', '', text)
    ret = re.sub(r'\(.+?\)', '', ret)
    return ret
@app.route('/api/')
@app.route('/api/<cricketer>')
def api(cricketer=None):
    # Scrape the Wikipedia infobox ("infobox vcard" table) for the named
    # cricketer and return its contents as nested JSON. Python 2 only:
    # relies on urllib.urlopen and str.encode('ascii','ignore') on the
    # unicode text BeautifulSoup returns.
    if cricketer == None:
        res = {}
        res['error'] = True
        res['message'] = 'Please provide a cricketer name as GET parameter'
        return jsonify(res)
    else:
        res = {}
        # Wikipedia article titles use underscores instead of spaces.
        cricketer = cricketer.replace (' ', '_')
        url = 'https://en.wikipedia.org/wiki/'+str(cricketer)
        html = urllib.urlopen(url).read()
        soup = BeautifulSoup(html)
        # 'current' tracks the most recent section header row (th) so
        # following rows are filed under it.
        current = None
        for row in soup.find("table", {"class": "infobox vcard"}).findAll('tr'):
            children = row.find_all(True, recursive=False)
            if len(children) == 1:
                if children[0].name == 'th':
                    # Section header: normalise to ascii snake_case key.
                    current = unicodedata.normalize('NFKD',children[0].text).encode('ascii','ignore')
                    current = current.lower().replace(' ','_').strip()
                    res[current] = {}
                elif children[0].name == 'td' and children[0].table:
                    # Nested table (e.g. career statistics): first row is
                    # the column headers, later rows are data keyed by
                    # their first cell.
                    first = True
                    # NOTE(review): 'list' shadows the builtin; holds the
                    # column-header keys in order.
                    list = []
                    for r in children[0].table.findAll('tr'):
                        if first:
                            # f skips the first (corner) header cell.
                            f = True
                            ths = r.find_all(True, recursive=False)
                            for head in ths:
                                if not f:
                                    key = unicodedata.normalize('NFKD', head.text).encode('ascii','ignore')
                                    key = remove_brackets(key).lower().replace('.','').strip().replace(' ','_')
                                    res[current][key] = {}
                                    list.append(key)
                                else:
                                    # NOTE(review): on the very first cell
                                    # 'key' is not yet assigned, so this
                                    # append looks like it would raise
                                    # NameError -- confirm against a live
                                    # infobox / intended behaviour.
                                    list.append(key)
                                    f= False
                            first = False
                        else:
                            # Data row: first cell is the row label.
                            ths = r.find_all(True, recursive=False)
                            key = unicodedata.normalize('NFKD',ths[0].text).encode('ascii','ignore')
                            key = remove_brackets(key).lower().replace('.','').strip().replace(' ','_')
                            f = True
                            i = 1
                            for head in list:
                                if not f:
                                    value = unicodedata.normalize('NFKD',ths[i].text).encode('ascii','ignore')
                                    value = remove_brackets(value).replace('\n','').strip()
                                    # A trailing '/' means a missing stat
                                    # component; pad with '0'.
                                    if value.endswith('/'):
                                        value += "0"
                                    i += 1
                                    res[current][head][key] = value
                                else:
                                    f= False
            elif len(children) == 2:
                # Simple key/value row under the current section.
                if current is not None:
                    value = unicodedata.normalize('NFKD',children[1].text).encode('ascii','ignore')
                    key = unicodedata.normalize('NFKD',children[0].text).encode('ascii','ignore')
                    key = remove_brackets(key).lower().replace('.','').strip().replace(' ','_')
                    value = remove_brackets(value).replace('\n','').strip()
                    res[current][key] = value
        return jsonify(res)
@app.route('/')
@app.route('/index')
def index():
    # Landing page served at both '/' and '/index'.
    greeting = 'Welcome to Crick Info API, this is currently under development!'
    return greeting
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
|
Most CIS setups require tubing because of the moving carriage that the cartridges sit in. However, for models that don’t require the cartridges to move back and forth from side to side, tubing is not necessary and can be eliminated from the CIS by combining the tank and cartridge into one.
What is the concept for a “CIS” or Continuous Ink System and why do Brother CIS systems not require tubes and external ink chambers?
A CIS or Continuous Ink System is exactly what it sounds like – a system that is designed to feed continuous ink supply to your printer without the need to change cartridges. The “system” comes with a lot of ink to start (usually, 4-10x the amount of ink found in a regular ink cartridge), and it also comes with a mechanism to keep the ink flowing (through a refill port, so you can keep adding ink without needing to change cartridges).
Brother printers are made a little differently to other popular printer brands such as Epson or Canon, and the main difference is that the cartridges inside a Brother printer are STATIC – that means, they do not move like they do with Canon or Epson. In Canon and Epson printers, the cartridges move from left to right as they go over the paper. With Brother, the cartridges DO NOT MOVE. Therefore, there is a practical difference in the design of the CIS for Brother vs other printers. Since the cartridges move inside other printers, the continuous ink system also needs to “move”. Hence, CIS systems for Canon and Epson have tubes, which move with the cartridges as the cartridges move, and they feed off of external ink tanks. Since Brother cartridges do not move, tubes are not needed to be a part of the CIS, and so external ink tanks are not needed. Therefore the CIS for Brother is not external. The Brother CIS system is essentially composed of oversized cartridges that hold 4-10x as much ink as regular ink cartridges, and are designed with refill ports so you can add ink to the oversized cartridges. In many ways, this is a better system than having external ink tanks and tubes.
|
from __future__ import print_function
from __future__ import division
import numpy as np
from numpy import logical_not, ones
from numpy.random import seed, randint
from numpy import concatenate
from random import sample
from collections import Counter
from .unbalanced_dataset import UnbalancedDataset
class UnderSampler(UnbalancedDataset):
    """
    Object to under sample the majority class(es) by randomly picking samples
    with or without replacement.
    """

    def __init__(self,
                 ratio=1.,
                 random_state=None,
                 replacement=True,
                 verbose=True):
        """
        :param ratio:
            The ratio of majority elements to sample with respect to the number
            of minority cases.

        :param random_state:
            Seed.

        :param replacement:
            Whether to sample the majority classes with replacement
            (numpy randint) or without (random.sample).

        :param verbose:
            Whether to print a summary after resampling.

        :return:
            underx, undery: The features and target values of the under-sampled
            data set.
        """
        # Passes the relevant parameters back to the parent class.
        UnbalancedDataset.__init__(self,
                                   ratio=ratio,
                                   random_state=random_state,
                                   verbose=verbose)

        self.replacement = replacement

    def resample(self):
        """
        Randomly drop majority samples until each majority class holds at
        most ratio * (number of minority samples) elements.

        :return:
            underx, undery: the under-sampled features and targets.
        """
        # Start with the minority class
        underx = self.x[self.y == self.minc]
        undery = self.y[self.y == self.minc]

        # Loop over the other classes under picking at random
        for key in self.ucd.keys():

            # If the minority class is up, skip it
            if key == self.minc:
                continue

            # Set the ratio to be no more than the number of samples available
            if self.ratio * self.ucd[self.minc] > self.ucd[key]:
                num_samples = self.ucd[key]
            else:
                num_samples = int(self.ratio * self.ucd[self.minc])

            # Pick some elements at random
            # NOTE(review): this reseeds numpy's *global* RNG on every
            # class iteration with the same seed -- confirm that the
            # identical draw pattern per class is intended.
            seed(self.rs)
            if self.replacement:
                indx = randint(low=0, high=self.ucd[key], size=num_samples)
            else:
                indx = sample(range((self.y == key).sum()), num_samples)

            # Concatenate to the minority class
            underx = concatenate((underx, self.x[self.y == key][indx]), axis=0)
            undery = concatenate((undery, self.y[self.y == key][indx]), axis=0)

        if self.verbose:
            print("Under-sampling performed: " + str(Counter(undery)))

        return underx, undery
class TomekLinks(UnbalancedDataset):
    """
    Object to identify and remove majority samples that form a Tomek link with
    minority samples.
    """

    def __init__(self, verbose=True):
        """
        :param verbose:
            Whether to print progress information.

        :return:
            Nothing.
        """
        UnbalancedDataset.__init__(self, verbose=verbose)

    def resample(self):
        """
        :return:
            Return the data with majority samples that form a Tomek link
            removed.
        """
        from sklearn.neighbors import NearestNeighbors

        # Find the nearest neighbour of every point
        nn = NearestNeighbors(n_neighbors=2)
        nn.fit(self.x)
        # Column 0 of kneighbors is each point itself, so keep column 1:
        # the index of the true nearest neighbour.
        nns = nn.kneighbors(self.x, return_distance=False)[:, 1]

        # Send the information to is_tomek function to get boolean vector back
        # NOTE(review): is_tomek is inherited from UnbalancedDataset and is
        # assumed to flag only *majority* members of Tomek links -- confirm
        # in the parent class.
        if self.verbose:
            print("Looking for majority Tomek links...")
        links = self.is_tomek(self.y, nns, self.minc, self.verbose)

        if self.verbose:
            print("Under-sampling "
                  "performed: " + str(Counter(self.y[logical_not(links)])))

        # Return data set without majority Tomek links.
        return self.x[logical_not(links)], self.y[logical_not(links)]
class ClusterCentroids(UnbalancedDataset):
    """
    Experimental method that under samples the majority class by replacing a
    cluster of majority samples by the cluster centroid of a KMeans algorithm.

    This algorithm keeps N majority samples by fitting the KMeans algorithm
    with N cluster to the majority class and using the coordinates of the N
    cluster centroids as the new majority samples.
    """

    def __init__(self, ratio=1, random_state=None, verbose=True, **kwargs):
        """
        :param kwargs:
            Arguments the user might want to pass to the KMeans object from
            scikit-learn.

        :param ratio:
            The number of cluster to fit with respect to the number of samples
            in the minority class.
            N_clusters = int(ratio * N_minority_samples) = N_maj_undersampled.

        :param random_state:
            Seed.

        :return:
            Under sampled data set.
        """

        UnbalancedDataset.__init__(self, ratio=ratio,
                                   random_state=random_state,
                                   verbose=verbose)

        self.kwargs = kwargs

    def resample(self):
        """
        Replace each majority class with the centroids of a KMeans fit on
        that class, keeping the minority class untouched.

        :return:
            underx, undery: the under-sampled features and targets. Note
            that the majority rows are synthetic centroids, not original
            samples.
        """
        # Create the clustering object
        from sklearn.cluster import KMeans
        kmeans = KMeans(random_state=self.rs)
        kmeans.set_params(**self.kwargs)

        # Start with the minority class
        underx = self.x[self.y == self.minc]
        undery = self.y[self.y == self.minc]

        # Loop over the other classes under picking at random
        for key in self.ucd.keys():
            # If the minority class is up, skip it.
            if key == self.minc:
                continue

            # Set the number of clusters to be no more than the number of
            # samples
            if self.ratio * self.ucd[self.minc] > self.ucd[key]:
                n_clusters = self.ucd[key]
            else:
                n_clusters = int(self.ratio * self.ucd[self.minc])

            # Set the number of clusters and find the centroids
            kmeans.set_params(n_clusters=n_clusters)
            kmeans.fit(self.x[self.y == key])
            centroids = kmeans.cluster_centers_

            # Concatenate to the minority class
            underx = concatenate((underx, centroids), axis=0)
            undery = concatenate((undery, ones(n_clusters) * key), axis=0)

        if self.verbose:
            print("Under-sampling performed: " + str(Counter(undery)))

        return underx, undery
class NearMiss(UnbalancedDataset):
    """
    An implementation of NearMiss.

    See the original paper: NearMiss - "kNN Approach to Unbalanced Data
    Distributions: A Case Study involving Information Extraction" by Zhang
    et al. for more details.
    """

    def __init__(self, ratio=1., random_state=None,
                 version=1, size_ngh=3, ver3_samp_ngh=3,
                 verbose=True, **kwargs):
        """
        :param version:
            Version of the NearMiss to use. Possible values
            are 1, 2 or 3. See the original paper for details
            about these different versions.

        :param size_ngh:
            Size of the neighbourhood to consider to compute the
            average distance to the minority point samples.

        :param ver3_samp_ngh:
            NearMiss-3 algorithm start by a phase of re-sampling. This
            parameter correspond to the number of neighbours selected
            create the sub_set in which the selection will be performed.

        :param **kwargs:
            Parameter to use for the Nearest Neighbours.
        """

        # Passes the relevant parameters back to the parent class.
        UnbalancedDataset.__init__(self, ratio=ratio,
                                   random_state=random_state,
                                   verbose=verbose)

        # Assign the parameter of the element of this class
        # Check that the version asked is implemented
        if not (version == 1 or version == 2 or version == 3):
            raise ValueError('UnbalancedData.NearMiss: there is only 3 '
                             'versions available with parameter version=1/2/3')

        self.version = version
        self.size_ngh = size_ngh
        self.ver3_samp_ngh = ver3_samp_ngh
        self.kwargs = kwargs

    def resample(self):
        """
        Keep, for every majority class, the num_samples elements selected
        by the configured NearMiss variant (1, 2 or 3), based on their
        average distance to minority neighbours.

        :return:
            underx, undery: the under-sampled features and targets.
        """
        # Start with the minority class
        underx = self.x[self.y == self.minc]
        undery = self.y[self.y == self.minc]

        # For each element of the current class, find the set of NN
        # of the minority class
        from sklearn.neighbors import NearestNeighbors

        # Call the constructor of the NN
        nn_obj = NearestNeighbors(n_neighbors=self.size_ngh, **self.kwargs)

        # Fit the minority class since that we want to know the distance
        # to these point
        nn_obj.fit(self.x[self.y == self.minc])

        # Loop over the other classes under picking at random
        for key in self.ucd.keys():

            # If the minority class is up, skip it
            if key == self.minc:
                continue

            # Set the ratio to be no more than the number of samples available
            if self.ratio * self.ucd[self.minc] > self.ucd[key]:
                num_samples = self.ucd[key]
            else:
                num_samples = int(self.ratio * self.ucd[self.minc])

            # Get the samples corresponding to the current class
            sub_samples_x = self.x[self.y == key]
            sub_samples_y = self.y[self.y == key]

            if self.version == 1:
                # NearMiss-1: smallest average distance to the size_ngh
                # *closest* minority neighbours.
                # Find the NN
                dist_vec, idx_vec = nn_obj.kneighbors(sub_samples_x,
                                                      n_neighbors=self.size_ngh)

                # Select the right samples
                sel_x, sel_y = self.__SelectionDistBased__(dist_vec,
                                                           num_samples,
                                                           key,
                                                           sel_strategy='nearest')
            elif self.version == 2:
                # NearMiss-2: smallest average distance to the size_ngh
                # *farthest* minority neighbours (query all minority points,
                # the selection helper looks at the last size_ngh columns).
                # Find the NN
                dist_vec, idx_vec = nn_obj.kneighbors(sub_samples_x,
                                                      n_neighbors=self.y[self.y == self.minc].size)

                # Select the right samples
                sel_x, sel_y = self.__SelectionDistBased__(dist_vec,
                                                           num_samples,
                                                           key,
                                                           sel_strategy='nearest')
            elif self.version == 3:
                # NearMiss-3: first shortlist majority samples that are
                # neighbours of minority points, then keep those with the
                # largest average distance.
                # We need a new NN object to fit the current class
                nn_obj_cc = NearestNeighbors(n_neighbors=self.ver3_samp_ngh,
                                             **self.kwargs)
                nn_obj_cc.fit(sub_samples_x)

                # Find the set of NN to the minority class
                dist_vec, idx_vec = nn_obj_cc.kneighbors(self.x[self.y == self.minc])

                # Create the subset containing the samples found during the NN
                # search. Linearize the indexes and remove the double values
                idx_vec = np.unique(idx_vec.reshape(-1))

                # Create the subset
                sub_samples_x = sub_samples_x[idx_vec, :]
                sub_samples_y = sub_samples_y[idx_vec]

                # Compute the NN considering the current class
                dist_vec, idx_vec = nn_obj.kneighbors(sub_samples_x,
                                                      n_neighbors=self.size_ngh)

                sel_x, sel_y = self.__SelectionDistBased__(dist_vec,
                                                           num_samples,
                                                           key,
                                                           sel_strategy='farthest')

            underx = concatenate((underx, sel_x), axis=0)
            undery = concatenate((undery, sel_y), axis=0)

        if self.verbose:
            print("Under-sampling performed: " + str(Counter(undery)))

        return underx, undery

    def __SelectionDistBased__(self,
                               dist_vec,
                               num_samples,
                               key,
                               sel_strategy='nearest'):
        # Rank the samples of class `key` by the summed distance over the
        # last size_ngh neighbour columns of dist_vec, then keep the first
        # num_samples in the requested order.
        # NOTE(review): version 3 indexes self.x[self.y == key] with ranks
        # computed on the *shortlisted* subset -- confirm the index spaces
        # line up as intended.

        # Compute the distance considering the farthest neighbour
        dist_avg_vec = np.sum(dist_vec[:, -self.size_ngh:], axis=1)

        # Sort the list of distance and get the index
        if sel_strategy == 'nearest':
            sort_way = False
        elif sel_strategy == 'farthest':
            sort_way = True
        else:
            raise ValueError('Unbalanced.NearMiss: the sorting can be done '
                             'only with nearest or farthest data points.')

        sorted_idx = sorted(range(len(dist_avg_vec)),
                            key=dist_avg_vec.__getitem__,
                            reverse=sort_way)

        # Select the desired number of samples
        sel_idx = sorted_idx[:num_samples]

        return self.x[self.y == key][sel_idx], self.y[self.y == key][sel_idx]
class CondensedNearestNeighbour(UnbalancedDataset):
    """
    An implementation of Condensed Nearest Neighbour.

    See the original paper: CNN - "Addressing the Curse of Imbalanced Training
    Set: One-Sided Selection" by Kubat et al. for more details.
    """

    def __init__(self, random_state=None,
                 size_ngh=1, n_seeds_S=1, verbose=True,
                 **kwargs):
        """
        :param size_ngh
            Size of the neighbourhood to consider to compute the
            average distance to the minority point samples.

        :param n_seeds_S
            Number of samples to extract in order to build the set S.

        :param **kwargs
            Parameter to use for the Nearest Neighbours.
        """

        # Passes the relevant parameters back to the parent class.
        UnbalancedDataset.__init__(self, random_state=random_state,
                                   verbose=verbose)

        # Assign the parameter of the element of this class
        self.size_ngh = size_ngh
        self.n_seeds_S = n_seeds_S
        self.kwargs = kwargs

    def resample(self):
        """
        Keep, for each majority class, only the samples misclassified by a
        k-NN trained on the minority class plus a few majority seeds
        (the condensed set).

        :return:
            underx, undery: the under-sampled features and targets.
        """
        # Start with the minority class
        underx = self.x[self.y == self.minc]
        undery = self.y[self.y == self.minc]

        # Import the K-NN classifier
        from sklearn.neighbors import KNeighborsClassifier

        # Loop over the other classes under picking at random
        for key in self.ucd.keys():

            # If the minority class is up, skip it
            if key == self.minc:
                continue

            # Randomly get one sample from the majority class
            # NOTE(review): random.sample over a 2-D ndarray relies on
            # Python 2 sequence handling; under Python 3 this raises
            # TypeError -- confirm the supported Python version.
            maj_sample = sample(self.x[self.y == key],
                                self.n_seeds_S)

            # Create the set C
            C_x = np.append(self.x[self.y == self.minc],
                            maj_sample,
                            axis=0)
            C_y = np.append(self.y[self.y == self.minc],
                            [key] * self.n_seeds_S)

            # Create the set S
            S_x = self.x[self.y == key]
            S_y = self.y[self.y == key]

            # Create a k-NN classifier
            knn = KNeighborsClassifier(n_neighbors=self.size_ngh,
                                       **self.kwargs)

            # Fit C into the knn
            knn.fit(C_x, C_y)

            # Classify on S
            pred_S_y = knn.predict(S_x)

            # Find the misclassified S_y
            sel_x = np.squeeze(S_x[np.nonzero(pred_S_y != S_y), :])
            sel_y = S_y[np.nonzero(pred_S_y != S_y)]

            underx = concatenate((underx, sel_x), axis=0)
            undery = concatenate((undery, sel_y), axis=0)

        if self.verbose:
            print("Under-sampling performed: " + str(Counter(undery)))

        return underx, undery
class OneSidedSelection(UnbalancedDataset):
    """
    An implementation of One-Sided Selection.

    See the original paper: OSS - "Addressing the Curse of Imbalanced Training
    Set: One-Sided Selection" by Kubat et al. for more details.
    """

    def __init__(self, random_state=None,
                 size_ngh=1, n_seeds_S=1, verbose=True,
                 **kwargs):
        """
        :param size_ngh
            Size of the neighbourhood to consider to compute the
            average distance to the minority point samples.

        :param n_seeds_S
            Number of samples to extract in order to build the set S.

        :param **kwargs
            Parameter to use for the Nearest Neighbours.
        """

        # Passes the relevant parameters back to the parent class.
        UnbalancedDataset.__init__(self, random_state=random_state,
                                   verbose=verbose)

        # Assign the parameter of the element of this class
        self.size_ngh = size_ngh
        self.n_seeds_S = n_seeds_S
        self.kwargs = kwargs

    def resample(self):
        """
        Condensed-nearest-neighbour step (keep misclassified majority
        samples) followed by removal of majority Tomek links from the
        condensed set.

        :return:
            underx, undery: the under-sampled features and targets.
        """
        # Start with the minority class
        underx = self.x[self.y == self.minc]
        undery = self.y[self.y == self.minc]

        # Import the K-NN classifier
        from sklearn.neighbors import KNeighborsClassifier

        # Loop over the other classes under picking at random
        for key in self.ucd.keys():

            # If the minority class is up, skip it
            if key == self.minc:
                continue

            # Randomly get one sample from the majority class
            # NOTE(review): random.sample over a 2-D ndarray relies on
            # Python 2 sequence handling; under Python 3 this raises
            # TypeError -- confirm the supported Python version.
            maj_sample = sample(self.x[self.y == key],
                                self.n_seeds_S)

            # Create the set C
            C_x = np.append(self.x[self.y == self.minc],
                            maj_sample,
                            axis=0)
            C_y = np.append(self.y[self.y == self.minc],
                            [key] * self.n_seeds_S)

            # Create the set S
            S_x = self.x[self.y == key]
            S_y = self.y[self.y == key]

            # Create a k-NN classifier
            knn = KNeighborsClassifier(n_neighbors=self.size_ngh,
                                       **self.kwargs)

            # Fit C into the knn
            knn.fit(C_x, C_y)

            # Classify on S
            pred_S_y = knn.predict(S_x)

            # Find the misclassified S_y
            sel_x = np.squeeze(S_x[np.nonzero(pred_S_y != S_y), :])
            sel_y = S_y[np.nonzero(pred_S_y != S_y)]

            underx = concatenate((underx, sel_x), axis=0)
            undery = concatenate((undery, sel_y), axis=0)

        from sklearn.neighbors import NearestNeighbors

        # Find the nearest neighbour of every point
        nn = NearestNeighbors(n_neighbors=2)
        nn.fit(underx)
        # Column 0 is the point itself; column 1 is its true 1-NN.
        nns = nn.kneighbors(underx, return_distance=False)[:, 1]

        # Send the information to is_tomek function to get boolean vector back
        if self.verbose:
            print("Looking for majority Tomek links...")
        links = self.is_tomek(undery, nns, self.minc, self.verbose)

        if self.verbose:
            print("Under-sampling "
                  "performed: " + str(Counter(undery[logical_not(links)])))

        # Return data set without majority Tomek links.
        return underx[logical_not(links)], undery[logical_not(links)]
class NeighbourhoodCleaningRule(UnbalancedDataset):
    """
    An implementation of Neighbourhood Cleaning Rule.

    See the original paper: NCL - "Improving identification of difficult small
    classes by balancing class distribution" by Laurikkala et al. for more details.
    """

    def __init__(self, random_state=None,
                 size_ngh=3, verbose=True, **kwargs):
        """
        :param size_ngh
            Size of the neighbourhood to consider in order to make
            the comparison between each samples and their NN.

        :param **kwargs
            Parameter to use for the Nearest Neighbours.
        """

        # Passes the relevant parameters back to the parent class.
        UnbalancedDataset.__init__(self, random_state=random_state,
                                   verbose=verbose)

        # Assign the parameter of the element of this class
        self.size_ngh = size_ngh
        self.kwargs = kwargs

    def resample(self):
        """
        Remove samples whose neighbourhood disagrees with their label:
        misclassified majority samples are dropped, and majority
        neighbours of misclassified minority samples are dropped too.

        :return:
            underx, undery: the cleaned features and targets.
        """
        # Start with the minority class
        underx = self.x[self.y == self.minc]
        undery = self.y[self.y == self.minc]

        # Import the k-NN classifier
        from sklearn.neighbors import NearestNeighbors

        # Create a k-NN to fit the whole data
        nn_obj = NearestNeighbors(n_neighbors=self.size_ngh)

        # Fit the whole dataset
        nn_obj.fit(self.x)

        idx_to_exclude = []
        # Loop over the other classes under picking at random
        for key in self.ucd.keys():

            # Get the sample of the current class
            sub_samples_x = self.x[self.y == key]

            # Get the samples associated
            idx_sub_sample = np.nonzero(self.y == key)[0]

            # Find the NN for the current class
            # (indices returned are into the *whole* dataset self.x)
            nnhood_idx = nn_obj.kneighbors(sub_samples_x, return_distance=False)

            # Get the label of the corresponding to the index
            nnhood_label = (self.y[nnhood_idx] == key)

            # Check which one are the same label than the current class
            # Make an AND operation through the three neighbours
            nnhood_bool = np.logical_not(np.all(nnhood_label, axis=1))

            # If the minority class remove the majority samples (as in politic!!!! ;))
            if key == self.minc:
                # Get the index to exclude
                idx_to_exclude += nnhood_idx[np.nonzero(nnhood_label[np.nonzero(nnhood_bool)])].tolist()
            else:
                # Get the index to exclude
                idx_to_exclude += idx_sub_sample[np.nonzero(nnhood_bool)].tolist()

        # Create a vector with the sample to select
        sel_idx = np.ones(self.y.shape)
        sel_idx[idx_to_exclude] = 0

        # Get the samples from the majority classes
        # NOTE(review): minority samples that are *not* excluded pass this
        # filter too and are appended on top of the initial minority copy,
        # duplicating them in the output -- confirm this is intended.
        sel_x = np.squeeze(self.x[np.nonzero(sel_idx), :])
        sel_y = self.y[np.nonzero(sel_idx)]

        underx = concatenate((underx, sel_x), axis=0)
        undery = concatenate((undery, sel_y), axis=0)

        if self.verbose:
            print("Under-sampling performed: " + str(Counter(undery)))

        return underx, undery
|
I’ve been reading a book by Randall Collins. His book The Sociology of Philosophies (1998) made a major impact on me. In a direct way, and it was for this reason a Dutch colleague referred me to the book, his demonstration of splits and concretizings of intellectual circles gave us a hypothesis to use in domain analytical research. It boiled down to the notion that no school of thought can tolerate more than six theoretical paradigms (or ideas if you prefer) at once without either splitting apart (schism, as it were) or ejecting competing points of view until a tolerable level is restored. I first went looking for this in the evolving domain of music information retrieval and sure enough, as a new research interest it opened its arms widely and expanded rapidly, but once query-by-humming had been achieved the society (it had been first a symposium, but at this point it became a society with bylaws, officers, and a scope statement) quickly reined in acceptable research. Others have found similar evidence (Hoeffner in Social informatics, for example, LIU diss. 2012).
It’s a marvelous book all by itself of course, if very long. The critical realization I had while reading it was that I had spent almost two decades working with doctoral students who not only were not forming a school of thought around my work, but likely would never conduct any research after their dissertations, and certainly would never contribute to growth of my theory of instantiation. (Although, to be fair, near the end of my time at LIU several students did take up domain analysis and contribute to the cumulative effect of domain analytic research in knowledge organization, although to the best of my knowledge none of them has conducted or published any follow-up work.) And with that I determined to move elsewhere while I still had energy to take on new students.
Somewhere last fall I read about Collins’ much shorter 1979 work The Credential Society, and I’ve just finished reading it after proudly hauling it to Copenhagen, Heraklion, Amsterdam and back. It also is a remarkable work, hence this post, and I think it will have something to contribute to domain analysis, although at present I’m not quite sure how.
The book is about the myth of technocracy, that as society evolves and technology becomes more complex and we become ever more highly educated so as to deal with technology and (here’s the myth) therefore society gets better, people get richer, everything becomes more egalitarian, etc., etc. You’ll recognize the myth. In a short 220some pages Collins shatters this myth, demonstrating that no amount of education has made any difference and neither has technology. In fact, the only evidence about career and social status that makes sense is the age-old truth that (male) children follow in their father’s footsteps in both career and social status. I know, you want to protest, and so do I. My parents weren’t professors (but, my biological father was a musician and my biological maternal great-grandfather was an ordained pastor … hmmm). As usual, I’m not doing the book justice, you’ll have to read it.
Two things stood out for me. First the notion that we have accomplished a sinecure society. Sinecure, he writes, means literally “without care” and refers to a job in which there is actually little work. Collins points out that society has succeeded at installing a sinecure society by making most work into what once would have been leisure. Most of us read, write and think as work these days. Once upon a time that would have been a life reserved to only those who did not need to work.
The other idea, and here probably is the connection to domain analysis, is that professions secure their concretization and hence their survival with rather medieval approaches to credentialism. The easiest example from the book is the practice of medicine, which has the highest status and salary potential in our society, and for which the education (which is lengthy and expensive) has almost nothing to do with the practice except to confer the credential. As Collins shows, most medical practitioners learn on the job from other practitioners. But the mostly upperclass male medical doctors have succeeded with their credentialing in shutting out all other actors in healthcare from orderlies to nurses to social workers, most of whom labor for little in feminized professions. I know, this isn’t pretty. Collins takes on engineering and law too, but I won’t go into that here.
Along the way his narrative about the evolution of higher education in the US through the 18th, 19th and 20th centuries is about the clearest explanation I’ve seen, although it is consistent with the same trajectory painted by Louis Menand in The Metaphysical Club (erm, can you paint a trajectory? oh well, a topic for another time). All of us who make our livings and livelihoods as professors ought to have a better sense of how things got this way and I recommend both books for that reason.
I’ll keep pondering this of course. I think somehow we might be able to discover that a concretized domain is also somehow credentialed. Evidence of that might be useful for determining who the relevant actors are in the evolution of a domain ontology at any given moment. As I said, it needs some pondering.
I was recently one of a handful of keynote speakers at ISKO Brazil, meeting in Rio de Janeiro, May 27-29. It was my first trip to Brazil, and I was just a little shocked to find myself sitting at a bar at Ipanema listening to Bossa Nova. I texted my sister, because our mother (who passed away about a year ago) used to dream of such a thing. Well, be that as it might, I won’t write here about culture shock, I’ll come back to that.
I spent some time musing about dimensions and how epistemology could be a dimension of knowledge organization. In the end my presentation became rather pedantic, but that is because I think there is too much wiggle room in ISKO about just what knowledge organization is. And I think that is problematic for a domain that thinks of itself as a science.
I’ll try to write more about this soon.
My editorial based on the conference proceedings from Mysore was just published in Knowledge Organization, v. 40, no. 1 (2013): 1-10. I gave it the subtitle “evolving intension,” because from what I could see in the statistical evidence, the theoretical core of knowledge organization is stable and is represented in these papers, but there was less granularity than in recent biennial ISKO conferences, suggesting differences peculiar to this specific mixture of scholars which appear to be sort of pushing and pulling the boundaries inside the domain, thus evolution is taking place in the intension. Of course, it is hard to take one moment in time represented by a single conference by itself; so it will be interesting to see how this dataset fits into the domain analysis of knowledge organization over time.
As I commented about earlier, there was quite a different mix of scholars at this conference, probably because of the exotic location. It did seem as though many of the usual suspects were not present, but the conference was well-attended anyway, by new people, which was good. The effect of this shows up in my analysis in the prevalence of papers from Brazil and India, which had the largest presence together with the US. I expect there is therefore some influence of the emerging economic powerhouses of Brazil and India on the thematic emphases of the conference, with digital solutions at the top of the list, relationships and domains rising up the thematic distribution, and categories and general classifications falling to the bottom. I was not able to demonstrate this statistically, however, as there were too few cases in the cells of a cross-tabulation.
The tug-and-pull between empirical scientific methods and humanistic methods, or epistemologies, was evident in the heavy reliance on monographic citations; only about half of the citations were to journal articles.
Of course, it was no surprise that S.R. Ranganathan had clear influence on the conference participants; but it also is true that facets are increasingly being found useful in knowledge organization systems.
In my experience of ISKO, which now is a bit more than a decade, it was the first time I had seen “official ISKO vehicles.” I thought that was delightful!
|
from warnings import warn
import pandas as pd
from ..utils import make_iterable, order_as_mapping_data
from ..exceptions import PlotnineWarning
from ..doctools import document
from ..mapping import aes
from .geom import geom
from .geom_segment import geom_segment
@document
class geom_hline(geom):
    """
    Horizontal line

    {usage}

    Parameters
    ----------
    {common_parameters}
    """
    DEFAULT_AES = {'color': 'black', 'linetype': 'solid',
                   'size': 0.5, 'alpha': 1}
    REQUIRED_AES = {'yintercept'}
    DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity',
                      'na_rm': False, 'inherit_aes': False}
    legend_geom = 'path'

    def __init__(self, mapping=None, data=None, **kwargs):
        mapping, data = order_as_mapping_data(mapping, data)
        intercept = kwargs.pop('yintercept', None)

        # Without the parameter, behave like a regular geom and let the
        # mapping (if any) supply the yintercept aesthetic.
        if intercept is None:
            geom.__init__(self, mapping, data, **kwargs)
            return

        if mapping:
            warn("The 'yintercept' parameter has overridden "
                 "the aes() mapping.", PlotnineWarning)

        # Materialise the parameter value into data + mapping so the rest
        # of the pipeline treats it like any other mapped aesthetic.
        data = pd.DataFrame({'yintercept': make_iterable(intercept)})
        mapping = aes(yintercept='yintercept')
        kwargs['show_legend'] = False
        geom.__init__(self, mapping, data, **kwargs)

    def draw_panel(self, data, panel_params, coord, ax, **params):
        """
        Plot all groups
        """
        span = coord.backtransform_range(panel_params)

        # Each yintercept becomes a horizontal segment that spans the
        # panel's full x range.
        data['y'] = data['yintercept']
        data['yend'] = data['yintercept']
        data['x'] = span.x[0]
        data['xend'] = span.x[1]

        for _, segment in data.drop_duplicates().groupby('group'):
            segment = segment.reset_index()
            geom_segment.draw_group(segment, panel_params,
                                    coord, ax, **params)
|
Have you ever watched any of our monthly training and really wished you could keep a copy to reference back to? Or has a busy life taken over and our monthly content has expired before you have chance to watch it?
The simplest way to ensure you have a full library of our content which doesn’t ever expire is to upgrade to our Premium membership level. This gives you access to all of our monthly training tutorials to watch for the lifetime of your membership. They don’t expire and you can watch them at any time. As soon as you take premium membership you get access to the entire library – including tutorials from before your membership even started.
The other option you have is to upgrade to Pro membership.
What Are The Benefits Of Being A Pro Level Member?
There is an admin fee of £25 to apply for Pro membership.
If you achieve Pro level, your subscription payment will increase to either £22 per month or £220 per year.
A gallery will be opened for you to upload your images to for scoring.
If you do not upload your images within 7 days your gallery will expire.
Members are welcome to apply for Pro level membership at any point of their membership, even if you have only just become a member.
How Will Upgrading Affect My Payments?
What Images Do I Need To Submit?
* 12 anonymous (un-watermarked) images for individual scoring by our panel of Ambassadors.
* Sessions can be newborn up to 12 months – no maternity.
How Are My Images Scored?
The ambassadors who score the panels that are submitted don’t know whose panel they are scoring and scoring is done independently – they are not permitted to discuss images or scoring.
This ensures that we get a fair score across the board.
We give you 7 days to upload your images to your gallery.
Once images have been uploaded, we then open the gallery to our Ambassadors for scoring and ask them to score within 14 days.
However, please be advised the scoring process can occasionally take longer during holiday periods.
We often get members asking if they can see actual panel submissions to give them an idea of the level required. The gallery below is the panel submission for Kirsten Reddington of Little Crumpets Photography who was recently awarded the Commended badge to go with her Pro status. It’s a stunning gallery and gives you a clear idea of the quality of work our Pro Commended members are producing.
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Iotic Labs Ltd. All rights reserved.
from __future__ import unicode_literals, print_function
import logging
# Root logging is configured at WARNING so third-party modules stay quiet;
# this module's own logger is then lowered to DEBUG for verbose output.
logging.basicConfig(format='%(asctime)s %(levelname)s [%(name)s] {%(threadName)s} %(message)s', level=logging.WARNING)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
from IoticAgent.Core.compat import monotonic
from IoticAgent.ThingRunner import RetryingThingRunner
from IoticAgent import Datatypes, Units
class FollowBasicCatchall(RetryingThingRunner):
    LOOP_TIMER = 10  # minimum number of seconds duration of the main loop

    def __init__(self, config=None):
        """Set up instance state after the base class has initialised."""
        super(FollowBasicCatchall, self).__init__(config=config)
        self.__thing = None

    @staticmethod
    def __catchall(args):
        """Handle raw catchall feed data that can't be parsed."""
        logger.debug("Catchall data received. Shared at %s", args['time'])
        try:
            count = args['data']['count']
        except KeyError as exc:
            logger.warning('Failed to find key %s in recent data %s', exc, args)
            raise
        logger.debug('Found recent data for key %s: value: %s', 'count', count)

    @staticmethod
    def __catchall_parsed(args):
        """Handle feed data for which a parsed representation exists."""
        logger.debug("Feed data received. Shared at %s", args['time'])
        matches = args['parsed'].filter_by(types=(Datatypes.INTEGER,),
                                           units=(Units.COUNTS_PER_MIN,),
                                           text=("random",))
        if not matches:
            logger.debug('Parsed data not found')
            return
        first = matches[0]
        logger.debug('Found parsed data for key %s: value: %s', first.label, first.value)

    def on_startup(self):
        """Create the thing and register the catchall feed-data handlers."""
        print("Started. Press CTRL+C to end")
        self.__thing = self.client.create_thing('follow_basic')
        # register catchall for any data that's been queued for you before you start
        # or any feed data that's not got a specific callback
        self.client.register_catchall_feeddata(self.__catchall,
                                               callback_parsed=self.__catchall_parsed)

    def main(self):
        """Main loop: tick roughly every LOOP_TIMER seconds until shutdown."""
        while True:
            tick_started = monotonic()
            # loop code in here
            elapsed = monotonic() - tick_started
            if self.wait_for_shutdown(max(0, self.LOOP_TIMER - elapsed)):
                break
def main():
    """Entry point: run the catchall follower with the agent2.ini config."""
    runner = FollowBasicCatchall(config="agent2.ini")
    runner.run()


if __name__ == '__main__':
    main()
|
At Spire Washington Hospital we provide support throughout your weight loss journey both before and after your surgery. You will meet with one of our consultant surgeons who will discuss the different weight loss procedures available. Together, you will decide on the procedure which is best suited to your needs. The cost of this initial consultation is not included in the price of the surgery.
Your first call or email regarding weight loss surgery will be handled by one of our friendly customer sales advisers, led by Sharon Day, who will answer any initial questions and provide estimate costs for surgery for you.
If you are interested in having a weight loss consultation, an appointment will be made with one of our consultant bariatric surgeons here at the hospital. Following this consultation, if you have decided to opt for surgery, you will have a meeting with one of our dietitians who will advise on the dietary implications of your surgery and you may also have an appointment with our psychologist.
A final consultation with your surgeon will be made to talk through any final questions you may have and to confirm a date for your surgery.
At Spire Washington Hospital we have a quarterly weight loss support group which meet at the hospital. These meetings are free to attend and are open to patients and relatives before and after surgery where you can share information and concerns and ask any questions you may have.
Your price quotation will be made clear to you before you have any consultations, tests or proceed with surgery. Our guide prices should give you a good idea of what you can expect to pay, but note that these are based on what you would be expected to pay without a medical history or previous conditions which would require additional tests or treatment. The price that you will actually pay will be confirmed to you by the hospital.
Your stay and care in hospital will be explained to you by our bariatric pre-assessment nurse including expected length of stay, immediate post surgery observations carried out, drug requirements, physiotherapy involvement and your provision of fluids while you are on our ward.
You will be given instructions before you go home regarding wound care, medicines, mobilisation and exercise, your dietary needs, vitamin supplementation and follow-up appointments. You will also be given contact details for our ward if you feel unwell or have any concerns when you return home.
This is an essential part of your weight loss surgery to help you adjust to the changes made to your digestive system. We provide long term follow-up to support you after your procedure to help with the necessary changes in your lifestyle including diet and exercise to enable you to achieve maximum benefit from the surgery.
Follow-up appointments are usually led by our dietitian with access to the rest of the team where necessary. Appointments are flexible and the frequency will depend on your needs and progress.
For the gastric band procedure some patients may need to see the consultant again to have adjustments made to the band. The number of adjustments varies from patient to patient and a minimum of three are included in the price but not all patients need this many. Please speak to your consultant about band adjustments when you have your initial consultation.
We are also able to advise on, or re-operate after investigations in, post-bariatric patients who have regained weight. The procedure may be endoscopic or surgical, depending on a re-evaluation of the primary surgery.
|
# Copyright: Ankitects Pty Ltd and contributors
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
from __future__ import annotations
import re
from re import Match
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import anki
# DBValue is actually Union[str, int, float, None], but if defined
# that way, every call site needs to do a type check prior to using
# the return values.
ValueFromDB = Any
Row = Sequence[ValueFromDB]
ValueForDB = Union[str, int, float, None]
class DBProxy:
    # Lifecycle
    ###############

    def __init__(self, backend: anki._backend.RustBackend) -> None:
        self._backend = backend
        self.mod = False        # set once any mutating statement runs
        self.last_begin_at = 0  # value of col.mod when begin() was called

    # Transactions
    ###############

    def begin(self) -> None:
        self.last_begin_at = self.scalar("select mod from col")
        self._backend.db_begin()

    def commit(self) -> None:
        self._backend.db_commit()

    def rollback(self) -> None:
        self._backend.db_rollback()

    # Querying
    ################

    def _query(
        self,
        sql: str,
        *args: ValueForDB,
        first_row_only: bool = False,
        **kwargs: ValueForDB,
    ) -> List[Row]:
        """Run *sql* against the backend and return the resulting rows.

        Mutating statements flag the proxy as modified; named kwargs are
        rewritten into positional arguments before execution.
        """
        # mark modified?
        if sql.strip().lower().startswith(("insert", "update", "delete")):
            self.mod = True
        sql, positional = emulate_named_args(sql, args, kwargs)
        # fetch rows
        return self._backend.db_query(sql, positional, first_row_only)

    # Query shortcuts
    ###################

    def all(self, sql: str, *args: ValueForDB, **kwargs: ValueForDB) -> List[Row]:
        """Every row of the result set."""
        return self._query(sql, *args, first_row_only=False, **kwargs)

    def list(
        self, sql: str, *args: ValueForDB, **kwargs: ValueForDB
    ) -> List[ValueFromDB]:
        """The first column of every row."""
        return [row[0] for row in self._query(sql, *args, first_row_only=False, **kwargs)]

    def first(self, sql: str, *args: ValueForDB, **kwargs: ValueForDB) -> Optional[Row]:
        """The first row, or None when the result set is empty."""
        rows = self._query(sql, *args, first_row_only=True, **kwargs)
        return rows[0] if rows else None

    def scalar(self, sql: str, *args: ValueForDB, **kwargs: ValueForDB) -> ValueFromDB:
        """The first column of the first row, or None when there are no rows."""
        rows = self._query(sql, *args, first_row_only=True, **kwargs)
        return rows[0][0] if rows else None

    # execute used to return a pysqlite cursor, but now is synonymous
    # with .all()
    execute = all

    # Updates
    ################

    def executemany(self, sql: str, args: Iterable[Sequence[ValueForDB]]) -> None:
        """Run *sql* once per argument sequence in *args*."""
        self.mod = True
        rows = args if isinstance(args, list) else list(args)
        self._backend.db_execute_many(sql, rows)
# convert kwargs to list format
def emulate_named_args(
    sql: str, args: Tuple, kwargs: Dict[str, Any]
) -> Tuple[str, Sequence[ValueForDB]]:
    """Rewrite ':name' placeholders in *sql* into positional '?N' refs.

    The named values from *kwargs* are appended after *args*, and every
    ':name' occurrence is replaced by the 1-based index of its slot in
    the combined argument sequence. With no kwargs, the inputs are
    returned untouched.
    """
    if not kwargs:
        return sql, args
    print("named arguments in queries will go away in the future:", sql)
    # Append the named values after the positional ones, remembering the
    # 1-based slot each name ends up in.
    offset = len(args)
    positions = {key: offset + i + 1 for i, key in enumerate(kwargs)}
    combined = list(args) + list(kwargs.values())

    def substitute(match: Match) -> str:
        return f"?{positions[match.group(1)]}"

    return re.sub(":([a-zA-Z_0-9]+)", substitute, sql), combined
|
so i asked him what all he wanted at the party and the list was: a beer truck, crawfish, donuts, cake, a horse, a firetruck, cowboy boots, swimming, all with moon taxi playing the party. this kid thought he was having a mtv super sweet 16 party i guess! hahah! he facetimed "uncle" wes and begged for moon taxi to play the party. wes told him they couldn't because he would be in europe. when you ask simms what uncle wes said about playing the party he simply says "he said no" :| we ended up coming up with a happy medium for the party. a cowboy pool party at uncle scott and aunt hope's house, complete with a golf cart ride.
i started showing simms pictures of cowboy cakes online and he told me he would work really hard to make money to pay for it. if you thought i was not going to ride this out, you're crazy. he did chores every single day to help pay for the AMAZING cake by connie at sugar buzz bakers. she does the best job at making my visions of a cake come to life, while still tasting ridiculously good. if you are familiar with knoxville, she does them out of her home in sequoyah hills! i also wanted to do cookies because the twins aren't big cake eaters. we used kj cookies, another local baker (she actually also ships!) the cookies were divine y'all!!!! i might actually just use her and do cookies for the twins birthday next month. like connie, she also does an amazing job at making beautiful but great tasting cookies!
thanks so much to everyone that came to celebrate my big boy! he had a great time and loved seeing all his favorite people! it takes a village and y'all are one hell of one! and another big thanks to scott and hope for hosting it at their house! also thanks to amazon prime for helping me order 90% of the decor from my couch.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from scrapy.http import Request
from my_settings import name_file, keyword, test_mode, difference_days
from datetime import datetime, timedelta
import re
print "Run spider Mandy"
added_email = []
keyword = list(map(lambda x: re.sub(' ', '+', x), keyword))
if test_mode:
current_date = (datetime.today() - timedelta(days=difference_days)).strftime('%d-%b-%Y')
else:
current_date = datetime.today().strftime('%d-%b-%Y')
file = open(name_file, 'a')
email_in_file = open(name_file, 'r').readlines()
class Mandy(Spider):
    # Scrapy spider: searches mandy.com listings for the configured
    # keywords and harvests the first contact email of listings posted today.
    name = 'mandy'
    allowed_domains = ["mandy.com"]
    # One worldwide search URL per (already URL-encoded) keyword.
    start_urls = ["http://mandy.com/1/search.cfm?fs=1&place=wld&city=&what={}&where=Worldwide".format(key)
                  for key in keyword]

    def parse(self, response):
        # Results page: pair each listing link with its posted-date label
        # and follow only the listings posted on `current_date`.
        sel = Selector(response)
        date = sel.xpath('//*[@id="resultswrapper"]/section/div/div/div/div/span/text()').extract()
        link = sel.xpath('//*[@id="resultswrapper"]/section/div/div/div/div/a/@href').extract()
        # Pull the 'DD-Mon-YYYY' portion out of each date label.
        date = list(map(lambda x: re.findall('\w+:\D([A-Za-z0-9-]+)', x)[0], date))
        dic = dict(zip(link, date))
        for key in dic.keys():
            if dic[key] == current_date:
                yield Request(url='http://mandy.com'+key, callback=self.parse_page)

    def parse_page(self, response):
        # Listing page: append the first email address found to the output
        # file, unless it is already on disk or was added earlier this run.
        sel = Selector(response)
        email = sel.re('(\w+@[a-zA-Z_]+?\.[a-zA-Z]{2,6})')
        if bool(email):
            email = email[0]
            if email + "\n" not in email_in_file and email not in added_email:
                file.write(email+'\n')
                added_email.append(email)
                print "Spider: Mandy. Email {0} added to file".format(email)
            else:
                print "Spider: Mandy. Email {0} already in the file".format(email)
|
NEW DELHI: Railways is considering running special trains with costlier tickets in a bid to tide over the losses in passenger business and also to cater to the growing demand for confirmed tickets.
Dubbed as "dynamic pricing", the proposal entails offering tickets at an increased rate for some special trains in AC-2 and AC-3 classes with catering facility on certain busy routes like Delhi-Mumbai and Delhi-Kolkata sectors.
The issue of dynamic fares prevalent in air fares was discussed by senior railway officials at the Railway Board and it was suggested to experiment it in the Delhi-Mumbai sector on a pilot basis, sources in Railways said.
According to the proposal, tickets at higher rates should be made available at a separate portal of IRCTC ticketing website and the special train with Rajdhani like facility should be launched before Christmas to encash the festive rush.
At present, there are two Rajdhani trains for Mumbai and if the dynamic fare proposal gets the nod then it will be the third Rajdhani for all practical purpose.
"It (dynamic pricing) is just a proposal as we are exploring various options available for increasing the earnings and also to ensure seats for growing number of travelling passengers in some high demand routes," Railway Board Member (Traffic) DP Pandey told PTI.
Pandey further said, "We just had first round of meeting discussing various options... the dynamic pricing of ticket is one option. But nothing has been finalised yet as it requires more discussion before shaping up the policy."
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from urllib import quote
from json import dumps
from Plugins.Extensions.OpenWebif.local import tstrings
##################################################
## MODULE CONSTANTS
# Short aliases for Cheetah's name-resolution helpers used by respond().
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time

# Build metadata recorded by the Cheetah compiler for this template.
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885499.317004
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:39 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/ajax/event.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'

# Refuse to run against a Cheetah runtime older than the compiler that
# produced this file.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %s. Templates compiled before version %s must be recompiled.'%(
          __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class event(Template):
    ## Cheetah-compiled template (source path in __CHEETAH_src__): writes the
    ## HTML popup for a single EPG event (channel, start time, duration,
    ## timer/zap/stream icons, descriptions and an inline <script> payload).
    ##################################################
    ## CHEETAH GENERATED METHODS
    def __init__(self, *args, **KWs):
        ## Standard generated constructor: forward construction args and apply
        ## only the allowed Cheetah keyword settings, once per instance.
        super(event, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)
    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        ## etime: event start as a struct_time; channel: channel name with
        ## single quotes escaped for safe embedding in the inline JS below.
        etime = time.localtime(VFFSL(SL,"event",True)['begin'])
        channel = VFN(VFFSL(SL,"event",True)['channel'],"replace",False)("'", r"\'")
        write(u'''
<!-- Icons from: http://findicons.com/pack/1987/eico -->
<div id="leftmenu_main">
\t<div id="leftmenu_top" class="handle" style="cursor:move">''')
        _v = VFFSL(SL,"event",True)['channel'] # u"$event['channel']" on line 9, col 60
        if _v is not None: write(_filter(_v, rawExpr=u"$event['channel']")) # from line 9, col 60.
        write(u'''
\t\t<div id="leftmenu_expander_main" class="leftmenu_icon leftmenu_icon_collapse" onclick="$(\'#eventdescription\').hide(200)"></div>
\t</div>
\t<div id="leftmenu_container_main" style="padding:6px">
\t\t<div style="float:left; width:80px;">
\t\t\t<div id="station" style="background-color: #1c478e; padding:2px; width:75px; text-align:center; overflow:hidden">''')
        _v = VFFSL(SL,"event",True)['channel'] # u"$event['channel']" on line 14, col 117
        if _v is not None: write(_filter(_v, rawExpr=u"$event['channel']")) # from line 14, col 117.
        write(u'''</div>
\t\t\t<div style="background-color: #1c478e; color:#fff; width:79px; font-size:23px; margin-top: 5px; text-align:center">
\t\t\t\t''')
        _v = VFN(VFFSL(SL,"time",True),"strftime",False)("%H:%M", VFFSL(SL,"etime",True)) # u'$time.strftime("%H:%M", $etime)' on line 16, col 5
        if _v is not None: write(_filter(_v, rawExpr=u'$time.strftime("%H:%M", $etime)')) # from line 16, col 5.
        write(u'''<br/>
\t\t\t\t<span style="font-size:12px; color:#A9D1FA">''')
        _v = VFFSL(SL,"int",False)(VFFSL(SL,"event",True)['duration']/60) # u"$int($event['duration']/60)" on line 17, col 49
        if _v is not None: write(_filter(_v, rawExpr=u"$int($event['duration']/60)")) # from line 17, col 49.
        write(u''' min</span>
\t\t\t</div>
\t\t\t<div style="background-color: #1c478e; color:#fff; width:79px;margin:5px 0">
\t\t\t\t<div style="font-size:23px; text-align:center">''')
        _v = VFFSL(SL,"tstrings",True)[("day_" + (VFN(VFFSL(SL,"time",True),"strftime",False)("%w", VFFSL(SL,"etime",True))))] # u'$tstrings[("day_" + ($time.strftime("%w", $etime)))]' on line 21, col 52
        if _v is not None: write(_filter(_v, rawExpr=u'$tstrings[("day_" + ($time.strftime("%w", $etime)))]')) # from line 21, col 52.
        write(u'''</div>
\t\t\t\t<div style="color:#A9D1FA; text-align:center">''')
        _v = VFN(VFFSL(SL,"time",True),"strftime",False)("%d", VFFSL(SL,"etime",True)) # u'$time.strftime("%d", $etime)' on line 22, col 51
        if _v is not None: write(_filter(_v, rawExpr=u'$time.strftime("%d", $etime)')) # from line 22, col 51.
        write(u''' ''')
        _v = VFFSL(SL,"tstrings",True)[("month_" + (VFN(VFFSL(SL,"time",True),"strftime",False)("%m", VFFSL(SL,"etime",True))))] # u'$tstrings[("month_" + ($time.strftime("%m", $etime)))]' on line 22, col 80
        if _v is not None: write(_filter(_v, rawExpr=u'$tstrings[("month_" + ($time.strftime("%m", $etime)))]')) # from line 22, col 80.
        write(u'''</div>
\t\t\t</div>
\t\t\t<div>
\t\t\t <img src="/images/ico_timer.png" alt="''')
        _v = VFFSL(SL,"tstrings",True)['add_timer'] # u"$tstrings['add_timer']" on line 26, col 46
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['add_timer']")) # from line 26, col 46.
        write(u'''" title="''')
        _v = VFFSL(SL,"tstrings",True)['add_timer'] # u"$tstrings['add_timer']" on line 26, col 77
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['add_timer']")) # from line 26, col 77.
        write(u'''" style="cursor:pointer" onclick="addTimer(theevent)" />
\t\t\t <img src="/images/ico_zap.png" alt="Zap" title="''')
        _v = VFFSL(SL,"tstrings",True)['zap'] # u"$tstrings['zap']" on line 27, col 56
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['zap']")) # from line 27, col 56.
        write(u'''" style="cursor:pointer" onclick="zapChannel(\'''')
        _v = VFFSL(SL,"str",False)(VFFSL(SL,"event",True)['sref']) # u"$str($event['sref'])" on line 27, col 118
        if _v is not None: write(_filter(_v, rawExpr=u"$str($event['sref'])")) # from line 27, col 118.
        write(u"""', '""")
        _v = VFFSL(SL,"channel",True) # u'$channel' on line 27, col 142
        if _v is not None: write(_filter(_v, rawExpr=u'$channel')) # from line 27, col 142.
        write(u'''\')" />
\t\t\t\t<a href="/web/stream.m3u?ref=''')
        _v = VFFSL(SL,"quote",False)(VFFSL(SL,"event",True)['sref']) # u"$quote($event['sref'])" on line 28, col 34
        if _v is not None: write(_filter(_v, rawExpr=u"$quote($event['sref'])")) # from line 28, col 34.
        write(u'''&name=''')
        _v = VFFSL(SL,"quote",False)(VFFSL(SL,"event",True)['channel']) # u"$quote($event['channel'])" on line 28, col 62
        if _v is not None: write(_filter(_v, rawExpr=u"$quote($event['channel'])")) # from line 28, col 62.
        write(u'''" target="_blank"><img
\t\t\t\t\tsrc="/images/ico_stream2.png" alt="Stream ''')
        _v = VFFSL(SL,"channel",True) # u'$channel' on line 29, col 48
        if _v is not None: write(_filter(_v, rawExpr=u'$channel')) # from line 29, col 48.
        write(u'''" title="''')
        _v = VFFSL(SL,"tstrings",True)['stream'] # u"$tstrings['stream']" on line 29, col 65
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['stream']")) # from line 29, col 65.
        write(u''' ''')
        _v = VFFSL(SL,"channel",True) # u'$channel' on line 29, col 85
        if _v is not None: write(_filter(_v, rawExpr=u'$channel')) # from line 29, col 85.
        write(u'''" style="cursor:pointer" /></a>
\t\t\t</div>
\t\t</div>
\t\t<div style="float:left; width:250px; margin-left: 5px">
\t\t\t<div style="font-size: 13px; font-weight: bold">''')
        _v = VFFSL(SL,"event",True)['title'] # u"$event['title']" on line 33, col 52
        if _v is not None: write(_filter(_v, rawExpr=u"$event['title']")) # from line 33, col 52.
        write(u'''</div>
''')
        ## Short description is only emitted when it differs from the title.
        if VFFSL(SL,"event",True)['title'] != VFFSL(SL,"event",True)['shortdesc']: # generated from line 34, col 1
            write(u'''\t\t\t<div style="font-size: 12px; font-weight: bold">''')
            _v = VFFSL(SL,"event",True)['shortdesc'] # u"$event['shortdesc']" on line 35, col 52
            if _v is not None: write(_filter(_v, rawExpr=u"$event['shortdesc']")) # from line 35, col 52.
            write(u'''</div>
''')
        write(u'''\t\t\t<div style="max-height:400px; overflow:auto"><p>''')
        _v = VFN(VFFSL(SL,"event",True)['longdesc'],"replace",False)("\n","<br/>") # u'$(event[\'longdesc\'].replace("\\n","<br/>"))' on line 37, col 52
        if _v is not None: write(_filter(_v, rawExpr=u'$(event[\'longdesc\'].replace("\\n","<br/>"))')) # from line 37, col 52.
        write(u'''</p></div>
\t\t</div>
\t\t<div style="clear:left"></div>
\t</div>
</div>
<script>
var theevent = ''')
        _v = VFFSL(SL,"dumps",False)(VFFSL(SL,"event",True)) # u'$dumps($event)' on line 43, col 16
        if _v is not None: write(_filter(_v, rawExpr=u'$dumps($event)')) # from line 43, col 16.
        write(u''';
if (picons[theevent[\'channel\']])
\t$(\'#station\').html(\'<img src="\'+picons[theevent[\'channel\']]+\'" width="75" />\');
</script>
''')
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""
    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES
    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_event= 'respond'
## END CLASS DEFINITION
# Attach Cheetah's runtime plumbing (respond/searchList helpers, etc.) to the
# generated `event` template class, exactly once per process.
if not hasattr(event, '_initCheetahAttributes'):
    templateAPIClass = getattr(event, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(event)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    # Render this template directly from the shell (debugging aid).
    CmdLineIface(templateObj=event()).run()
|
If you use Google services you should know by now that you are the product that is being sold to advertisers. Google gathers your data, compiles it, and sells it to advertising agencies and other companies that find this data useful. In return, users get some really great services. We are not total shills, but these services don't come without a cost. We are definitely not getting something for nothing. Google has historically been very good about protecting personal information and is pretty open about how they use our data. Not all companies are as open about this. Starting November 11, Google is changing their Terms of Service again, and you should know what those changes are all about.
This Google Terms of Service update is a pretty big deal. Your profile may start showing up in ads all over the web. Google is calling these ads "shared endorsements" and they will leverage any comments, reviews, follows, +1s or stars that you may have given to a brand or business. This info will be used to create more personal advertising by placing your name, photo and any relevant information in or alongside an ad.
As usual, Google is giving users time to make changes if they don't want to participate in these personalized ads. If you have already set restrictions on your shared comments, personalized ads will only show up for your friends that are in the Google+ circle where the comment was shared. If you want to opt-out, Google is making that easy as well. All you need to do is head over to the Shared Endorsements settings page and uncheck the box that says "Based upon my activity, Google may show my name and profile photo in shared endorsements that appear in ads." Then just click "Save" and you're good to go. I found that this box was already unchecked for me, based on the previous changes I had made to my Google privacy settings. If you previously opted out of +1s, you should find the box unchecked already, too. In addition, Google is automatically excluding all users who are under the age of 18. Because the new ads are not set to go live until November 11, users have plenty of time to opt-out of this if they want.
Google is always looking for new ways to use our info to make money. That's how they stay in business, after all. Thankfully, they are transparent with how our data is used and they give us an easy out if we don't want to participate. Unless it's the U.S. government asking for our data.
|
# This program calculates the L2 error for a given velocity file
#
# Usage : python L2ErrorUCouette.py <velocity file>
#
# Author : Bruno Blais
# Last modified : December 3rd

# Python imports
import sys

import numpy

# NOTE(review): the os / math / matplotlib / mpl_toolkits imports were removed
# because nothing below uses them - all plotting code is commented out.
# Re-add "import matplotlib.pyplot as plt" if the contour plot is re-enabled.

#***************************
# Case chooser
#***************************
case = "finalVANS"
# Possible cases: void1, swirlVANS, expVANS, nonFreeDiv, unsteadyNS, finalVANS

# Amplitude of velocity field
A = 0.01
#A = 1


def compute_analytical(case, x, y, A):
    """Return the analytical (u, v) velocity components for a test case.

    Args:
        case: name of the manufactured-solution test case.
        x, y: numpy arrays of point coordinates.
        A: amplitude of the velocity field (not used by the 'void1' and
           'unsteadyNS' cases, matching the original formulas).

    Returns:
        (uth, vth) numpy arrays; arrays of zeros for an unknown case name
        (same behavior as the original per-point loops).
    """
    pi = numpy.pi
    xa = numpy.asarray(x, dtype=float)
    ya = numpy.asarray(y, dtype=float)
    # Default: unknown case names yield a zero analytical field.
    uth = numpy.zeros_like(xa)
    vth = numpy.zeros_like(xa)
    if case == "void1":
        uth = -2 * numpy.sin(pi * xa) ** 2 * numpy.sin(pi * ya) * numpy.cos(pi * ya)
        vth = 2 * numpy.sin(pi * ya) ** 2 * numpy.sin(pi * xa) * numpy.cos(pi * xa)
    elif case == "swirlVANS":
        uth = -2 * A * numpy.sin(pi * xa) * numpy.cos(pi * ya)
        vth = 2 * A * numpy.sin(pi * ya) * numpy.cos(pi * xa)
    elif case == "expVANS":
        uth = A * numpy.cos(xa * ya)
        vth = -A * numpy.sin(xa * ya)
    elif case == "nonFreeDiv":
        uth = A * numpy.exp(-xa ** 2) * numpy.sin(pi * ya) * numpy.cos(pi * ya)
        vth = A * numpy.exp(-ya ** 2) * numpy.sin(pi * xa) * numpy.cos(pi * xa)
    elif case == "unsteadyNS":
        factor = numpy.cos(numpy.pi / 4.)  # fixed time factor, as in original
        uth = -2 * numpy.sin(pi * xa) ** 2 * numpy.sin(pi * ya) * numpy.cos(pi * ya) * factor
        vth = 2 * numpy.sin(pi * ya) ** 2 * numpy.sin(pi * xa) * numpy.cos(pi * xa) * factor
    elif case == "finalVANS":
        factor = numpy.cos(numpy.pi / 4.)
        uth = A * numpy.exp(-xa ** 2) * numpy.sin(pi * ya) * numpy.cos(pi * ya) * factor
        vth = A * numpy.exp(-ya ** 2) * numpy.sin(pi * xa) * numpy.cos(pi * xa) * factor
    return uth, vth


def main():
    """Read the velocity file given on the command line and print the L2 error."""
    fname = sys.argv[1]

    # Columns: coordinates (x, y, z) then velocity components (u, v, w)
    x, y, z, u, v, w = numpy.loadtxt(fname, unpack=True)
    nt = len(x)
    nx = int(numpy.sqrt(nt))  # points assumed to form an nx x nx grid

    # The original per-point Python loops were replaced by vectorized numpy
    # expressions; the results are identical.
    uth, vth = compute_analytical(case, x, y, A)

    # Normalized squared error, then the discrete L2 norm
    err = ((u - uth) ** 2 + (v - vth) ** 2) / A ** 2
    L2err = numpy.sqrt(numpy.sum(err) / nt)
    print("L2 Error is : %5.5e" % L2err)  # print() keeps this Python 2/3 compatible

    Z = numpy.reshape(err, (-1, nx))
    #cs = plt.contour(Z,levels=numpy.arange(numpy.min(Z),numpy.max(Z),numpy.max(Z)/10.))
    #plt.clabel(cs,inline=1,fontsize=10)
    #plt.show()


if __name__ == "__main__":
    main()
|
Thanks to a reader of my blog, who made me aware of another slow film, I had the chance to dive into a hugely photographic piece of cinema. A while ago, I wrote a short post on Bovines by Emmanuel Gras. It is a film without dialogue. Just pure beauty. And cows. It traces the often overlooked lives of cows throughout the four seasons. It still is the most peaceful slow film I know.
Bestiaire by Canadian filmmaker Denis Coté is not much different at first sight. It is about (wild) animals in Parc Safari in Quebec (“Africa in the Heart of Canada”). Again, the changing seasons play an interesting role. Bestiary, or The Book of Beasts, was a medieval collection of physical descriptions of animals, often written in such a way as to highlight an animal’s special meaning or position in the world.
This seemingly little detail is in fact very significant. The animals we see in the film – horses, giraffes, bears, zebras – have been deprived of their special meaning in the world. They are all the same. They are an object of attraction for both the employees, and the tourists, who flock to the park in spring and summer.
They have been deprived of their special meaning because they have been put into captivity, where they cannot be the animals they really are. They cannot be wild. In winter, especially, when the animals are put into indoor shelters, we see, for example, zebras wanting to break out of their cage. Thus, the first half of the film is a bit depressing if you have a heart for animals.
Aesthetically, Bestiaire is stunning, though. It feels like a photo album from time to time. Coté is certainly one of those filmmakers with an incredible eye for frame composition. The camera is always static, as is often the case in Slow Cinema. I suppose that many shots happened by pure chance because it looked as if Coté had put the camera somewhere and had hoped that an animal or two would cross the frame. So while Coté certainly tried to set up the camera in such a way that he could get interesting shots, it is not all due to his work as director / cinematographer. He was very much dependent on the movements of the animals. I therefore see Bestiaire is a collaboration of man and beast, rather than “a film by Denis Coté” alone.
Watching Bestiaire might make you think that Coté is a slow-film director. In fact, he is, but his films are less Slow Cinema. I watched his film Curling yesterday, and though it did start off like a Slow Cinema film with regards to its aesthetics (long-take, static camera, medium or long shots etc), Coté moved away from those aesthetics halfway through the film, which confused me a bit. I don’t think there was anything in the narrative that could have asked for it, but then, don’t question a director’s aesthetic choices. You’re wrong about it more often than not.
Interesting comment about the animals who lost their place in the world. He films them against non-natural walls, tin roof, barriers. I really liked it. It’s very aesthetic.
Côté is not contemplative; in an interview he said that he's willing to try every style and genre, depending on the story to tell.
Well, precisely what I say in the post. He’s not really a Slow Cinema director because he’s got other films that don’t fit. But this particular film would be part of the canon, to me personally anyway.
Yes I agree. I like how you say the animals are co-authors of this documentary.
Yes I agree. I like how you say the animals are co-authors of this documentary. This is probably true of all documentaries, the directors are dependent on what the subjects will do (when it is not staged).
|
from typing import Dict, Optional, Tuple, Callable, Any, Union
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import json
import requests
import dateparser
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
# key = field of a ticket , val = dict of (name,id) of options
# Maps each ticket field's human-readable option name to the numeric id that
# the Quest KACE API expects; used when translating user-supplied command
# arguments for ticket creation and update.
TICKETS_OBJECTS = {
    'impact': {
        '1 person cannot work': 1,
        'Many people cannot work': 2,
        '1 person inconvenienced': 3,
        'Many people inconvenienced': 4
    },
    'category': {
        "Network": 1,
        "Other": 2,
        "Software": 4,
        "Hardware": 3
    },
    'priority': {
        "Medium": 1,
        'High': 2,
        'Low': 3
    },
    'status': {
        'Opened': 1,
        'Closed': 2,
        'Need More Info': 3,
        'New': 4,
        'Reopened': 5,
        'Waiting Overdue': 6,
        'Waiting on Customer': 7,
        'Waiting on Third Party': 8
    }
}
def convert_snake_to_camel(snake_str: str) -> str:
    """Convert a snake_case string to CamelCase.

    Args:
        snake_str: The string that we would like to convert.

    Returns:
        converted string.
    """
    camelized = "".join(part.capitalize() for part in snake_str.split("_"))
    # Some names need product-specific casing (acronyms such as OS/IP/ID).
    return convert_specific_keys(camelized)
# Camelized names whose canonical Demisto-context spelling differs (acronym
# casing such as OS/IP/ID). Looked up by convert_specific_keys.
_SPECIFIC_KEY_OVERRIDES = {
    'OsName': 'OSName',
    'OsNumber': 'OSNumber',
    'Ram total': 'RamTotal',
    'AssetDataId': 'AssetDataID',
    'AssetClassId': 'AssetClassID',
    'AssetStatusId': 'AssetStatusID',
    'AssetTypeId': 'AssetTypeID',
    'MappedId': 'MappedID',
    'OwnerId': 'OwnerID',
    'HdQueueId': 'HdQueueID',
    'Ip': 'IP',
}


def convert_specific_keys(string: str) -> str:
    """
    Convert specific keys to demisto standard.

    Replaces the original 16-branch if-chain with a single dict lookup;
    unknown strings are returned unchanged.

    Args:
        string: the text to transform

    Returns:
        A Demisto output standard string
    """
    return _SPECIFIC_KEY_OVERRIDES.get(string, string)
def convert_dict_snake_to_camel(dic: dict) -> dict:
    """Convert a dictionary's keys from snake case to camel case, recursively.

    Nested dicts are converted in place; lists are handed to parse_response
    (which converts each contained dict). Keys 'id'/'Id' are normalized to
    the Demisto-standard 'ID'.

    Args:
        dic: The dictionary that we would like to convert.

    Returns:
        converted dictionary.
    """
    context_dict = {}
    for key, value in dic.items():
        # isinstance instead of `type(...) is ...` so dict/list subclasses
        # are handled as well.
        if isinstance(value, dict):
            context_dict[convert_snake_to_camel(key)] = convert_dict_snake_to_camel(value)
        elif isinstance(value, list):
            context_dict[convert_snake_to_camel(key)] = parse_response(value)
        elif key in ('id', 'Id'):
            context_dict['ID'] = value
        else:
            context_dict[convert_snake_to_camel(key)] = value
    return context_dict
def parse_response(lst: list):
    """Convert an API response to the desired (camel case) format.

    Args:
        lst: A list of dictionaries returned from an api call.

    Returns:
        converted list of dictionaries from snake case to camel case.
    """
    return [convert_dict_snake_to_camel(entry) for entry in lst]
class Client(BaseClient):
    """
    Client to use in the integration, overrides BaseClient.
    Used for communication with the api.
    """

    def __init__(self, url: str, username: str, password: str, verify: bool, proxy: bool):
        super().__init__(base_url=f"{url}/api", verify=verify, proxy=proxy)
        self._url = url
        self._username = username
        self._password = password
        # Authenticate eagerly: every subsequent request needs the CSRF token
        # and the session cookie.
        self._token, self._cookie = self.get_token()

    def _request_headers(self, with_body: bool = False) -> dict:
        """Build the headers common to every API request.

        The original code duplicated this dict in every request method; it is
        now built in one place.

        Args:
            with_body: when True, adds a JSON Content-Type header (used by the
                endpoints that send a request body, and by delete, matching the
                original headers byte for byte).

        Returns:
            headers dict carrying the CSRF token and the session cookie.
        """
        headers = {
            'Accept': 'application/json',
            'x-dell-csrf-token': self._token,
            'x-dell-api-version': '5',
            'Cookie': self._cookie
        }
        if with_body:
            headers['Content-Type'] = 'application/json'
        return headers

    def get_token(self) -> Tuple[str, str]:
        """Get a token for the connection.

        Logs in with the configured credentials and extracts the CSRF token
        and session cookie from the login response.

        Returns:
            token , cookie for the connection.

        Raises:
            DemistoException: when the token or cookie cannot be extracted.
        """
        token = ''
        cookie = ''
        data = {
            "userName": self._username,
            "password": self._password
        }
        login_url = f"{self._url}/ams/shared/api/security/login"
        body = json.dumps(data)
        headers = {'Content-Type': 'application/json'}
        response = self.token_request(login_url, headers=headers, data=body)

        # Extracting Token: the requests cookie jar is reached through the
        # response's internal attributes.
        response_cookies = response.get('cookies').__dict__.get('_cookies')
        if response_cookies:
            cookie_key = list(response_cookies.keys())[0]
            if cookie_key:
                ret_cookie = response_cookies.get(cookie_key).get("/")
                cookie = self.get_cookie(ret_cookie)
                token = ret_cookie.get("KACE_CSRF_TOKEN").__dict__.get('value')
        if not token:
            raise DemistoException("Could not get token")
        if not cookie:
            raise DemistoException("Could not get cookie")
        return token, cookie

    def update_token(self):
        """Refresh the CSRF token and session cookie in place.

        Returns:
            None; self._token and self._cookie are updated.
        """
        self._token, self._cookie = self.get_token()

    def get_cookie(self, res_cookie: dict) -> str:
        """Serialize the login cookies into a single Cookie header value.

        Args:
            res_cookie: part of the response that the cookie is inside it.

        Returns:
            string that will be sent in the requests which represents the cookie in the header.
        """
        KACE_CSRF_TOKEN = res_cookie.get("KACE_CSRF_TOKEN").__dict__.get('value')
        x_dell_auth_jwt = res_cookie.get("x-dell-auth-jwt").__dict__.get('value')
        kboxid = res_cookie.get("kboxid").__dict__.get('value')
        KACE_LAST_USER_SECURE = res_cookie.get("KACE_LAST_USER_SECURE").__dict__.get('value')
        KACE_LAST_ORG_SECURE = res_cookie.get("KACE_LAST_ORG_SECURE").__dict__.get('value')

        cookie = f'KACE_LAST_USER_SECURE={KACE_LAST_USER_SECURE}; KACE_LAST_ORG_SECURE={KACE_LAST_ORG_SECURE};' \
                 f' kboxid={kboxid}; x-dell-auth-jwt={x_dell_auth_jwt}; KACE_CSRF_TOKEN={KACE_CSRF_TOKEN}'
        return cookie

    def token_request(self, url: str, headers: Optional[dict] = None, data: Optional[str] = None) -> dict:
        """login request for initiating a connection with the product.

        Args:
            url: full url that the request will be sent to.
            headers: headers of the request.
            data: data of the request which includes username and password.

        Returns:
            Dictionary of the response from the product.

        Raises:
            DemistoException: on SSL failure, connection failure or bad credentials.
        """
        try:
            response = requests.request("POST", url, headers=headers, data=data, verify=self._verify)
        except requests.exceptions.SSLError:
            err_msg = 'SSL Certificate Verification Failed - try selecting \'Trust any certificate\' checkbox in' \
                      ' the integration configuration.'
            raise DemistoException(err_msg)
        except requests.exceptions.ConnectionError:
            raise DemistoException("Invalid url , Failed to establish a connection")
        if response.status_code == 401:
            raise DemistoException("Error Code 401 - Invalid user or password")
        return response.__dict__

    def machines_list_request(self, filter_fields: Optional[str] = None):
        """List of machines.

        Args:
            filter_fields: optional raw filter expression appended to the query.

        Returns:
            Response from API.
        """
        url_suffix = '/inventory/machines'
        if filter_fields:
            url_suffix += f'?filtering={filter_fields}'
        return self._http_request("GET", url_suffix=url_suffix, headers=self._request_headers())

    def assets_list_request(self, filter_fields: Optional[str] = None) -> dict:
        """List of assets.

        Args:
            filter_fields: optional raw filter expression appended to the query.

        Returns:
            Response from API.
        """
        url_suffix = '/asset/assets'
        if filter_fields:
            url_suffix += f'?filtering={filter_fields}'
        return self._http_request("GET", url_suffix=url_suffix, headers=self._request_headers())

    def queues_list_request(self, filter_fields: Optional[str] = None) -> dict:
        """List of queues.

        Args:
            filter_fields: optional raw filter expression appended to the query.

        Returns:
            Response from API.
        """
        url_suffix = '/service_desk/queues?shaping=fields all'
        if filter_fields:
            url_suffix += f'&filtering={filter_fields}'
        return self._http_request("GET", url_suffix=url_suffix, headers=self._request_headers())

    def queues_list_fields_request(self, queue_number: str) -> dict:
        """List of fields in specific queue.

        Args:
            queue_number: queue number for the request.

        Returns:
            Response from API.
        """
        return self._http_request("GET", url_suffix=f"/service_desk/queues/{queue_number}/fields",
                                  headers=self._request_headers())

    def tickets_list_request(self, shaping_fields: str = None, filter_fields: str = None) -> dict:
        """List of Tickets.

        Args:
            shaping_fields: str of the shaping that will be sent in the request.
            filter_fields: str of filter that will be sent in the request.

        Returns:
            Response from API.
        """
        if not shaping_fields:
            shaping_fields = set_shaping(self)
            # NOTE(review): the original refreshed the token after computing
            # the shaping - presumably set_shaping's extra API calls can
            # invalidate the session; kept as-is.
            self.update_token()
        url_suffix = f"/service_desk/tickets?shaping={shaping_fields}"
        if filter_fields:
            url_suffix += f'&filtering={filter_fields}'
        return self._http_request("GET", url_suffix=url_suffix, headers=self._request_headers())

    def create_ticket_request(self, data: str) -> dict:
        """Create Ticket.

        Args:
            data (str): the body of the request.

        Returns:
            Response from API.
        """
        return self._http_request("POST", url_suffix="/service_desk/tickets",
                                  headers=self._request_headers(with_body=True), data=data)

    def update_ticket_request(self, ticket_id: str, data: str) -> dict:
        """Update Ticket.

        Args:
            ticket_id (str): ticket id that will be updated.
            data (str): the body of the request.

        Returns:
            Response from API.
        """
        return self._http_request("POST", url_suffix=f"/service_desk/tickets/{ticket_id}",
                                  headers=self._request_headers(with_body=True), data=data)

    def delete_ticket_request(self, ticket_id: str) -> dict:
        """Delete Ticket.

        Args:
            ticket_id (str): ticket id that will be deleted.

        Returns:
            Response from API.
        """
        # Content-Type kept to match the original request headers exactly.
        return self._http_request("DELETE", url_suffix=f"/service_desk/tickets/{ticket_id}",
                                  headers=self._request_headers(with_body=True))

    def ticket_by_id_request(self, filtering_id: int) -> dict:
        """Specific ticket details by ID.

        Args:
            filtering_id: id for filtering by it.

        Returns:
            Response from API.
        """
        filter_fields = f"id eq {filtering_id}"
        return self._http_request("GET", url_suffix=f"/service_desk/tickets?filtering={filter_fields}",
                                  headers=self._request_headers())
def test_module(client: Client, *_) -> Tuple[str, dict, dict]:
    """Check connectivity with the api (and, when fetching is enabled, the fetch parameters).

    Args:
        client : Integration client which communicates with the api.
        args: Users arguments of the command.

    Returns:
        human readable, context, raw response of this command.
    """
    client.machines_list_request()
    client.update_token()
    tickets = client.tickets_list_request().get('Tickets')
    params = demisto.params()
    if tickets and params.get('isFetch'):
        # Exercise the fetch path so a bad fetch_time fails the test button.
        parse_date_range(params.get('fetch_time'), date_format='%Y-%m-%dT%H:%M:%SZ')
        twenty_days_ago = datetime.utcnow() - timedelta(days=20)
        parse_incidents(tickets, "1", '%Y-%m-%dT%H:%M:%SZ', twenty_days_ago)
    return 'ok', {}, {}
def get_machines_list_command(client, args) -> Tuple[str, dict, dict]:
    """Function which returns all machines in the system.

    Args:
        client : Integration client which communicates with the api.
        args: Users arguments of the command.

    Returns:
        human readable, context, raw response of this command.
    """
    limit = int(args.get('limit', 50))
    filter_fields = args.get('custom_filter')
    response = client.machines_list_request(filter_fields)
    # Default to an empty list so slicing never operates on None when the
    # 'Machines' key is absent from the response.
    raw_response = response.get('Machines', [])[:limit]
    context = parse_response(raw_response)
    human_readable_markdown = tableToMarkdown('Quest Kace Machines', context, removeNull=True,
                                              headers=['ID', 'Name', 'IP', 'Created', 'Modified', 'LastInventory',
                                                       'LastSync', 'ManualEntry', 'PagefileMaxSize', 'PagefileSize',
                                                       'RamTotal', 'RamUsed'])
    entry_context = {
        'QuestKace.Machine(val.ID === obj.ID)': context
    }
    return human_readable_markdown, entry_context, raw_response
def get_assets_list_command(client, args) -> Tuple[str, dict, dict]:
    """Function which returns all assets in the system.

    Args:
        client : Integration client which communicates with the api.
        args: Users arguments of the command.

    Returns:
        human readable, context, raw response of this command.
    """
    limit = int(args.get('limit', 50))
    filter_fields = args.get('custom_filter')
    response = client.assets_list_request(filter_fields)
    # Default to an empty list so slicing never operates on None.
    raw_response = response.get('Assets', [])[:limit]
    context = parse_response(raw_response)
    human_readable_markdown = tableToMarkdown('Quest Kace Assets', context, removeNull=True,
                                              headers=['ID', 'Name', 'Created', 'Modified', 'OwnerID', 'MappedID',
                                                       'AssetClassID', 'AssetDataID', 'AssetStatusID', 'AssetTypeID',
                                                       'AssetTypeName'])
    entry_context = {
        'QuestKace.Asset(val.ID === obj.ID)': context
    }
    return human_readable_markdown, entry_context, raw_response
def get_queues_list_command(client, args) -> Tuple[str, dict, dict]:
    """Function which returns all queues in the system.

    Args:
        client : Integration client which communicates with the api.
        args: Users arguments of the command.

    Returns:
        human readable, context, raw response of this command.
    """
    filter_fields = args.get('custom_filter')
    limit = int(args.get('limit', 50))
    response = client.queues_list_request(filter_fields)
    # Default to an empty list so slicing never operates on None.
    raw_response = response.get('Queues', [])[:limit]
    context = parse_response(raw_response)
    human_readable_markdown = tableToMarkdown('Quest Kace Queues', context, removeNull=True,
                                              headers=['ID', 'Name', 'Fields'])
    entry_context = {
        'QuestKace.Queue(val.ID === obj.ID)': context
    }
    return human_readable_markdown, entry_context, raw_response
def get_tickets_list_command(client, args) -> Tuple[str, dict, dict]:
    """Function which returns all tickets in the system.

    Args:
        client : Integration client which communicates with the api.
        args: Users arguments of the command.

    Returns:
        human readable, context, raw response of this command.
    """
    limit = int(args.get('limit', 50))
    custom_shaping = args.get("custom_shaping")
    custom_filter = args.get("custom_filter")
    response = client.tickets_list_request(custom_shaping, custom_filter)
    # Default to an empty list so slicing never operates on None.
    raw_response = response.get('Tickets', [])[:limit]
    context = parse_response(raw_response)
    # Use a distinct loop variable - the original reused `response`, shadowing
    # the API response.
    for ticket in context:
        # Freshly listed tickets are live; delete_ticket_command flips this flag.
        ticket['IsDeleted'] = False
    human_readable_markdown = tableToMarkdown('Quest Kace Tickets', context, removeNull=True,
                                              headers=['ID', 'Title', 'Created', 'Modified', 'HdQueueID', 'DueDate'])
    entry_context = {
        'QuestKace.Ticket(val.ID === obj.ID)': context
    }
    return human_readable_markdown, entry_context, raw_response
def _translate_ticket_field(field_name: str, value):
    """Translate a human-readable ticket field value to its numeric API id.

    Falls back to the raw value when the field or value has no mapping in
    TICKETS_OBJECTS, and returns None for missing/empty values.
    """
    if not value:
        return None
    options = TICKETS_OBJECTS.get(field_name)
    if options:
        return options.get(value, value)
    return value


def create_ticket_command(client, args) -> Tuple[str, dict, dict]:
    """Function which creates a new ticket to the system according to users arguments.

    Args:
        client : Integration client which communicates with the api.
        args: Users arguments of the command.

    Returns:
        human readable, context, raw response of this command.
    """
    hd_queue_id = args.get('queue_id')
    custom_fields = args.get('custom_fields')
    if (custom_fields and "hd_queue_id" not in custom_fields) and (not hd_queue_id):
        raise DemistoException("hd_queue_id is a mandatory value, please add it.")
    title = args.get("title")
    summary = args.get('summary')
    # BUG FIX: the original code assigned the translated category/status/priority
    # values to `impact`, so those three fields were always sent as None and
    # impact could be silently overwritten.
    impact = _translate_ticket_field('impact', args.get('impact'))
    category = _translate_ticket_field('category', args.get('category'))
    status = _translate_ticket_field('status', args.get('status'))
    priority = _translate_ticket_field('priority', args.get('priority'))
    machine = args.get('machine')
    asset = args.get('asset')
    body_from_args = create_body_from_args(hd_queue_id, title, summary, impact, category, status, priority, machine,
                                           asset)
    if custom_fields:
        splited = split_fields(custom_fields)
        body_from_args.update(splited)
    temp_data = {'Tickets': [body_from_args]}
    data = json.dumps(temp_data)
    response = client.create_ticket_request(data)
    if response.get('Result') != 'Success':
        raise DemistoException('Error while adding a new ticket.')
    try:
        id = response.get('IDs')[0]
    except Exception as e:
        raise DemistoException(e)
    client.update_token()
    res = client.ticket_by_id_request(id)
    ticket = res.get('Tickets')
    ticket_view = tableToMarkdown(f'New ticket was added successfully, ticket number {id}.\n', ticket)
    return ticket_view, {}, {}
def create_body_from_args(hd_queue_id: Union[str, int] = None, title: Union[str, int] = None,
                          summary: Union[str, int] = None, impact: Union[str, int] = None,
                          category: Union[str, int] = None, status: Union[str, int] = None,
                          priority: Union[str, int] = None, machine: Union[str, int] = None,
                          asset: Union[str, int] = None) -> dict:
    """Build the ticket request body from the user-supplied arguments.

    Args:
        hd_queue_id: the queue number to insert the ticket to.
        title: title of the ticket.
        summary: summary of the ticket.
        impact: impact of the ticket.
        category: category of the ticket.
        status: status of the ticket.
        priority: priority of the ticket.
        machine: machine of the ticket.
        asset: asset of the ticket.

    Returns:
        body of the request as a dict, containing only the truthy fields.
    """
    candidate_fields = {
        'hd_queue_id': hd_queue_id,
        'title': title,
        'summary': summary,
        'impact': impact,
        'category': category,
        'status': status,
        'priority': priority,
        'machine': machine,
        'asset': asset,
    }
    # Only truthy values make it into the request body (falsy values are
    # treated as "not provided", exactly like the original if-chain).
    return {name: value for name, value in candidate_fields.items() if value}
def update_ticket_command(client, args) -> Tuple[str, dict, dict]:
    """Function which updates an existing ticket according to users arguments.

    Args:
        client : Integration client which communicates with the api.
        args: Users arguments of the command.

    Returns:
        human readable, context, raw response of this command.
    """
    impact = None
    category = None
    status = None
    priority = None
    ticket_id = args.get('ticket_id')
    title = args.get("title")
    summary = args.get('summary')
    # Translate human-readable option names to their numeric API ids.
    # Unknown values are passed through as-is (consistent with ticket
    # creation) instead of raising a KeyError as the original did.
    if args.get('impact'):
        impact = TICKETS_OBJECTS['impact'].get(args.get('impact'), args.get('impact'))
    if args.get('category'):
        category = TICKETS_OBJECTS['category'].get(args.get('category'), args.get('category'))
    if args.get('status'):
        status = TICKETS_OBJECTS['status'].get(args.get('status'), args.get('status'))
    if args.get('priority'):
        priority = TICKETS_OBJECTS['priority'].get(args.get('priority'), args.get('priority'))
    machine = args.get('machine')
    asset = args.get('asset')
    custom_fields = args.get('custom_fields')
    body_from_args = create_body_from_args(title=title, summary=summary, impact=impact, category=category,
                                           status=status,
                                           priority=priority, machine=machine, asset=asset)
    if custom_fields:
        splited = split_fields(custom_fields)
        body_from_args.update(splited)
    temp_data = {'Tickets': [body_from_args]}
    data = json.dumps(temp_data)
    response = client.update_ticket_request(ticket_id, data)
    if response.get('Result') != 'Success':
        raise DemistoException('Error while updating the ticket.')
    client.update_token()
    res = client.ticket_by_id_request(ticket_id)
    ticket = res.get('Tickets')
    ticket_view = tableToMarkdown(f'Ticket number {ticket_id} was updated successfully.\n', ticket)
    return ticket_view, {}, {}
def delete_ticket_command(client, args) -> Tuple[str, dict, dict]:
    """Delete a specific ticket by ticket id.

    Args:
        client : Integration client which communicates with the api.
        args: Users arguments of the command.

    Returns:
        human readable, context, raw response of this command.
    """
    ticket_id = args.get('ticket_id')
    try:
        response = client.delete_ticket_request(ticket_id)
    except Exception as e:
        raise DemistoException(e)
    # Fail fast on anything other than an explicit success result.
    if response.get('Result') != 'Success':
        raise DemistoException('Error while deleting the ticket.')
    context = {}
    previous_entry = demisto.dt(demisto.context(), f'QuestKace.Ticket(val.ID === {ticket_id})')
    if previous_entry:
        # Mark the existing context entry as deleted rather than removing it.
        if isinstance(previous_entry, list):
            previous_entry = previous_entry[0]
        previous_entry['IsDeleted'] = True
        context = {
            'QuestKace.Ticket(val.ID === obj.ID)': previous_entry
        }
    return f'Ticket was deleted successfully. Ticket number {ticket_id}', context, {}
def fetch_incidents(client: Client, fetch_time: str, fetch_shaping: str, last_run: Dict, fetch_limit: str,
                    fetch_queue_id: Optional[list] = None, fetch_filter: Optional[str] = None) -> list:
    """
    This function will execute each interval (default is 1 minute).

    Args:
        client (Client): Quest Kace Client
        fetch_time: time interval for fetch incidents.
        fetch_shaping: shaping for the request.
        fetch_filter: custom filters for the request.
        fetch_limit: limit for number of fetch incidents per fetch.
        fetch_queue_id: queue id for fetch, if not given then fetch runs on all tickets in the system
        last_run (dateparser.time): The greatest incident created_time we fetched from last fetch

    Returns:
        incidents: Incidents that will be created in Demisto
    """
    # 'All' (or no queue at all) means fetching across every queue in the system.
    if not fetch_queue_id or fetch_queue_id[0] == 'All':
        fetch_queue_id = get_queue_ids(client)
    time_format = '%Y-%m-%dT%H:%M:%SZ'
    if not last_run:  # if first time running
        new_last_run = {'last_fetch': parse_date_range(fetch_time, date_format=time_format)[0]}
    else:
        new_last_run = last_run
    if not fetch_shaping:
        # Shaping is cached in the integration context and refreshed daily.
        fetch_shaping = shaping_fetch(client, fetch_queue_id)
    parsed_last_time = datetime.strptime(new_last_run.get('last_fetch', ''), time_format)
    # Build the query filter: tickets created after the last fetch ...
    fetch_filter_for_query = f'created gt {parsed_last_time}'
    if fetch_queue_id:
        # ... restricted to the requested queues ...
        queue_id_str = ';'.join(fetch_queue_id)
        filter_by_queue_id = f'hd_queue_id in {queue_id_str}'
        fetch_filter_for_query = f'{fetch_filter_for_query},{filter_by_queue_id}'
    if fetch_filter:
        # ... plus any user-supplied custom filter.
        fetch_filter_for_query = f'{fetch_filter_for_query},{fetch_filter}'
    demisto.info(f"Fetching Incident has Started,\n"
                 f"Fetch filter is {fetch_filter_for_query}\n"
                 f"Last fetch was on {str(parsed_last_time)}")
    client.update_token()
    items: dict = client.tickets_list_request(fetch_shaping, fetch_filter_for_query)
    items: list = items.get('Tickets', [])
    incidents, last_incident_time = parse_incidents(items, fetch_limit, time_format, parsed_last_time)
    # Persist the newest incident time as the next run's lower bound.
    last_incident_time = last_incident_time.strftime(time_format)
    demisto.info(f"Fetching Incident has Finished\n"
                 f"Fetch limit was {fetch_limit}"
                 f"Last fetch was on {str(last_incident_time)}\n"
                 f"Number of incidents was {len(incidents)}")
    demisto.setLastRun({'last_fetch': last_incident_time})
    return incidents
def shaping_fetch(client: Client, fetch_queue_id: list) -> str:
    """Return the ticket shaping string, cached in the integration context for a day.

    Computing the shaping requires one API call per queue, so the result is
    refreshed at most once every 24 hours and stored in the integration context
    between fetch runs.

    Args:
        client: Client for the api.
        fetch_queue_id: Queue ids whose fields make up the shaping.

    Returns:
        The current shaping string.
    """
    integration_context = demisto.getIntegrationContext() or {}
    # A missing or corrupt context yields 0 here, which forces a refresh below.
    # (Previously a context without 'valid_until' crashed on `time_now < None`.)
    valid_until = integration_context.get('valid_until') or 0
    if int(time.time()) < valid_until:
        return integration_context.get('shaping_fields')
    # Cache miss or expired: recompute and store with a fresh 24h TTL.
    fetch_shaping = set_shaping(client, fetch_queue_id)
    demisto.setIntegrationContext({
        'shaping_fields': fetch_shaping,
        'valid_until': int(time.time()) + 3600 * 24,
    })
    return fetch_shaping
def get_fields_by_queue(client, queue: Optional[list]) -> list:
    """Collect the distinct ticket field json keys across the given queues.

    Args:
        client: Client for the api.
        queue: Queue ids to inspect; when falsy, every queue in the system is used.

    Returns:
        De-duplicated list of field json keys.
    """
    queues_id = queue if queue else get_queue_ids(client)
    # Requesting these two fields triggers an internal server error (HTTP 500).
    excluded = ('related_tickets', 'referring_tickets')
    fields: list = []
    for q in queues_id:
        client.update_token()
        fields_by_queue = client.queues_list_fields_request(queue_number=str(q))
        for field in fields_by_queue.get('Fields', []):
            json_key = field.get('jsonKey')
            if json_key not in fields and json_key not in excluded:
                fields.append(json_key)
    return fields
def get_queue_ids(client: Client) -> list:
    """Return the ids of every queue defined in the system, as strings.

    Args:
        client: Client for the api.

    Returns:
        List of queue id strings.
    """
    response = client.queues_list_request()
    return [str(queue.get('id')) for queue in response.get('Queues', [])]
def shaping_by_fields(fields: list) -> str:
    """Build the request shaping string from a list of field names.

    Each field is appended to the base 'hd_ticket all' shaping with a
    'limited' qualifier, comma separated.

    Args:
        fields: Field names that should be part of the shaping.

    Returns:
        The shaping string.
    """
    parts = ['hd_ticket all'] + [f'{field} limited' for field in fields]
    return ','.join(parts)
def set_shaping(client, queue: Optional[list] = None) -> str:
    """Compute the request shaping for the given queues.

    Args:
        client: Client used to look up the queue fields.
        queue: Optional list of queue ids to restrict the shaping to.

    Returns:
        The shaping string.
    """
    return shaping_by_fields(get_fields_by_queue(client, queue))
def parse_incidents(items: list, fetch_limit: str, time_format: str, parsed_last_time: datetime) \
        -> Tuple[list, Any]:
    """Convert raw ticket dicts into Demisto incidents, up to the fetch limit.

    Args:
        items: List of tickets from the api response.
        fetch_limit: Maximum number of incidents per fetch cycle.
        time_format: Time format string of the integration.
        parsed_last_time: Time of the last fetched incident; updated to the
            creation time of the last parsed ticket.

    Returns:
        incidents: List of incident dicts ready for Demisto.
        parsed_last_time: Creation time of the last parsed incident.
    """
    incidents = []
    # Slicing enforces the limit exactly like the original counter/break loop.
    for item in items[:int(fetch_limit)]:
        occurred = dateparser.parse(item['created'])
        incidents.append({
            'name': item['title'],
            'occurred': occurred.strftime(time_format),
            'rawJSON': json.dumps(item)
        })
        parsed_last_time = occurred
    return incidents, parsed_last_time
def split_fields(fields: str = '') -> dict:
    """Split a ';'-separated Demisto argument string into a request fields dict.

    Args:
        fields: Fields in 'key=value;key2=value2' string representation.

    Returns:
        Dict mapping field keys to values.

    Raises:
        Exception: If the string contains no '=' at all.
    """
    if not fields:
        return {}
    if '=' not in fields:
        raise Exception(
            f"The argument: {fields}.\nmust contain a '=' to specify the keys and values. e.g: key=val.")
    result = {}
    for raw in fields.split(';'):
        # Split only on the first '=': values may themselves contain '=' signs.
        key, sep, value = raw.partition('=')
        if sep:
            result[key] = value
    return result
def main():
    """Entry point: read the integration params, build the client and route the command."""
    params = demisto.params()
    creds = params.get('credentials')
    username = creds.get("identifier")
    password = creds.get('password')
    base_url = params.get('url')
    proxy = demisto.params().get('proxy', False)
    verify_certificate = not params.get('insecure', False)
    # fetch-incidents configuration
    fetch_limit = params.get('fetch_limit', 10)
    fetch_time = params.get('fetch_time', '1 day')
    fetch_shaping = params.get('fetch_shaping')
    fetch_filter = params.get('fetch_filter')
    fetch_queue_id = argToList(params.get('fetch_queue_id'))
    try:
        client = Client(
            url=base_url,
            username=username,
            password=password,
            verify=verify_certificate,
            proxy=proxy)
        command = demisto.command()
        LOG(f'Command being called is {command}')
        # Command name -> handler returning (readable_output, outputs, raw_response).
        handlers: Dict[str, Callable[[Client, Dict[str, str]], Tuple[str, dict, dict]]] = {
            'test-module': test_module,
            'kace-machines-list': get_machines_list_command,
            'kace-assets-list': get_assets_list_command,
            'kace-queues-list': get_queues_list_command,
            'kace-tickets-list': get_tickets_list_command,
            'kace-ticket-create': create_ticket_command,
            'kace-ticket-update': update_ticket_command,
            'kace-ticket-delete': delete_ticket_command,
        }
        if command in handlers:
            return_outputs(*handlers[command](client, demisto.args()))
        elif command == 'fetch-incidents':
            incidents = fetch_incidents(client, fetch_time=fetch_time, fetch_shaping=fetch_shaping,
                                        fetch_filter=fetch_filter, fetch_limit=fetch_limit,
                                        fetch_queue_id=fetch_queue_id, last_run=demisto.getLastRun())
            demisto.incidents(incidents)
        else:
            raise NotImplementedError(f'{command} is not an existing QuestKace command')
    except Exception as e:
        return_error(f'Error from QuestKace Integration.\n'
                     f'Failed to execute {demisto.command()} command.\n\n Error: {str(e)}')
# Standard Demisto entry point: runs when executed as a script or loaded by the server.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
|
Candidates must be willing to travel up to 80%.
Accenture is a global management consulting, technology services and outsourcing company, with more than 336,000 people serving clients in more than 120 countries. Combining unparalleled experience, comprehensive capabilities across all industries and business functions, and extensive research on the world’s most successful companies, Accenture collaborates with clients to help them become high-performance businesses and governments. The company generated net revenues of US$30.0 billion for the fiscal year ended Aug. 31, 2014.
|
import argparse
import os
import shutil
import subprocess as sp


def main():
    """Create a conda environment from an environment file, pinning the Python version."""
    # Args
    parser = argparse.ArgumentParser(description='Creates a conda environment from file for a given Python version.')
    parser.add_argument('-n', '--name', type=str, nargs=1, help='The name of the created Python environment')
    parser.add_argument('-p', '--python', type=str, nargs=1, help='The version of the created Python environment')
    parser.add_argument('conda_file', nargs='*', help='The file for the created Python environment')
    args = parser.parse_args()

    with open(args.conda_file[0], "r") as handle:
        script = handle.read()

    # Pin the requested interpreter, e.g. "- python" -> "- python 3.8*"
    tmp_file = "tmp_env.yaml"
    script = script.replace("- python", "- python {}*".format(args.python[0]))
    with open(tmp_file, "w") as handle:
        handle.write(script)

    conda_path = shutil.which("conda")
    if conda_path is None:
        # Previously a missing conda silently produced a "None env create ..." command.
        raise RuntimeError("conda executable not found on PATH")
    print("CONDA ENV NAME {}".format(args.name[0]))
    print("PYTHON VERSION {}".format(args.python[0]))
    print("CONDA FILE NAME {}".format(args.conda_file[0]))
    print("CONDA path {}".format(conda_path))
    try:
        sp.call("{} env create -n {} -f {}".format(conda_path, args.name[0], tmp_file), shell=True)
    finally:
        # Always clean up the temporary environment file, even if the call raises.
        os.unlink(tmp_file)


# Guard so importing this module no longer parses argv and runs side effects.
if __name__ == '__main__':
    main()
|
Inspiration and knowledge about Danish pot plants.
Large assortment in Garden Mums, cut Chrysanthemum, and cutting raised bedding plants.
Large assortment in Perennials and grasses.
The new brand from Gartneriet Pedersen has been named Capsicum Maya. A name which leads the thoughts to Central America, from where the plants originate.
Cohen Propagation Nurseries are Israel's biggest producer of unrooted cuttings. They produce more than 100 million cuttings per year, covering more than 100 species and 600 varieties.
Their Fuchsia assortment counts approx. 150 varieties.
Herbs and perennials from the biggest producer of young plants in Israel.
Vitro Plus is the world leader in production of vitro ferns. They produce more than 20 million ferns annually, spread over more than 50 varieties.
|
from waldur_core.logging.loggers import EventLogger, event_logger
class TenantQuotaLogger(EventLogger):
    """Logs changes of a tenant quota limit."""
    # Event payload fields; string values presumably reference Django models
    # resolved by EventLogger - TODO confirm against waldur_core.logging.loggers.
    quota = 'quotas.Quota'
    tenant = 'openstack.Tenant'
    limit = float
    old_limit = float
    class Meta:
        event_types = ('openstack_tenant_quota_limit_updated',)
        event_groups = {
            'resources': event_types,
        }
    @staticmethod
    def get_scopes(event_context):
        # The event is visible on the tenant, its project and the project's customer.
        tenant = event_context['tenant']
        project = tenant.project
        return {tenant, project, project.customer}
class RouterLogger(EventLogger):
    """Logs updates of an OpenStack router's routes."""
    router = 'openstack.Router'
    old_routes = list
    new_routes = list
    tenant_backend_id = str
    class Meta:
        event_types = ('openstack_router_updated',)
        event_groups = {
            'resources': event_types,
        }
    @staticmethod
    def get_scopes(event_context):
        # Visible on the router's project and that project's customer
        # (note: unlike most siblings, not on the router object itself).
        router = event_context['router']
        project = router.project
        return {project, project.customer}
class SecurityGroupLogger(EventLogger):
    """Logs lifecycle events of an OpenStack security group."""
    security_group = 'openstack.SecurityGroup'
    class Meta:
        event_types = (
            'openstack_security_group_imported',
            'openstack_security_group_created',
            'openstack_security_group_updated',
            'openstack_security_group_pulled',
            'openstack_security_group_deleted',
            'openstack_security_group_cleaned',
        )
        event_groups = {
            'resources': event_types,
        }
    @staticmethod
    def get_scopes(event_context):
        # Visible on the security group and its tenant.
        security_group = event_context['security_group']
        return {
            security_group,
            security_group.tenant,
        }
class SecurityGroupRuleLogger(EventLogger):
    """Logs lifecycle events of a security group rule."""
    security_group_rule = 'openstack.SecurityGroupRule'
    class Meta:
        event_types = (
            'openstack_security_group_rule_imported',
            'openstack_security_group_rule_created',
            'openstack_security_group_rule_updated',
            'openstack_security_group_rule_deleted',
            'openstack_security_group_rule_cleaned',
        )
        event_groups = {
            'resources': event_types,
        }
    @staticmethod
    def get_scopes(event_context):
        # NOTE(review): returns a list while sibling loggers return sets -
        # confirm whether the consumer cares about ordering/duplicates.
        security_group_rule = event_context['security_group_rule']
        return [
            security_group_rule,
            security_group_rule.security_group,
        ]
class NetworkLogger(EventLogger):
    """Logs lifecycle events of an OpenStack network."""
    network = 'openstack.Network'
    class Meta:
        event_types = (
            'openstack_network_imported',
            'openstack_network_created',
            'openstack_network_updated',
            'openstack_network_pulled',
            'openstack_network_deleted',
            'openstack_network_cleaned',
        )
        event_groups = {
            'resources': event_types,
        }
    @staticmethod
    def get_scopes(event_context):
        # Visible on the network and its tenant.
        network = event_context['network']
        return {
            network,
            network.tenant,
        }
class SubNetLogger(EventLogger):
    """Logs lifecycle events of an OpenStack subnet."""
    subnet = 'openstack.SubNet'
    class Meta:
        event_types = (
            'openstack_subnet_created',
            'openstack_subnet_imported',
            'openstack_subnet_updated',
            'openstack_subnet_pulled',
            'openstack_subnet_deleted',
            'openstack_subnet_cleaned',
        )
        event_groups = {
            'resources': event_types,
        }
    @staticmethod
    def get_scopes(event_context):
        # Visible on the subnet and its parent network.
        subnet = event_context['subnet']
        return {
            subnet,
            subnet.network,
        }
class PortLogger(EventLogger):
    """Logs lifecycle events of an OpenStack port."""
    port = 'openstack.Port'
    class Meta:
        event_types = (
            'openstack_port_created',
            'openstack_port_imported',
            'openstack_port_pulled',
            'openstack_port_deleted',
            'openstack_port_cleaned',
        )
        event_groups = {
            'resources': event_types,
        }
    @staticmethod
    def get_scopes(event_context):
        # Visible on the port and its parent network.
        port = event_context['port']
        return {
            port,
            port.network,
        }
class FloatingIPLogger(EventLogger):
    """Logs attach/detach events of a floating IP."""
    floating_ip = 'openstack.FloatingIP'
    class Meta:
        event_types = (
            'openstack_floating_ip_attached',
            'openstack_floating_ip_detached',
        )
        event_groups = {
            'resources': event_types,
        }
    @staticmethod
    def get_scopes(event_context):
        # Visible on the floating IP, its tenant and the port it was (de)attached to.
        floating_ip = event_context['floating_ip']
        port = event_context['port']
        return {floating_ip, floating_ip.tenant, port}
# Register every logger under its event-source key with the global registry,
# so emitters can look them up by name.
event_logger.register('openstack_tenant_quota', TenantQuotaLogger)
event_logger.register('openstack_router', RouterLogger)
event_logger.register('openstack_network', NetworkLogger)
event_logger.register('openstack_subnet', SubNetLogger)
event_logger.register('openstack_security_group', SecurityGroupLogger)
event_logger.register('openstack_security_group_rule', SecurityGroupRuleLogger)
event_logger.register('openstack_port', PortLogger)
event_logger.register('openstack_floating_ip', FloatingIPLogger)
|
Cugat's study is at 12 months.
TOPIC: Cugat's study is at 12 months.
Does anyone know when or where Cugat might release the 12 month data?
|
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import rcParams
import matplotlib.ticker as ticker
import projection_funcs as pf
import policy_tools as pt
import inspect
from copy import deepcopy
def bigplot(scens, res_df, shapes_df, name=None, _debug=False):
    '''Makes a 3-row figure for a set of scenarios:
    row 1: per-spendline shape plots, based on passed shapes_df (or will make one)
    row 2: cumulative (annualised) spend, based on passed results df
    row 3: annual spend differences vs the first scenario

    Args:
        scens: the scenarios (passed through to pt.make_shapes1 / pt.make_params_table)
        res_df: results DataFrame; columns are a (scenario, spendline) multi-index
        shapes_df: spendline shapes DataFrame, or None to generate one from scens
        name: if given, the figure is saved to figs/<name>.png
        _debug: print tracing information
    '''
    if _debug: print("\nIN FUNCTION: ".ljust(20), inspect.stack()[0][3])
    if _debug: print("..called by: ".ljust(20), inspect.stack()[1][3], end="\n\n")
    if shapes_df is None:
        shapes_df = pt.make_shapes1(scens, flat=True, multi_index=True).sort_index(axis=1)
    # MAKE A TABLE WITH PARAMETERS & SUMMARY
    params_table1 = pt.make_params_table(scens).append(res_df.groupby(res_df.index.year).sum().iloc[:5,:])
    # NOTE(review): bare expression below is a no-op outside a notebook - leftover?
    params_table1
    fig = plt.figure(figsize=(10,10), dpi=200)
    legend = list(shapes_df.columns.levels[0])
    # 144 appears to be a double annualisation factor (12*12) - TODO confirm
    max_y = shapes_df.max().max()*1.1*144
    pad = 25
    if _debug: print('columns to plot are'.ljust(pad), shapes_df.columns)
    # get only lines we want (i.e. drop the '_init' spendlines)
    right_lines = [x for x in shapes_df.columns.levels[1] if '_init' not in x]
    if _debug: print("right_lines".ljust(pad), right_lines)
    # get the df sorted etc
    sorted_df = shapes_df.sort_index(axis=1)
    for i, line in enumerate(right_lines):
        # this is the crucial operation which reorganises the df across scenarios
        # eg grouping together EoL spendlines across baseline, option1, option2
        # NB annualising here
        if _debug: print("\n" + "+"*10 + "\nLINE is".ljust(pad), line)
        if _debug: print("index is".ljust(pad), i)
        sub_df = sorted_df.xs(line, level=1, axis=1) *144
        # NOTE(review): unreachable - right_lines already excludes '_init' entries
        if '_init' in line:
            if _debug: print('exiting as contains init')
            break
        if _debug: print('sub_df'); print(sub_df.head(), "\n")
        # make the plot: one top-row panel per spendline
        ax = plt.subplot2grid((3, 3),(0,i))
        # ax = plt.subplot2grid((4, 4),(3,i), rowspan=0)
        for j in sub_df.columns:
            if _debug: print('\nnow in sub_df col'.ljust(pad), j)
            # these are now double-annualised
            if j == 'baseline': # treat the baseline separately
                if _debug: print('plotting dfcol (base)'.ljust(pad), j)
                if _debug: print('data'); print(sub_df[j].head())
                ax.plot(sub_df.index/12, sub_df[j], color='black')
            else:
                if _debug: print('plotting dfcol (not base)'.ljust(pad), j)
                if _debug: print('data'); print(sub_df[j].head())
                ax.plot(sub_df.index/12, sub_df[j], alpha=0.75)
        ax.set_title(line + " cohorts")
        ax.set_xlabel('years post launch')
        ax.set_ylim(0,max_y)
        # only the first panel gets a legend and a y label
        if i == 0:
            ax.legend(legend)
            # if i == 0: ax.legend([p for p in pols])
            ax.set_ylabel('£m, annualised')
        else: ax.yaxis.set_tick_params(label1On=False)
    # SECOND ROW: cumulative spend
    ax = plt.subplot2grid((3, 3),(1,0), colspan=2)
    # ax = plt.subplot2grid((4, 4),(0,2), rowspan=2, colspan=2)
    plot_cumspend_line(res_df, plot_pers=60, annualise=True, ax=ax, _debug=_debug) # annualise
    ax.set_title('Annualised net spend on future launches')
    ax.legend(legend)
    ax.set_ylabel('£m, annualised')
    # THIRD ROW: annual diffs
    # get data grouped by scenario (aggregating over spendlines)
    data = deepcopy(res_df.groupby(axis=1, level=0).sum())
    ax = plt.subplot2grid((3, 3),(2,0), colspan=2)
    # ax = plt.subplot2grid((4, 4),(2,2), rowspan=3, colspan=2)
    plot_ann_diffs(data, ax=ax, net_spend=True, legend=legend[1:], table=True)
    fig.subplots_adjust(hspace=0.6, wspace=0.3)
    if name is not None:
        fig.savefig('figs/' + name + '.png')
##_________________________________________________________________________##
def plot_cumspend_line(res_df, annualise=True, net_spend=False, plot_pers=None,
            fig=None, ax=None, figsize=None, return_fig=False, save_path=None, _debug=False):
    '''Plots a line graph of the scenarios in a results DataFrame, summing
    across spendlines (level 0 of the column multi-index is the scenario).

    Can draw on an existing axis (pass ax) or create a new figure.
    Limit the plotted time interval with plot_pers.
    The input DataFrame is never modified.
    '''
    if _debug: print("\nIN FUNCTION: ".ljust(20), inspect.stack()[0][3])
    if _debug: print("..called by: ".ljust(20), inspect.stack()[1][3], end="\n\n")
    pad = 20
    # work on aggregates only, so res_df itself is untouched
    ann_factor = 12 if annualise else 1
    n_pers = len(res_df) if plot_pers is None else plot_pers
    if _debug: print('plot pers'.ljust(pad), n_pers)
    x_index = res_df.index.to_timestamp()[:n_pers]
    # one line per scenario: sum over the top level of the column multi-index
    scen_lines = res_df.groupby(level=0, axis=1).sum().iloc[:n_pers, :] * ann_factor
    if _debug: print('scen_lines:\n', scen_lines.head())
    # make a fresh figure unless an axis was supplied (grid plotting case)
    if fig is None and ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    for pos, scen in enumerate(scen_lines):
        if pos == 0:
            # first scenario (counterfactual) drawn in black
            ax.plot(x_index, scen_lines[scen].values, color='black')
        else:
            ax.plot(x_index, scen_lines[scen].values, alpha=0.75)
    for tick_label in ax.get_xticklabels():
        tick_label.set_rotation(45)
    ax.legend(scen_lines.columns)
    ax.set_yticklabels(['{:,}'.format(int(tick)) for tick in ax.get_yticks().tolist()])
    title_str = " net" if net_spend else ""
    ax.set_title("Accumulated{} spend".format(title_str))
    if save_path is not None:
        fig.savefig(save_path)
    if _debug: print("\nLEAVING FUNCTION: ".ljust(20), inspect.stack()[0][3])
    if _debug: print("..returning to: ".ljust(20), inspect.stack()[1][3], end="\n\n")
    if return_fig: return(fig)
##_________________________________________________________________________##
def plot_ann_diffs(projs, max_yrs=5, fig=None, ax=None, figsize=None,
            table=False, legend=None, net_spend=False, return_fig=False, save_path=None, _debug=False):
    '''Plots a bar chart of annual differences vs the first column.

    Args:
        projs: DataFrame of projections; column 0 is the counterfactual which is
            subtracted from the remaining columns.
        max_yrs: number of years of annual differences to plot.
        fig, ax: existing figure/axis to draw on (a new one is created if both are None).
        figsize: size of the figure when one is created.
        table: if True, drop the x tick labels and add a data table to the axis.
        legend: optional legend labels (defaults to the diff column names).
        net_spend: if True, the title notes "net" spend.
        return_fig: if True, return the figure.
        save_path: if given, save the figure to this path.
        _debug: print tracing information.
    '''
    if _debug: print("\nIN FUNCTION: ".ljust(20), inspect.stack()[0][3])
    if _debug: print("..called by: ".ljust(20), inspect.stack()[1][3], end="\n\n")
    # differences vs the first (counterfactual) column, aggregated by calendar year
    diffs = projs.iloc[:,1:].subtract(projs.iloc[:,0], axis=0)
    diffs = diffs.groupby(diffs.index.year).sum().iloc[:max_yrs,:]
    ind = diffs.index
    # set the name of the counterfactual (columns may be tuples from a multi-index)
    col_zero = projs.columns[0]
    if isinstance(col_zero, tuple):
        counterfactual_name = col_zero[0]
    else: counterfactual_name = col_zero
    # create fig and ax, unless passed (which they will be if plotting in existing grid)
    if fig is None and ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    num_rects = len(diffs.columns)
    rect_width = 0.5
    gap = 0.45
    # one group of bars per year, offset within the group per scenario
    for i, x in enumerate(diffs):
        rect = ax.bar(diffs.index + ((i/num_rects)*(1-gap)), diffs[x],
                        width=rect_width/num_rects)
    title_str = ""
    if net_spend: title_str = " net"
    ax.set_title("Difference in{} annual spend vs ".format(title_str) + counterfactual_name +", £m")
    ax.tick_params(axis='x', bottom='off')
    ax.grid(False, axis='x')
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.set_yticklabels(['{:,}'.format(int(x)) for x in ax.get_yticks().tolist()])
    if legend is not None:
        ax.legend(legend)
    else:
        ax.legend(diffs.columns)
    # NOTE(review): this overrides any custom legend whenever there are more than
    # two scenarios - looks like a leftover; confirm intent before removing.
    if len(diffs.columns)>2: ax.legend(diffs.columns)
    if table:
        ax.set_xticks([])
        rows = []
        for x in diffs:
            rows.append(["{:0,.0f}".format(y) for y in diffs[x]])
        row_labs = None
        if legend: row_labs = legend
        else: row_labs = diffs.columns
        c_labels = list(diffs.index)
        tab = ax.table(cellText=rows, colLabels=c_labels, rowLabels= row_labs)
        # Bug fix: 'tab.auto_set_font_size' was referenced without calling it (a
        # no-op). Auto font sizing must be disabled for set_fontsize to apply.
        tab.auto_set_font_size(False)
        tab.set_fontsize(12)
        tab.scale(1,2)
    if save_path is not None:
        fig.savefig(save_path)
    if _debug: print("\nLEAVING FUNCTION: ".ljust(20), inspect.stack()[0][3])
    if _debug: print("..returning to: ".ljust(20), inspect.stack()[1][3], end="\n\n")
    if return_fig: return(fig)
##_________________________________________________________________________##
def plot_impact_grid3(policy, start_m, n_pers, projs=None, diffs=None, max_bar_yrs=5, plot_pers=None, net_spend=False,
                        save_path=None, plot_bar=True, return_fig=False,
                        table=False):
    '''Plots a grid of charts for a policy: shape lines, cumulative spend and
    (optionally) an annual-differences bar chart with a data table.

    Going to change this to use individual plotting functions
    for each chart commonly needed, so can then choose whatever grid layout

    NOTE(review): depends on names not defined in this module as shown
    (project_policy, plot_shapes_line, plot_diffs_ann_bar) - confirm they are
    imported elsewhere. The plot_cumspend_line call below also passes
    start_m/n_pers/policy kwargs that the version in this module does not
    accept - likely predates a refactor; verify before use.
    '''
    if projs is None: projs = project_policy(policy, start_m, n_pers, net_spend=net_spend)
    ind = projs.index.to_timestamp()
    # differences vs the first column (the counterfactual)
    if diffs is None: diffs = projs.iloc[:,1:].subtract(projs.iloc[:,0], axis=0)
    # plot all shapes and cumulated projections
    # for diffs, calc vs first columnb
    annual_projs = projs.groupby(projs.index.year).sum()
    annual_diffs = diffs.groupby(diffs.index.year).sum()
    # grid height grows with the optional bar chart and table rows
    tab_rows = 2
    if plot_bar:
        tab_rows +=1
    if table:
        tab_rows +=1
    fig = plt.figure(figsize=(12,tab_rows*5))
    rcParams['axes.titlepad'] = 12
    ax0 = plt.subplot2grid((tab_rows,2), (0, 0))
    plot_shapes_line(policy, annualise=True, ax=ax0)
    ax1 = plt.subplot2grid((tab_rows,2), (0, 1))
    plot_cumspend_line(start_m=start_m, n_pers=n_pers, annualise=True, plot_pers=plot_pers, policy=policy, net_spend=net_spend, ax=ax1)
    if plot_bar:
        ax2 = plt.subplot2grid((tab_rows,2), (1, 0), colspan=2)
        plot_diffs_ann_bar(start_m=start_m, n_pers=n_pers, ax=ax2, projs=projs, diffs=diffs,
                                table=True, max_yrs=max_bar_yrs, net_spend=net_spend)
    # if table:
    #     tab = plt.subplot2grid((tab_rows,2), (2, 0), colspan=2)
    #     tab.set_frame_on(False)
    #     tab.set_xticks([])
    #     tab.set_yticks([])
    #     rowvals = ["{:0,.0f}".format(x) for x in annual_diffs.iloc[:,0].values]
    #     the_table = tab.table(cellText=[rowvals], rowLabels=['spend, £m'],
    #                             loc='top')
    #     the_table.auto_set_font_size(False)
    #     the_table.set_fontsize(10)
    #     fig.text(0.13,0.8,'here is text')
    fig.subplots_adjust(hspace=0.4, wspace=0.3)
    if save_path is not None:
        fig.savefig(save_path)
    if return_fig:
        return fig
##_________________________________________________________________________##
|
HughesNet Duluth Satellite Broadband is the hi-tech high-speed Internet solution that's accessible to everyone throughout the area of the United States. HughesNET Duluth satellite internet services ideally suit residential customers or telecommuters (those working from home) who seek high-speed internet service at a competitive price.
HughesNET Duluth high-speed internet via satellite brings the World Wide Web to your home much like DIRECTV-from satellites in orbit hovering over our planet. Set up a HughesNET Duluth satellite dish in a place with a clear view of the southern sky. Explore the realm of electronic information and entertainment in an instant.
The new two-way service offers faster uploads than the older one-way service because it uses the satellite to send information, whereas one-way service transferred signals via your phone line.
With two-way service, you're always online, always connected, and because it doesn't use your phone line, view your favorite websites as well as talk to your friends on the phone simultaneously.
The HughesNET Duluth satellite modem is compatible with operating systems supporting TCP/IP protocol such as Windows®, Macintosh®, Unix® and Linux®. The HughesNET satellite modem offers faster speeds than ever before!
HughesNET Duluth Modem is developed to bring higher speeds and enhanced performance for all HughesNet service plans. Get HughesNet Broadband Satellite internet for your Home or Small Home Office. All plans include free Delivery and Professional Installation.
Keep up with 21st-century information technology. Why not upgrade your internet connection today? Complete the online availability form and see if you qualify for high-speed Hughes Duluth satellite internet service!
|
# Firelet - Distributed firewall management.
# Copyright (C) 2010 Federico Ceratto
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pytest import raises
from webtest import TestApp, AppError
import bottle
import logging
import pytest
from firelet import fireletd
from firelet.flcore import GitFireSet, DemoGitFireSet, Users
from firelet.flssh import MockSSHConnector
from firelet.mailer import Mailer
import firelet.flssh
log = logging.getLogger(__name__)
# TODO: fix skipped tests
# Marker that unconditionally skips a test (pytest evaluates the "True" string).
skip = pytest.mark.skipif("True")
class Conf(object):
    """Minimal stand-in for the fireletd configuration object used by the tests."""
    public_url = 'http://localhost'
    stop_on_extra_interfaces = False
@pytest.fixture
def mailer(monkeypatch):
    """Mailer with bogus addresses and outgoing mail disabled (send_msg no-ops)."""
    mailer = Mailer(
        sender = 'bogus@sender.org',
        recipients = 'bogus@recipient.org',
        smtp_server = 'bogus-email-server',
    )
    monkeypatch.setattr(mailer, 'send_msg', lambda *a, **kw: None)
    return mailer
@pytest.fixture
def mock_ssh(monkeypatch):
    """Replace the real SSH connector with the mock implementation."""
    # FIXME: broken
    monkeypatch.setattr(firelet.flssh, 'SSHConnector', MockSSHConnector)
@pytest.fixture
def raw_app(repodir, mailer, mock_ssh):
    """Create app (without logging in)"""
    bottle.debug(True)
    app = TestApp(fireletd.app)
    assert not app.cookies
    # Wire the module-level globals the daemon expects before serving requests.
    fireletd.conf = Conf()
    assert fireletd.conf
    fireletd.users = Users(d=repodir)
    fireletd.mailer = mailer
    fireletd.fs = GitFireSet(repodir)
    return app
@pytest.fixture
def webapp(raw_app):
    """Create app and log in"""
    assert not raw_app.cookies
    # Log in as the demo user; a successful login sets the session cookie.
    raw_app.post('/login', {'user': 'Ada', 'pwd': 'ada'})
    assert raw_app.cookies.keys() == ['fireletd']
    return raw_app
# Unauthenticated tests
def test_bogus_page(raw_app):
    """An unknown URL raises an application error."""
    with raises(AppError):
        raw_app.get('/bogus_page')
def test_index_page_unauth(raw_app):
    """The index page is reachable without authentication."""
    out = raw_app.get('/')
    assert out.status_code == 200
@skip
def test_login_unauth(raw_app):
    """GET on the login page works (currently skipped)."""
    out = raw_app.get('/login')
    assert out.status_code == 200
def test_login_incorrect(raw_app):
    """Wrong credentials do not set a session cookie."""
    assert not raw_app.cookies
    out = raw_app.post('/login', {'user': 'bogus', 'pwd': 'bogus'})
    assert not raw_app.cookies
def test_login_correct(raw_app):
    """Valid credentials set the 'fireletd' session cookie."""
    assert not raw_app.cookies
    raw_app.post('/login', {'user': 'Ada', 'pwd': 'ada'})
    assert raw_app.cookies.keys() == ['fireletd']
def test_logout_unauth(raw_app):
    """Logging out while unauthenticated redirects."""
    out = raw_app.get('/logout')
    assert out.status_code == 302 # redirect
# Authenticated tests
def test_index_page(webapp):
    """The index page renders a full HTML document when logged in."""
    out = webapp.get('/')
    assert out.status_code == 200
    assert 'DOCTYPE' in out.text
    assert 'body' in out.text
    assert 'Distributed firewall management' in out
    assert '</html>' in out
def test_logout(webapp):
    """Logging out clears the session cookie."""
    assert webapp.cookies.keys() == ['fireletd']
    webapp.get('/logout')
    assert not webapp.cookies.keys()
def test_double_login(webapp):
    """Logging in again while already authenticated keeps the session cookie."""
    # log in again
    assert webapp.cookies.keys() == ['fireletd']
    webapp.post('/login', {'user': 'Ada', 'pwd': 'ada'})
    assert webapp.cookies.keys() == ['fireletd']
def test_messages(webapp):
    """The messages page is empty by default."""
    out = webapp.get('/messages')
    assert str(out.html) == ''
def test_ruleset(webapp):
    """The ruleset page lists the demo rules in a table."""
    out = webapp.get('/ruleset')
    assert out.pyquery('table#items')
    assert 'Ssh access from the test workstation' in out.text
    rules = out.pyquery('table#items tr')
    assert len(rules) == 11 # 10 rules plus header
def test_ruleset_post_delete(webapp):
    """Deleting a rule removes one row from the ruleset table."""
    out = webapp.post('/ruleset', dict(
        action='delete',
        rid=0,
    ))
    assert out.json == {u'ok': True}
    out = webapp.get('/ruleset')
    rules = out.pyquery('table#items tr')
    assert len(rules) == 10 # 9 rules plus header
def test_ruleset_post_moveup(webapp):
    """Moving a rule up returns an ok ack."""
    out = webapp.post('/ruleset', dict(
        action='moveup',
        rid=1,
    ))
    assert out.json == {u'ok': True}
@skip
def test_ruleset_post_moveup_incorrect(webapp):
    """Moving the first rule up (currently skipped; expected failure unhandled)."""
    out = webapp.post('/ruleset', dict(
        action='moveup',
        rid=0,
    ))
    assert out.json == {u'ok': True}
def test_ruleset_post_movedown(webapp):
    """Moving a rule down returns an ok ack."""
    out = webapp.post('/ruleset', dict(
        action='movedown',
        rid=1,
    ))
    assert out.json == {u'ok': True}
#TODO: movedown error on last rule
def test_ruleset_post_disable(webapp):
    """Disabling a rule returns an ok ack."""
    out = webapp.post('/ruleset', dict(
        action='disable',
        rid=1,
    ))
    assert out.json == {u'ok': True}
def test_ruleset_post_enable(webapp):
    """Enabling a rule returns an ok ack."""
    out = webapp.post('/ruleset', dict(
        action='enable',
        rid=1,
    ))
    assert out.json == {u'ok': True}
@skip
def test_ruleset_post_save(webapp):
    """Saving a rule (currently skipped)."""
    out = webapp.post('/ruleset', dict(
        action='save',
        rid=1,
        name='newrule',
        src='a',
        src_serv='SSH',
        dst='b',
        dst_serv='SSH',
        desc='New rule',
    ))
    # NOTE(review): debug leftover that always fails and dumps the response;
    # harmless while the test is skipped.
    assert 0, out
    assert out.json == {u'ok': True}
def test_ruleset_post_newabove(webapp):
    """Inserting a rule above rid=1 grows the table by one row."""
    out = webapp.get('/ruleset')
    rules = out.pyquery('table#items tr')
    assert len(rules) == 11 # 10 rules plus header
    out = webapp.post('/ruleset', dict(
        action='newabove',
        rid=1,
    ))
    #TODO: return an ack
    out = webapp.get('/ruleset')
    rules = out.pyquery('table#items tr')
    assert len(rules) == 12
def test_ruleset_post_newbelow(webapp):
    """Inserting a rule below rid=1 grows the table by one row."""
    out = webapp.get('/ruleset')
    rules = out.pyquery('table#items tr')
    assert len(rules) == 11 # 10 rules plus header
    out = webapp.post('/ruleset', dict(
        action='newbelow',
        rid=1,
    ))
    #TODO: return an ack
    out = webapp.get('/ruleset')
    rules = out.pyquery('table#items tr')
    assert len(rules) == 12
def test_ruleset_post_unknown_action(webapp):
    """An unknown ruleset action raises."""
    with raises(Exception):
        webapp.post('/ruleset', dict(action='bogus', rid=1))
def test_sib_names(webapp):
    """POST /sib_names returns the full sorted list of sibling names."""
    out = webapp.post('/sib_names')
    # Bug fix: this line was a bare comparison expression (a no-op) - the
    # response was never actually checked. It must be asserted.
    assert out.json == {u'sib_names': [u'AllSystems', u'BorderFW:eth0', u'BorderFW:eth1', u'BorderFW:eth2', u'Clients', u'InternalFW:eth0', u'InternalFW:eth1', u'SSHnodes', u'Server001:eth0', u'Servers', u'Smeagol:eth0', u'Tester:eth1', u'WebServers']}
def test_hostgroups(webapp):
    """The hostgroups page lists the demo hostgroups."""
    out = webapp.get('/hostgroups')
    assert 'SSHnodes' in out
    assert len(out.pyquery('table#items tr')) == 6
def test_hostgroups_post_save_new_hg(webapp):
    """Saving with an empty rid creates a new hostgroup (one more table row)."""
    out = webapp.get('/hostgroups')
    assert len(out.pyquery('table#items tr')) == 6
    out = webapp.post('/hostgroups', dict(
        action = 'save',
        childs = 'Border, Localhost',
        rid = '',
    ))
    out = webapp.get('/hostgroups')
    assert len(out.pyquery('table#items tr')) == 7
def test_hostgroups_post_save_update(webapp):
    """Saving with an existing rid updates in place (row count unchanged)."""
    # update existing hg
    out = webapp.get('/hostgroups')
    assert len(out.pyquery('table#items tr')) == 6
    out = webapp.post('/hostgroups', dict(
        action = 'save',
        childs = 'Border, Localhost',
        rid = '2',
    ))
    out = webapp.get('/hostgroups')
    assert len(out.pyquery('table#items tr')) == 6
def test_hostgroups_post_delete(webapp):
    """Deleting a hostgroup removes one table row."""
    out = webapp.get('/hostgroups')
    assert len(out.pyquery('table#items tr')) == 6
    out = webapp.post('/hostgroups', dict(
        action='delete',
        rid=1,
    ))
    out = webapp.get('/hostgroups')
    assert len(out.pyquery('table#items tr')) == 5
def test_hostgroups_post_fetch(webapp):
    """Fetching a hostgroup returns its name, children and edit token."""
    out = webapp.post('/hostgroups', dict(
        action='fetch',
        rid=1,
    ))
    assert out.json == {u'token': u'd74e8fce', u'childs': [u'Smeagol:eth0'], u'name': u'SSHnodes'}
def test_hostgroups_post_unknown_action(webapp):
    """An unknown hostgroups action raises."""
    with raises(Exception):
        webapp.post('/hostgroups', dict(action='bogus', rid=''))
def test_hosts(webapp):
out = webapp.get('/hosts')
assert len(out.pyquery('table#items tr')) == 9
def test_hosts_post_delete(webapp):
out = webapp.get('/hosts')
assert len(out.pyquery('table#items tr')) == 9
out = webapp.post('/hosts', dict(
action='delete',
rid=1,
))
out = webapp.get('/hosts')
assert len(out.pyquery('table#items tr')) == 8
def test_hosts_post_save_new_host(webapp):
out = webapp.get('/hosts')
assert len(out.pyquery('table#items tr')) == 9
out = webapp.post('/hosts', dict(
action = 'save',
hostname = 'foo',
iface = 'eth0',
ip_addr = '1.2.3.4',
local_fw = '1',
masklen = '24',
mng = '1',
network_fw = '0',
rid = '',
routed = 'Internet',
))
assert out.json['ok'] == True
out = webapp.get('/hosts')
assert len(out.pyquery('table#items tr')) == 10
def test_hosts_post_save_update_host(webapp):
    """Saving with an existing rid updates in place, adding no rows."""
    out = webapp.get('/hosts')
    assert len(out.pyquery('table#items tr')) == 9
    out = webapp.post('/hosts', dict(
        action='save',
        hostname='foo',
        iface='eth0',
        ip_addr='1.2.3.4',
        local_fw='1',
        masklen='24',
        mng='1',
        network_fw='0',
        rid='2',
        routed='Internet',
    ))
    # idiom fix: truth check instead of '== True' (PEP 8 E712)
    assert out.json['ok']
    out = webapp.get('/hosts')
    assert len(out.pyquery('table#items tr')) == 9
def test_hosts_post_fetch(webapp):
    """Fetching host 1 returns its JSON representation."""
    resp = webapp.post('/hosts', {'action': 'fetch', 'rid': 1})
    assert resp.json == {u'masklen': u'24', u'iface': u'eth1', u'ip_addr': u'10.66.2.1', u'hostname': u'InternalFW', u'routed': [], u'local_fw': 1, u'token': u'db9018c1', u'network_fw': 1, u'mng': 1}
def test_hosts_post_unknown_action(webapp):
    """An unrecognized action value must raise."""
    with raises(Exception):
        webapp.post('/hosts', {'action': 'bogus', 'rid': ''})
def test_net_names(webapp):
    """POST /net_names returns the sorted list of network names."""
    resp = webapp.post('/net_names')
    assert resp.json == {u'net_names': [u'Internet', u'production_net', u'rivendell', u'shire']}
def test_networks(webapp):
    """GET /networks renders five table rows."""
    resp = webapp.get('/networks')
    assert len(resp.pyquery('table#items tr')) == 5
def test_networks_post_save_new_network(webapp):
    """Saving with an empty rid creates a network; fetch returns it back."""
    out = webapp.get('/networks')
    assert len(out.pyquery('table#items tr')) == 5
    out = webapp.post('/networks', dict(
        action='save',
        name='foo',
        ip_addr='1.2.3.4',
        masklen='24',
        rid='',
    ))
    # idiom fix: truth check instead of '== True' (PEP 8 E712)
    assert out.json['ok']
    out = webapp.get('/networks')
    assert len(out.pyquery('table#items tr')) == 6
    out = webapp.post('/networks', dict(
        action='fetch',
        rid=4,
    ))
    assert out.json['name'] == 'foo'
def test_networks_post_save_update_network(webapp):
    """Saving with an existing rid updates in place, adding no rows."""
    out = webapp.get('/networks')
    assert len(out.pyquery('table#items tr')) == 5
    out = webapp.post('/networks', dict(
        action='save',
        name='foo',
        ip_addr='1.2.3.4',
        masklen='24',
        rid='2',
    ))
    # idiom fix: truth check instead of '== True' (PEP 8 E712)
    assert out.json['ok']
    out = webapp.get('/networks')
    assert len(out.pyquery('table#items tr')) == 5
def test_networks_post_delete(webapp):
    """Deleting network 1 removes exactly one row."""
    resp = webapp.get('/networks')
    assert len(resp.pyquery('table#items tr')) == 5
    webapp.post('/networks', {'action': 'delete', 'rid': 1})
    resp = webapp.get('/networks')
    assert len(resp.pyquery('table#items tr')) == 4
def test_networks_post_fetch(webapp):
    """Fetching network 1 returns its JSON representation."""
    resp = webapp.post('/networks', {'action': 'fetch', 'rid': 1})
    assert resp.json == {u'masklen': 24, u'ip_addr': u'10.66.2.0', u'name': u'production_net', u'token': u'657ed9ec'}
def test_networks_post_unknown_action(webapp):
    """An unrecognized action value must raise."""
    with raises(Exception):
        webapp.post('/networks', {'action': 'bogus', 'rid': ''})
def test_services(webapp):
    """GET /services renders eight table rows."""
    resp = webapp.get('/services')
    assert len(resp.pyquery('table#items tr')) == 8
def test_services_post_save_new_network_tcp(webapp):
    """Saving a new TCP service adds a row; fetch returns it back."""
    out = webapp.get('/services')
    assert len(out.pyquery('table#items tr')) == 8
    out = webapp.post('/services', dict(
        action='save',
        name='foo',
        protocol='TCP',
        ports='80',
        rid='',
    ))
    # idiom fix: truth check instead of '== True' (PEP 8 E712)
    assert out.json['ok']
    out = webapp.get('/services')
    assert len(out.pyquery('table#items tr')) == 9
    out = webapp.post('/services', dict(
        action='fetch',
        rid=7,
    ))
    assert out.json['name'] == 'foo'
def test_services_post_save_new_network_icmp(webapp):
    """An ICMP service stores its icmp_type in the 'ports' field."""
    out = webapp.post('/services', dict(
        action='save',
        name='foo',
        protocol='ICMP',
        icmp_type='8',
        rid='',
    ))
    # idiom fix: truth check instead of '== True' (PEP 8 E712)
    assert out.json['ok']
    out = webapp.post('/services', dict(
        action='fetch',
        rid=7,
    ))
    assert out.json['name'] == 'foo'
    assert out.json['protocol'] == 'ICMP'
    assert out.json['ports'] == '8'
def test_services_post_save_new_network_other_protocol(webapp):
    """A portless protocol (AH) can be saved and fetched back."""
    out = webapp.post('/services', dict(
        action='save',
        name='foo',
        protocol='AH',
        rid='',
    ))
    # idiom fix: truth check instead of '== True' (PEP 8 E712)
    assert out.json['ok']
    out = webapp.post('/services', dict(
        action='fetch',
        rid=7,
    ))
    assert out.json['name'] == 'foo'
    assert out.json['protocol'] == 'AH'
def test_services_post_save_update_network(webapp):
    """Saving with an existing rid updates in place, adding no rows."""
    out = webapp.get('/services')
    assert len(out.pyquery('table#items tr')) == 8
    out = webapp.post('/services', dict(
        action='save',
        name='foo',
        protocol='TCP',
        ports='80',
        rid='2',
    ))
    # idiom fix: truth check instead of '== True' (PEP 8 E712)
    assert out.json['ok']
    out = webapp.get('/services')
    assert len(out.pyquery('table#items tr')) == 8
def test_services_post_delete(webapp):
    """Deleting service 1 removes exactly one row."""
    resp = webapp.get('/services')
    assert len(resp.pyquery('table#items tr')) == 8
    webapp.post('/services', {'action': 'delete', 'rid': 1})
    resp = webapp.get('/services')
    assert len(resp.pyquery('table#items tr')) == 7
def test_services_post_fetch(webapp):
    """Fetching service 1 returns its JSON representation."""
    resp = webapp.post('/services', {'action': 'fetch', 'rid': 1})
    assert resp.json == {u'token': u'89a7c78e', u'protocol': u'TCP', u'ports': u'80', u'name': u'HTTP'}
def test_services_post_unknown_action(webapp):
    """An unrecognized action value must raise."""
    with raises(Exception):
        webapp.post('/services', {'action': 'bogus', 'rid': ''})
def test_manage(webapp):
    """GET /manage renders three buttons."""
    resp = webapp.get('/manage')
    assert len(resp.pyquery('button')) == 3
def test_save_needed(webapp):
    """A fresh session reports no unsaved changes."""
    out = webapp.get('/save_needed')
    # idiom fix: avoid '== False' comparison (PEP 8 E712)
    assert not out.json['sn']
def test_save_post(webapp):
    """POST /save with a commit message succeeds."""
    out = webapp.post('/save', dict(
        msg='test',
    ))
    # idiom fix: truth check instead of '== True' (PEP 8 E712)
    assert out.json['ok']
def test_reset_post(webapp):
    """POST /reset succeeds."""
    out = webapp.post('/reset')
    # idiom fix: truth check instead of '== True' (PEP 8 E712)
    assert out.json['ok']
@skip
def test_check_post(webapp):
    """POST /api/1/check succeeds. Currently skipped."""
    out = webapp.post('/api/1/check')
    # idiom fix: truth check instead of '== True' (PEP 8 E712)
    assert out.json['ok']
def test_rss(webapp):
    """The RSS index page links to the deployments channel."""
    resp = webapp.get('/rss')
    assert 'rss/deployments' in resp
def test_rss_channel(webapp):
    """The deployments channel embeds its own absolute URL and RSS markup."""
    resp = webapp.get('/rss/deployments')
    assert 'http://localhost/rss/deployments' in resp
    assert 'rss' in resp
|
A religion class brings to life Latter-day Saints from the Church’s beginning to present day.
It’s story time in 174 JSB, and students have gathered to hear David F. Boone (BA ’78, MA ’81, EdD ’92), associate professor of Church history, teach important gospel principles while spinning yarns about cowboys, poets, pioneers, and civic leaders.
This is Men and Women of Mormondom, a one-credit class that studies how individuals, prominent and obscure, have lived the gospel since 1830.
Some of the names Boone teaches are familiar—like Parley Pratt, missionary; Jane James, African-American pioneer; and Eliza Snow, Relief Society president. Others aren’t so recognizable—like Willard Bean, boxing missionary; Sam Cowley, special agent; and Ellis Shipp, one of Utah’s first female doctors.
Boone has long been interested in biography. As an undergraduate, he enrolled in Men and Women of Mormondom, instructed by Ivan J. Barrett (MA ’47), who pioneered the class and taught it in his 40-year tenure. “He was a hero of mine,” says Boone, who joined the staff in 1984 and begged to continue the class.
Considering that “Mormondom” constitutes a planet-sized geographical span and 183 years, he has quite the task choosing the 12 or 13 people he’ll lecture on each semester. While Boone is particularly fascinated with 19th-century Church history, he also scours personal histories from the 20th and 21st centuries. When he finds an intriguing thread about someone, he’ll grab a new folder and start collecting stories. When the folder is bursting with sources, it’s time.
Historic settings help vivify history. At the end of a semester, the class takes a field trip to Echo Canyon, Mormon Flat, and various sites in Salt Lake proper to see for themselves where Saints came and went, lived and died.
In 1890s Tennessee, a bloodthirsty mob tied two Mormon missionaries to a tree one afternoon and debated what to do with them. One of the missionaries called out, “You’re the biggest bunch of cowards I’ve ever seen. Where I’m from, they let a man fight. Turn me loose,” he challenged. “I’ll fight you two at a time—and I’ll whip the whole bunch of you.” Eager to test this claim, the mobsters cut him loose.
“This puny little guy,” says Professor Boone, clobbered four pairs of them before the group finally started to disintegrate. But before they could get very far, the missionary said, “Don’t go away mad. I can preach better than I can fight. Now sit down.” They did.
Meet Willard Washington Bean, boxer-missionary extraordinaire. “His tactics of teaching the gospel were second to nobody’s,” says Boone. And it was a good thing, too, since between his stints as a Brigham Young Academy student, a PE teacher in Utah, a professional boxer in San Francisco, and the coach of future heavyweight champion Jack Dempsey, Bean had a knack for getting called to the most hostile, anti-Mormon missions of his time.
Years after his mission to the South, Bean and his wife, Rebecca, were called to reintroduce the Church to Palmyra, N.Y. In 1915 they moved into the old Smith farmhouse, where a “welcome” party soon arrived and hinted they had better move on. When it was clear the Beans weren’t going to budge, the town changed tactics, ostracizing the entire family instead. Boone says the townspeople crossed the street when they saw the family coming, forced one child to wear the dunce cap in school, and refused to sell them groceries or even midwife for Rebecca.
After several years of work and little progress, Bean decided it was time to resurrect “Kid Bean,” the fightin’ parson. He rented a hall, set up a ring, and challenged the men of Palmyra to come and take their chances to finally whip a Mormon.
It was Tennessee all over again. Bean not only took his foes out in quick succession but also started doing somersaults and calisthenics between opponents. “He’s an old man,” Boone exclaims, “and he’s doing somersaults!” People were suddenly kinder to the Beans and more open to attending the Mormon Sunday School.
This pattern had clear echoes in Bean’s later life. When he and Rebecca returned to Utah 24 years after arriving in Palmyra, they left behind not only a thriving branch but also an entire town of bosom friends.
Eager to join the Saints, James and her family set out for Nauvoo a year after their baptisms. But at one port, the ship captain demanded papers proving their status as free blacks—papers that none of them had, having never been slaves. They were miraculously let on without them, but at the next port in Buffalo, N.Y., the ship sailed away with their bags, leaving them destitute.
James’ faith and desire for spiritual community were so strong, says Boone, that the family continued the journey of several hundred miles—on foot.
“A lot of the members would have fallen off the trail before that,” Boone notes, echoing the sentiments of one Dr. Bernhisel, who was with the Prophet when the devoted band arrived penniless. Emma and Joseph eventually invited James to stay in their home, and James grew close to them, says Boone. She was heartbroken when the Prophet was martyred, but she and her new husband, Isaac James, pressed forward into the West, becoming two of the first African Americans to settle in Utah.
Throughout her life she had great spiritual gifts. During her time in the Prophet’s home, she received a spiritual witness about the significance of temple clothing just by seeing it among the laundry and pondering it at length. Although James wasn’t able to receive her temple blessings during her lifetime, she remained faithful.
When her husband, Lloyd, died after almost 11 years of marriage, Stella Harris Oaks (BA ’28) faced the prospect of raising three children alone and felt strongly that she needed more education. The following years demanded every ounce of her strength, but with family support, Oaks received a master’s in guidance and personnel administration from Columbia University.
But Stella Oaks was a woman passionate about education—and doggedly determined.
Oaks graduated at the top of her class in high school; received a teaching certificate along with her bachelor’s in dramatic arts; helped put her younger sister through college; worked as a high school teacher; and served as director for adult education in Provo. Work was fun and fulfilling for her from a young age: while thinning beets with her siblings, she’d entertain them with stories, and they had to work hard to keep up with her or they would miss out. Even while battling lymphoma in her later years, Oaks was full of energy and purpose—once giving a presentation to a committee that included the Relief Society general presidency the same day she’d had a hospital treatment.
Beginning in 1955, Oaks served two terms as the only city councilwoman of Provo and one term as assistant mayor, a visible career that made her widely known and respected, says her grandson. She was constantly requested as a guest speaker. Once, the postal service directed a letter whose envelope read only, “Stella Oaks, Salt Lake City,” to her home in Provo. Another time, a teenage girl called her to ask when it was all right to kiss boys.
But above all else, says Professor Oaks, Stella served her family. In one family story, she sent her brother and his family on a weeklong getaway to Yellowstone, ignoring his protests that he had a farm to run. “Do you think I’ve forgotten how to milk a cow?” she asked. In another story, she trusted her 14-year-old son, now Elder Dallin H. Oaks (BS ’54), to bus himself to Denver to take a radio-licensing test.
Most Mormons equate Porter Rockwell with the “rough-and-ready, shoot-’em-up, Wild West persona,” says Professor Boone. To be sure, Rockwell’s role as a territorial deputy U.S. marshal and reputed sharpshooter branded him as an Old West hero (disputed hero, that is—“If you weren’t an outlaw,” says Boone). But there was certainly more to Rockwell than guns and horses and draws.
He had a delightful sense of humor. For nine months, Rockwell sat in a Missouri prison without trial and without sentence. At the time neither the city nor the state was legally responsible for feeding Rockwell or any other prisoner, so the good citizens of Missouri provided food from their own tables. For comic relief, Rockwell would tie hardened lumps of fried cornbread called “corn dodgers” on strings and dangle them out the window. Children confronted him from outside the prison walls. “What are you doing?” they asked. “Fishin’ for pukes,” he replied—“pukes” being a derogatory nickname the Illinoisans called the Missourians. “Any bites?” they said. “Nope—but had some glorious nibbles!” The children would stick around, dissolving into laughter when an adult passed and encountered the wriggling victuals hanging from a barred window. “The mental image of it makes me chuckle,” says Boone.
Rockwell shared his gentle side with others, too. Boone says accounts suggest that upon his safe return from prison, Joseph issued Rockwell a Samson-reminiscent mandate: don’t cut your hair, and “neither bullet nor blade” will take your life. From then on Rockwell wore his hair long, sometimes braiding it up under his hat so criminals wouldn’t recognize him from far away. But years later, Rockwell surrendered his hair to make a wig for one of Joseph’s sisters-in-law, who had lost her hair to typhoid fever. It appears, says Boone, that he was willing to forego the potential spiritual protection of keeping his hair to help someone who was dear to the Prophet.
Most people wouldn’t come within miles of a mob leader who led a killing spree on their family—not to mention their entire religious community. But not Amanda Barnes Smith.
Smith grew up in Massachusetts and Ohio, where she met and married her husband, Warren, and converted to Campbellism—thanks to future LDS leaders Orson Hyde and Sidney Rigdon. Several years later she and Warren were baptized into the Church and participated in the settlement of Kirtland and the building of the temple there.
Professor Boone likes to share a lesser-known story about Smith. A few days after the massacre, the order came for the Mormons to leave the state. Robbed of all her possessions, Smith had no provisions and no transportation except her feet. “She had a backbone of steel,” Boone says: she left her four children at the site of the massacre, walked to the mob leader’s home, banged on the door, and said, pointing to the livestock, “You have my horse.” When the mobster wanted to sell it to her and then charge her for having fed it, she firmly replied, “No. I don’t have any money. It’s my horse. I need it. I’ll take it now.” One account reports that she used her apron as a lead rope to get the horse home.
“Zion” in Utah was but a few years old when Elder George Q. Cannon washed up on the shores of Maui, 3,000 miles from home, without purse, scrip, or friend. The first friend he made is now a revered figure in Hawaiian Church history: Jonathan Napela.
Napela took to the gospel immediately, but his conversion cost him dearly: “He was a civilian municipal leader, a city father, a judge, a landowner,” says Professor Boone. “And most all of those he lost for the gospel’s sake.” When Napela converted, bringing many villagers with him, his peers made their disapproval clear. The rift ran so deep that eventually the gathering place for the Hawaiian Saints had to be moved from Lanai to Laie, where the temple and the BYU–Hawaii campus still stand today.
Napela was known for his faith. “One day was terribly overcast,” says Boone, “and they’d planned a conference. The missionaries were wondering if they needed to move inside because of the weather. And [Napela] came and he said, ‘Where’s your faith, elders? . . . You didn’t pray that it wouldn’t be overcast, you prayed that it wouldn’t rain. It’s not raining. Move outside.’” Boone says the rain started after the conference ended.
Napela also had the distinction of starting an unofficial MTC—in his home. He lodged the missionaries and coached them rigorously in Hawaiian. “I will provide food and shelter,” Boone paraphrases Napela, “if you will study.” This was a considerable sacrifice, since his debts exceeded his assets.
But his willingness to give of himself was most apparent when his beloved wife, Kitty Richardson Napela, contracted leprosy. At the time, Boone says, Hawaiian law made divorce extremely easy for couples in their situation, since the afflicted was quarantined—for life—in a squalid colony called Kalaupapa. Rather than abandon his sweetheart, Napela consigned himself to the same fate. He lived there as her kokua, or help, and went to work as a branch president and government liaison to improve living conditions there. Eventually Napela contracted the disease himself, dying two years before Kitty.
Perhaps one of the most remarkable aspects of his final sacrifice, says Boone, is that it is unconfirmed whether Kitty ever joined the Church or received her temple blessings. Incongruent views on faith often strike discord between spouses, but Napela never faltered in his efforts to love, support, and serve his wife.
When “Mattie” was a young girl, she cut her hair short and had a tailor make her a pair of men’s boots. Later, Martha earned three college degrees, worked as a doctor, rallied for women’s right to vote, and was voted the first female state senator in the history of the United States—defeating both her husband and famous suffragette Emmeline B. Wells in the election.
|
#!/usr/bin/env python
import os
import shutil
import sys
import subprocess
import tarfile
import urllib
class Bootstrap:
    """Bootstrap a self-contained virtualenv from a PyPI source tarball.

    Downloads the virtualenv sdist, unpacks it, creates an environment,
    installs virtualenv into it, and optionally installs packages listed
    in a requirements file.
    """

    def __init__(self,
                 version="12.0.4",
                 base='http://pypi.python.org/packages/source/v/virtualenv',
                 python="python2",
                 env="pyenv",
                 requirements="requirements.txt"):
        self.version = version          # virtualenv release to fetch
        self.base = base                # PyPI base URL for the sdist
        self.python = python            # interpreter used to run virtualenv.py
        self.env = env                  # directory name of the created env
        self.dirname = 'virtualenv-' + self.version
        self.tgz_file = self.dirname + '.tar.gz'
        self.venv_url = self.base + '/' + self.tgz_file
        self.requirements = requirements

    def shellcmd(self, cmd, echo=False):
        """Run 'cmd' in the shell and return its standard out."""
        if echo:
            print('[cmd] {0}'.format(cmd))
        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
        out = p.communicate()[0]
        if echo:
            print(out)
        return out

    def download(self):
        """Fetch the virtualenv tarball from PyPI."""
        urllib.urlretrieve(self.venv_url, self.tgz_file)

    def extract(self):
        """Untar the downloaded archive into the current directory."""
        tar = tarfile.open(self.tgz_file, "r:gz")
        tar.extractall()
        # fix: close the archive instead of leaking the file handle
        tar.close()

    def create(self):
        """Create the initial env by running the unpacked virtualenv.py."""
        self.shellcmd('{0} {1}/virtualenv.py {2}'.format(self.python, self.dirname, self.env))

    def install(self):
        """Install the virtualenv package itself into the initial env."""
        self.shellcmd('{0}/bin/pip install {1}'.format(self.env, self.tgz_file))

    def install_libs(self):
        """Install the packages listed in the requirements file into the env."""
        # fix: docstring was a copy-paste of install()'s and described
        # the wrong action
        self.shellcmd('{0}/bin/pip install -r {1}'.format(self.env, self.requirements))

    def cleanup(self):
        """Remove the downloaded tarball and the unpacked source tree."""
        os.remove(self.tgz_file)
        shutil.rmtree(self.dirname)

    def setup(self):
        """Bootstrap a python environment: download, extract, create,
        install, clean up, then install requirements if the file exists."""
        self.download()
        self.extract()
        self.create()
        self.install()
        self.cleanup()
        if os.path.isfile(self.requirements):
            self.install_libs()
if __name__ == "__main__":
    # Script entry point: run the full bootstrap sequence.
    Bootstrap().setup()
|
Many methods, technologies, standards, and languages exist to structure and describe data. The aim of my PhD thesis was to find common features in these methods to determine how data is actually structured and described. One result of the analysis is a categorization of data structuring methods (included as section 4.1).
Given the number and variety of data structuring methods one can categorize them by history and origin, by type of application, by complexity, and by many other criteria. This approach, however, can result in rather arbitrary classifications, because a single facet has to be chosen and because most facets are not selective for all instances. Another approach, that better fits to how people cognitively perceive and classify things, is grouping based on prototypes which act as cognitive reference points (Lakoff 1987; Rosch 1983). Following this approach, categories of data structuring methods are not defined by selected features, but data structuring methods are clustered by similarity, until prototypical methods emerge. A prototype can act as a good example of a certain category, while other instances belonging to this category are less central. For instance in western society, a chair is a central prototype of furniture, although other furniture may share few properties with chairs.
The main purpose was chosen as dividing facet because it is independent from particular use cases and applications. In short, a main purpose tells what a particular method mainly does with data. For instance the main purpose of storage systems like databases and file systems is storage. The data structuring method’s main purpose can act as guideline to find the nearest prototype from the categorization. Still this dividing facet should not be confused with a strict classifier as known from more formal approaches of categorization. Instances from each category can also serve multiple purposes, just like one can use a chair not only as furniture for sitting but also to stand on it when changing a light bulb or giving a speech. To give an example, the OWL schema language is based on ERM, so it can also be used for conceptual modeling, but its main purpose is putting formal constraints and rules on RDF data.
The categorization of data structuring methods gives a very broad view on data. On the other end of granularity of description there is the pattern language of structures in data.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# parallel-untar.py - unpack tarball subdirectories in parallel
#
# copyright (c) 2015 Ben England, Red Hat, under Apache license
# see http://www.apache.org/licenses/LICENSE-2.0 for license details
import os
import errno
import tarfile
# we use multiprocessing module to create separate sub-processes and avoid
# the constraints of the python GIL
import multiprocessing
import sys
import time
# enable verbose tracing when the DEBUG environment variable is set
debug = (os.getenv('DEBUG') is not None)

NOTOK = 1  # process failure exit status


def usage(msg):
    """Print an error plus the usage summary, then exit with failure."""
    print('ERROR: %s' % msg)
    print('usage: parallel-untar.py your-file.tar [ max-threads ]')
    sys.exit(NOTOK)


fmt_dangling_link = \
    'ERROR: %s is a link pointing to an absolute pathname that does not exist'
fmt_link2nonexistent = \
    '%s is a link pointing to a relative non-existent file'
# parse command line inputs
thread_count = 4
start_time = time.time()
if len(sys.argv) > 2:
    try:
        thread_count = int(sys.argv[2])
    except ValueError:
        # fix: dropped the unused 'as e' binding
        usage('could not parse thread count %s' % sys.argv[2])
elif len(sys.argv) < 2:
    usage('must supply .tar file')
fn = sys.argv[1]
if fn == '--help' or fn == '-h':
    usage('so you need help, we all knew that ;-)')
print('untarring file %s with up to %d parallel threads' % (fn, thread_count))
if not fn.endswith('.tar'):
    # fix: the two fragments used to be joined with no separator,
    # producing "...tar filesuncompress first..."
    usage('parallel-untar.py does not yet support compressed tar files; ' +
          'uncompress first to .tar file then run it on that')
if not os.path.exists(fn):
    usage('does not exist: %s' % fn)
# this class partitions directories in tar file amongst worker threads
# in a static way
# (thread k handles all directories with index d mod thread_count == k )
# so that no preprocessing is needed
class untarThread(multiprocessing.Process):
    """Worker process that extracts a statically-assigned subset of a tar.

    Worker k handles every directory whose ordinal index d satisfies
    d mod thread_count == k, so no preprocessing pass over the archive
    is needed.  Softlinks to not-yet-existing relative targets are
    postponed until every worker has finished files and directories
    (coordinated via the pipe barrier in run()).
    """

    def __init__(
            self, parent_conn_in, child_conn_in,
            index_in, thread_count_in, archive_path_in):
        # init base class
        multiprocessing.Process.__init__(self)
        # save inputs for run()
        self.parent_conn = parent_conn_in    # pipe end used by the parent
        self.child_conn = child_conn_in      # pipe end used by this worker
        self.index = index_in                # this worker's slot (0-based)
        self.thread_count = thread_count_in
        self.archive_path = archive_path_in
        # counters for reporting back to the parent
        self.file_count = 0
        self.dir_count = 0
        self.dir_create_collisions = 0

    def __str__(self):
        # fix: original format string had three conversions ('%d %s %s')
        # but only two arguments, so str() raised TypeError
        return 'untarThread %d %s' % (self.index, self.archive_path)

    def run(self):
        """Extract this worker's share of the archive, then handle links."""
        my_dirs = {}
        link_queue = []
        archive = tarfile.open(name=self.archive_path)
        archive.errorlevel = 2  # want to know if errors
        count = self.thread_count - 1
        for m in archive:  # for each thing in the tarfile
            if m.isdir():  # if a directory
                stripped_name = m.name.strip(os.sep)  # remove trailing '/'
                count += 1
                # fix: compare against self.thread_count, not the module
                # global thread_count (same value in the script, but the
                # class should not depend on script globals)
                if count >= self.thread_count:
                    count = 0
                if count == self.index:
                    if debug:
                        print('thread %d recording on count %d dir %s' %
                              (self.index, count, stripped_name))
                    # value doesn't matter, my_dirs is just a set
                    my_dirs[stripped_name] = self.index
                    try:
                        archive.extract(m)
                    except OSError as e:
                        # race condition if > 1 thread
                        # creating a common parent directory,
                        # just back off different amounts
                        # so one of them succeeds.
                        if e.errno == errno.EEXIST:
                            time.sleep(0.1 * self.index)
                            self.dir_create_collisions += 1
                            archive.extract(m)
                        else:
                            raise e
                    if debug:
                        print('%d got dir %s' % (self.index, m.name))
                    self.dir_count += 1
            else:
                # not a directory: only extract members whose parent
                # directory belongs to this worker
                dirname = os.path.dirname(m.name)
                # ASSUMPTION: directory object is always read from tarfile
                # before its contents
                if dirname in my_dirs:
                    if m.islnk() or m.issym():
                        print('link %s -> %s' % (m.name, m.linkname))
                        if not os.path.exists(m.linkname):
                            if m.linkname.startswith(os.sep):
                                if debug:
                                    print(fmt_dangling_link % m.linkname)
                            else:
                                # BUT DO IT ANYWAY, that's what tar xf does!
                                # FIXME: how do we know if link target is a
                                # file within the untarred directory tree?
                                # Only postpone link creation for these.
                                if debug:
                                    print(fmt_link2nonexistent % m.linkname)
                                link_queue.append(m)
                                continue
                    try:
                        archive.extract(m)  # not a link or dir at this point
                    except OSError as e:
                        if not (e.errno == errno.EEXIST and m.issym()):
                            raise e
                    if debug:
                        print('%d got file %s' % (self.index, m.name))
                    self.file_count += 1
        # we postpone links to non-existent files in case other threads
        # need to create target files; these links are created after
        # all other subprocesses have finished directories and files
        # to ensure that this succeeds.
        self.child_conn.send('y')
        # block until all subprocesses finished above loop
        self.child_conn.recv()
        # now it should be safe to create softlinks that point within tree
        for m in link_queue:
            try:
                archive.extract(m)
            except OSError as e:
                if not (e.errno == errno.EEXIST and m.issym()):
                    raise e
            if debug:
                print('%d got file %s' % (self.index, m.name))
            self.file_count += 1
        archive.close()
        # report final counters to the parent over the pipe
        self.child_conn.send((self.file_count, self.dir_count,
                              self.dir_create_collisions))
# create & start worker threads, wait for them to finish
worker_pool = []
for n in range(0, thread_count):
    # one Pipe per worker: parent keeps one end, worker gets the other
    (parent_conn, child_conn) = multiprocessing.Pipe()
    t = untarThread(parent_conn, child_conn, n, thread_count, fn)
    worker_pool.append(t)
    # daemonize so stray workers die with the parent
    t.daemon = True
    t.start()
if debug:
    print('thread pool: ' + str(worker_pool))
# implement barrier for softlink creation within the tree:
# every worker sends 'y' when its files/dirs are done, then waits for a
# 'y' back before creating postponed softlinks
for t in worker_pool:
    assert t.parent_conn.recv() == 'y'
for t in worker_pool:
    t.parent_conn.send('y')
elapsed_time = time.time() - start_time
print('reached softlink barrier at %7.2f sec' % elapsed_time)
# collect per-worker counters and aggregate totals
total_files = 0
total_dirs = 0
for t in worker_pool:
    (w_file_count, w_dir_count, w_dir_create_collisions) = \
        t.parent_conn.recv()
    t.join()
    print('thread %d file-count %d dir-count %d create-collisions %d' %
          (t.index, w_file_count, w_dir_count, w_dir_create_collisions))
    total_files += w_file_count
    total_dirs += w_dir_count
elapsed_time = time.time() - start_time
print('all threads completed at %7.2f sec' % elapsed_time)
# throughput summary
fps = total_files / elapsed_time
print('files per sec = %9.2f' % fps)
dps = total_dirs / elapsed_time
print('directories per sec = %8.2f' % dps)
|
Camping Jávea is located just 2 kilometers from the beach in a relaxed environment with all the amenities.
Our pools are situated within a large garden of 1000m2, with spectacular views of Montgó. There are 2 pools, one semi-olympic and the other is a children’s pool. Enjoy a refreshing dip and relax!
|
# -*- coding: utf-8 -*-
# transle bilibili's av json file into database
import json
import mysql.connector
import ntpath
import urllib2,time
from tt import GetVideoInfo
# MySQL connection parameters for the local 'dbfp' database.
# NOTE(review): empty root password -- development setup only.
config = {
    'user': 'root',
    'password': '',
    'host': 'localhost',
    'database': 'dbfp',
    'raise_on_warnings': True,
}
#con = mysql.connector.connect(**config)
#cur = con.cursor()
#UPDATE `dbfp`.`dbfp_av_info` SET `create_stamp` = FROM_UNIXTIME('1246000296') WHERE `dbfp_av_info`.`id` = 1;
# parameterized INSERT template for one video row; the unix timestamp is
# converted to DATETIME by MySQL via FROM_UNIXTIME
add_av_info_req = ("INSERT INTO `dbfp_av_info` "
                   "(`id`, `av`, `title`, `up_id`, `create_stamp`, `create_at`, `play_times`, `collect_times`, `dan_count`, `review_times`, `coins_count`)"
                   "VALUES(NULL, %s, %s, %s, FROM_UNIXTIME(%s), %s, %s, %s, %s, %s, %s);")
# parameterized INSERT template for one uploader row
add_up_info_req = ("INSERT INTO `dbfp_up_info`"
                   "(`uid`, `name`, `lvl`, `sign`, `birth`, `reg_date`, `article`, `follow_count`, `fans_count`)"
                   "VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s);")
# crawl output is sharded over several directories by av-id range
# (see read_av_info for the range boundaries)
av_dir = "D:\\PProject\\bilibili\\allavinfo"
av_dir2 = "D:\\PProject\\bilibili\\allavinfo2-1"
av_dir3 = "D:\\PProject\\bilibili\\allavinfo2"
av_dir4 = 'D:\\PProject\\bilibili\\allavinfo3'
av_dir5 = 'D:\\PProject\\bilibili\\allavinfo4'
user_dir = "D:\\PProject\\bilibili\\alluserinfo"
def getURLContent(url):
while 1:
try:
headers = {'User-Agent':'Mozilla/5.0 (iPad; CPU OS 4_3_5 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8L1 Safari/6533.18.5',
#'Cookie':'pgv_pvi=9629054976; pgv_si=s7276307456; sid=a84gv3d7; fts=1438695578; LIVE_BUVID=a30f235e687b15cddec7073e271b78dc; LIVE_BUVID__ckMd5=1aff9d63faeeb5dd; PLHistory=bVm2%7Co2}GW; IESESSION=alive; DedeUserID=2754937; DedeUserID__ckMd5=62d03cc207ac353c; SESSDATA=8d08bf28%2C1442638544%2C030d0e52; LIVE_LOGIN_DATA=4f48da9e73ffdd64590fc2812487cb4fb2d8d70f; LIVE_LOGIN_DATA__ckMd5=8840c45c091b3590; _cnt_dyn=0; _cnt_pm=0; _cnt_notify=21; uTZ=-480; DedeID=2837864; _dfcaptcha=55b092b61a3f77ba89cde89af6ed7f90; CNZZDATA2724999=cnzz_eid%3D1895263873-1425444951-%26ntime%3D1442045119'
}
req = urllib2.Request(url = url,headers = headers);
content = urllib2.urlopen(req,timeout = 10).read();
except:
print 'connect error...'
time.sleep(20)
continue
break
return content;
def GetuserInfo(id):
    """Scrape the embedded _bili_space_info JSON blob from a user page."""
    url = "http://space.bilibili.com/%d#!/index" % (id)
    page = getURLContent(url)
    start = page.find("_bili_space_info")
    end = page.find("var _bili_is_")
    blob = page[start:end]
    blob = blob.replace("_bili_space_info = eval(", '')
    blob = blob.replace(');', "")
    return blob
def create_av_info(file_name, di):
    """Build the parameter tuple for add_av_info_req from one av JSON dict.

    `file_name` is the av id as an int or numeric string; `di` is the
    decoded JSON for that video.  Returns -1 when `file_name` has an
    unsupported type.
    """
    # idiom fix: isinstance instead of type(x) == type(...) comparisons
    if isinstance(file_name, str):
        av = int(file_name)
    elif isinstance(file_name, int):
        av = file_name
    else:
        return -1
    # an absent/anonymous author maps to uploader id 0
    # (idiom fix: 'is None' instead of '== None')
    if di['author'] is None:
        up_id = 0
    else:
        up_id = int(di['mid'])
    # column order follows add_av_info_req:
    # av, title, up_id, create_stamp, create_at, play_times,
    # collect_times, dan_count, review_times, coins_count
    # NOTE(review): 'video_review' feeds dan_count and 'review' feeds
    # review_times here -- confirm this mapping is intended.
    return (int(av), di['title'], up_id, di['created'],
            di['created_at'] + ':00', int(di['play']), int(di['favorites']),
            int(di['video_review']), int(di['review']), int(di['coins']))
def create_user_info(di):
    """Turn a parsed user-profile dict into the tuple expected by the
    up-info INSERT statement.

    Column order: uid, name, lvl, sign, birth, reg_date, article,
    follow_count, fans_count.  Returns () for non-dict input.
    """
    if type(di) != dict:
        return ()
    # An empty signature is stored as the literal string "NULL".
    sign = di["sign"] and di["sign"] or "NULL"
    return (int(di['mid']), di['name'], di['level_info']["current_level"],
            sign, di['birthday'], di["regtime"],
            di["article"], di["attention"], di["fans"])
def read_av_info(id):
    """Load the cached JSON record for video *id* from disk.

    Returns
    -------
    dict
        The parsed record, or the integer 404 when the cache file is
        missing or the record was saved with error code -403.

    Raises
    ------
    NameError
        When the cached record carries error code -503 (a rate-limit
        stub); the caller must re-fetch this id.
    """
    if type(id) != type(0):
        id = int(id)
    # The av dumps are sharded over several directories by id range.
    if 0 <= id <= 174999:
        this_dir = av_dir
    elif 175000 <= id <= 290998:
        this_dir = av_dir2
    elif 290999 <= id <= 469999:
        this_dir = av_dir3
    elif 470000 <= id <= 539999:
        this_dir = av_dir4
    else:
        this_dir = av_dir5
    FILE = this_dir + '\\' + str(id) + '.json'
    if not ntpath.exists(FILE):
        return 404
    f = open(FILE, 'r')
    jsoncon = f.readline()
    f.close()
    di = json.loads(jsoncon)
    if 'code' in di and di['code'] == -403:
        return 404
    elif 'code' in di and di['code'] == -503:
        # NOTE(review): the statements that followed this raise in the
        # original (re-fetching via GetVideoInfo and rewriting the
        # cache file, which was opened in *read* mode for writing) were
        # unreachable dead code and have been removed; the intended
        # refresh logic should be confirmed with the author.
        raise NameError(str(id))
    return di
def read_user_info(id):
    """Return the parsed profile dict for user *id*, reading the
    on-disk cache when present and scraping the live site otherwise."""
    if type(id) != type(""):
        id = str(id)
    path = user_dir + '\\' + id + '.json'
    if ntpath.exists(path):
        handle = open(path, 'r')
        jsoncon = handle.readline()
        handle.close()
    else:
        jsoncon = GetuserInfo(int(id))
    return json.loads(jsoncon)
def does_user_info_exist(mysql_conn, id):
    """Return True when a dbfp_up_info row already exists for uid *id*."""
    query = ("select uid from dbfp_up_info"
             " where uid=%s")
    cur = mysql_conn.cursor()
    cur.execute(query, (id,))
    found = False
    # any returned row means the uploader is already stored
    for _row in cur:
        found = True
    cur.close()
    return found
def insert_up_info(mysql_conn, tup):
    """Insert one uploader row (statement `add_up_info_req`, a
    module-level global) and commit immediately."""
    cursor = mysql_conn.cursor()
    cursor.execute(add_up_info_req, tup)
    mysql_conn.commit()
    cursor.close()
def insert_av_info(mysql_conn, tup):
    """Insert one video row, first ensuring its uploader exists.

    When the uploader id (tup[2]) is non-zero and not yet stored, the
    user profile is fetched and inserted before the video record so
    the foreign reference is satisfied.
    """
    uploader = tup[2]
    if uploader != 0 and not does_user_info_exist(mysql_conn, uploader):
        profile = read_user_info(uploader)
        insert_up_info(mysql_conn, create_user_info(profile))
    cursor = mysql_conn.cursor()
    cursor.execute(add_av_info_req, tup)
    mysql_conn.commit()
    cursor.close()
if __name__ == '__main__':
    # Bulk-load a small range of cached av records into MySQL.
    # `mysql`, `config` and the INSERT statements are module-level
    # names defined earlier in this file (not visible here).
    con = mysql.connector.connect(**config)
    #userdic = read_user_info(1)
    for i in range(9978,10000):
        avdic = read_av_info(i)
        if avdic != 404:
            avtup = create_av_info(i,avdic)
            insert_av_info(con,avtup)
            print i,' insert complete'
        else:
            print i,' is unavil or 404'
    con.close()
|
May 2018 - Hairlicious Inc.
What I love most about this look is that it's super cute and easy to do. Too, after I rock this updo for 2-3 days I then unravel my braids and wear a simple yet defined braid out for a couple more days!!
So I go from protective style (milkmaid braids) to low manipulation style (braid out) without the need of combs, direct heat, excessive manipulation etc. It really gives my hair a break. It's a win/win for me!
I love jazzing up this style with a few gold hair accessories. I get them from the Beauty Supply Store for $1-2. I don't know what it is, but there's just something about GOLD bobby pins that I just love! It really enhances your styles in such a simple way! Love it.
I can't believe its been almost 4 months since I gave birth to my precious baby girl, Shi ♡. Words cannot describe how much of a blessing it is to now be a mother of two!! Don't get me wrong, my hands are so full...but now so is my heart!
Well, let me start off by saying that during my pregnancies, for both my son and daughter, I relaxed my hair - yup! that's right. My doctor gave me the "okay" to go ahead and perform my chemical services as usual. I relaxed every 3 months so a total of 3 times (approximately) during my pregnancies. With each pregnancy I used my usual ORS Olive oil relaxer Lye Normal Strength. I experienced no adverse effects and my kids are healthy and strong!
Just one month after giving birth (recovering from a c-section), I started back with my usual hair routine. I somehow found the strength (Lord knows its a struggle when you have an infant) and maintained my exact same regimen i.e. wash every week, deep condition with protein first then moisture, moisturize daily etc. My hair thrived and still is!
Where shedding is concerned, I haven't experienced the excessive postpartum shedding that we all HATE and DREAD!! After having my son, when he was approximately 9-10months old, I began to shed terribly. I believe it was stress and diet related due to me going back to school, work etc. My diet and nutrition took a huge hit as I was constantly on the go and had zero time for meal prep, vitamins etc. as a new mom. Back then, I restored my hair by being consistent with my regimen and doing a Black Tea Rinse every so often to combat shedding. I know that shedding is a natural process! It's inevitable to escape as hair that has been in the "growing" phase during pregnancy and will now begin to shed due to decreased levels of estrogen in the body.
Now that I am home for the year with my baby girl, I am exclusively breastfeeding so what I consume is crucial in producing milk for her and I think my diet has kept the shedding at bay. I know that vitamins and nutrition are vital for healthy hair growth and preventing hair loss. So daily, I ensure I get at least 3 servings of lean protein i.e. eggs, fish, dairy products, meat etc. per day. I also drink a TON of water (so necessary for milk production) along with fruits and veggies - not to mention I keep up with my daily vitamins.
I'm currently taking Nestle Materna Pre/Postnatal Multivitamins (1x/day), Evlution Omega 3 Fish Oils (2x/day) which help to keep my scalp hydrated and JamesonVitamin C (2x/day). Vitamin C helps my body absorb iron as well as produce collagen which is known to strengthen the hair shaft. I believe that all of these practices combined i.e. hair care, diet, nutrition etc. have played a role in delaying the postpartum shedding and improving the health of my hair.
Overall, my hair has been pretty good to me, I can't complain! My edges are still in tact and no major shedding to report. Trust me, having an infant and keeping up with a hair regimen is definitely not easy PLUS I'm always tired..LOL.. BUT I try to find the time i.e. deep conditioning overnight or on the go, moisturizing with baby in the cuddle carrier etc. It's all about multitasking and maximizing your free time to maintain good hair care habits.
My vitamins mentioned in this post are available on Amazon.com.
|
##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import Gaffer
##########################################################################
# Public methods
##########################################################################
def appendNodeContextMenuDefinitions( nodeGraph, node, menuDefinition ) :
	"""Adds a "Bookmarked" toggle (preceded by a divider when the menu
	already has items) to a node's context menu in the node graph."""
	if len( menuDefinition.items() ) :
		menuDefinition.append( "/GraphBookmarksDivider", { "divider" : True } )
	item = {
		"checkBox" : __getBookmarked( node ),
		"command" : functools.partial( __setBookmarked, node ),
		# Nodes inside a Reference are read-only, so the bookmark
		# state may not be edited there.
		"active" : node.ancestor( Gaffer.Reference ) is None,
	}
	menuDefinition.append( "/Bookmarked", item )
def appendPlugContextMenuDefinitions( nodeGraph, plug, menuDefinition ) :
	# Offers "Connect Bookmark" menu items that wire `plug` to any
	# compatible plug on a bookmarked node in the same graph root.
	parent = nodeGraph.graphGadget().getRoot()
	dividerAdded = False
	for bookmark in __bookmarks( parent ) :
		nodeGadget = nodeGraph.graphGadget().nodeGadget( bookmark )
		if nodeGadget is None :
			continue
		# Collect every ( inPlug, outPlug ) pair that could legally be
		# connected between `plug` and this bookmarked node.
		compatibleConnections = []
		for nodule in __nodules( nodeGadget ) :
			inPlug, outPlug = __connection( plug, nodule.plug() )
			if inPlug is not None :
				compatibleConnections.append( ( inPlug, outPlug ) )
		if not compatibleConnections :
			continue
		# Add the divider lazily, once, and only if the menu already
		# has other items to separate from.
		if not dividerAdded :
			if len( menuDefinition.items() ) :
				menuDefinition.append( "/BookmarksDivider", { "divider" : True } )
			dividerAdded = True
		for inPlug, outPlug in compatibleConnections :
			label = bookmark.getName()
			# Disambiguate with the plug path when this node offers
			# more than one possible connection.
			if len( compatibleConnections ) > 1 :
				bookmarkPlug = outPlug if inPlug.isSame( plug ) else inPlug
				label += "/" + bookmarkPlug.relativeName( bookmark )
			menuDefinition.append(
				"/Connect Bookmark/" + label,
				{
					"command" : functools.partial( __connect, inPlug, outPlug ),
					# Grey the item out when the connection already exists.
					"active" : not outPlug.isSame( inPlug.getInput() )
				}
			)
##########################################################################
# Internal implementation
##########################################################################
def __getBookmarked( node ) :
	"""Returns the node's "graphBookmarks:bookmarked" metadata value,
	defaulting to False when unset."""
	value = Gaffer.Metadata.nodeValue( node, "graphBookmarks:bookmarked" )
	return value or False
def __setBookmarked( node, bookmarked ) :
	"""Records the bookmark flag as node metadata, undoably."""
	script = node.scriptNode()
	with Gaffer.UndoContext( script ) :
		Gaffer.Metadata.registerNodeValue( node, "graphBookmarks:bookmarked", bookmarked )
def __bookmarks( parent ) :
	"""Returns the list of bookmarked child nodes of `parent`."""
	bookmarked = []
	for node in parent.children( Gaffer.Node ) :
		if __getBookmarked( node ) :
			bookmarked.append( node )
	return bookmarked
## \todo Perhaps this functionality should be provided by the
# GraphGadget or NodeGadget class?
def __nodules( nodeGadget ) :
	"""Returns the nodules for every plug of the node, walking nested
	plugs depth-first in declaration (preorder) order."""
	result = []
	toVisit = [ nodeGadget.node() ]
	while toVisit :
		graphComponent = toVisit.pop( 0 )
		if isinstance( graphComponent, Gaffer.Plug ) :
			nodule = nodeGadget.nodule( graphComponent )
			if nodule is not None :
				result.append( nodule )
		# prepend children so traversal order matches the recursive walk
		toVisit[0:0] = graphComponent.children( Gaffer.Plug )
	return result
## \todo This is similar to the private
# StandardNodule::connection() method. Perhaps we
# should find a single sensible place to put it?
# Maybe on the GraphGadget class? Or in a new
# PlugAlgo.h file?
def __connection( plug1, plug2 ) :
	"""Orders `plug1`/`plug2` into a connectable ( inPlug, outPlug ) pair.

	Returns ( None, None ) when the plugs live on the same node, share
	a direction, or the input refuses the connection.
	"""
	if plug1.node().isSame( plug2.node() ) or plug1.direction() == plug2.direction() :
		return None, None
	if plug1.direction() == plug1.Direction.In :
		inPlug, outPlug = plug1, plug2
	else :
		inPlug, outPlug = plug2, plug1
	return ( inPlug, outPlug ) if inPlug.acceptsInput( outPlug ) else ( None, None )
def __connect( inPlug, outPlug ) :
	"""Makes the connection, undoably."""
	scriptNode = inPlug.ancestor( Gaffer.ScriptNode )
	with Gaffer.UndoContext( scriptNode ) :
		inPlug.setInput( outPlug )
|
“Great Demon King Qin Lun, wu wu wu, Sister Hu Fei died……” Xiao Lian’s nerves became bigger as she embraced Qin Lun’s neck, sobbing her grievances.
“I’m sorry! When I’m exposed to danger, Joey will appear……” Qin Lun said as his gaze dulled, and patted the little beauty’s back.
“Let’s go, we’re lagging behind the group.” Seeing Qin Lun returning back to his normal state, Lin Feng relaxed a breath. The pressure of being beside Joey is too big, that string within his heart was almost about to break.
In the corridors that followed, the small mixed group once again met a few waves of zombies. But against a group of ten-plus people, the small packs of zombies were unable to block the passages or pose any real peril. However, two more of the Federation Soldiers that were used as shields were dead, and all the prisoners were injured.
After catching up with the group, several people were found in a disjointed conversation with Ladyboy Lily. They were able to learn quite a bit about this mixed group of theirs. So it actually turns out that after the death row prisoners escaped from their jail and got cutting tools from the dining hall, they were forced by the scattered zombies to the control hall of the base and met up with the surviving research workers and Federation Soldiers.
|
from six.moves import zip
from numpy import arange, zeros, unique
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
from pyNastran.bdf.bdf_interface.assign_type import (integer,
double, double_or_blank)
class PLOADX1(object):
    """
    Vectorized storage for PLOADX1 cards (pressure loads applied along
    an element edge between two grid points).

    Per-entry fields: load_id, element_id, (pa, pb), (ga, gb), theta.
    """
    type = 'PLOADX1'

    def __init__(self, model):
        """
        Defines the PLOADX1 object.

        Parameters
        ----------
        model : BDF
            the BDF object
        """
        self.model = model
        self.n = 0
        self._cards = []
        self._comments = []

    def __getitem__(self, i):
        """
        Returns a new PLOADX1 object restricted to the entries at index
        array *i*.

        Raises
        ------
        RuntimeError
            if *i* is empty
        """
        if len(i):
            f = PLOADX1(self.model)
            f.load_id = self.load_id[i]
            # bug fix: element_id is 1-D, so the original `[i, :]`
            # indexing raised IndexError
            f.element_id = self.element_id[i]
            f.p = self.p[i]
            f.node_ids = self.node_ids[i, :]
            f.theta = self.theta[i]
            f.n = len(i)
            return f
        raise RuntimeError('len(i) = 0')

    def __mul__(self, value):
        """Returns a copy of the loads with pressures scaled by *value*."""
        f = PLOADX1(self.model)
        f.load_id = self.load_id
        f.element_id = self.element_id
        f.p = self.p * value
        f.node_ids = self.node_ids
        f.theta = self.theta
        f.n = self.n
        return f

    def __rmul__(self, value):
        return self.__mul__(value)

    def add_card(self, card, comment=''):
        """Queues a raw BDFCard (and its comment) for the next build()."""
        self._cards.append(card)
        self._comments.append(comment)

    def build(self):
        """Parses the queued cards into typed arrays, sorted by load id."""
        cards = self._cards
        ncards = len(cards)
        self.n = ncards
        if ncards:
            float_fmt = self.model.float_fmt
            #: Load set ID
            self.load_id = zeros(ncards, 'int32')
            #: Element ID
            self.element_id = zeros(ncards, 'int32')
            #: Surface traction at grid points GA/GB. (Real)
            self.p = zeros((ncards, 2), float_fmt)
            #: Corner grid points. GA and GB are any two adjacent corner
            #: grid points of the element. (Integer > 0)
            self.node_ids = zeros((ncards, 2), 'int32')
            #: Angle between surface traction and inward normal to the
            #: line segment. (Real; default = 0.0)
            self.theta = zeros(ncards, float_fmt)

            for i, card in enumerate(cards):
                self.load_id[i] = integer(card, 1, 'load_id')
                self.element_id[i] = integer(card, 2, 'element_id')
                # PB defaults to PA when blank
                pa = double(card, 3, 'pa')
                pb = double_or_blank(card, 4, 'pb', pa)
                self.p[i, :] = [pa, pb]
                self.node_ids[i, :] = [integer(card, 5, 'ga'),
                                       integer(card, 6, 'gb')]
                self.theta[i] = double_or_blank(card, 7, 'theta', 0.)
                assert len(card) <= 8, 'len(PLOADX1 card) = %i\ncard=%s' % (len(card), card)

            # sort all arrays consistently by load id
            i = self.load_id.argsort()
            self.load_id = self.load_id[i]
            self.element_id = self.element_id[i]
            self.node_ids = self.node_ids[i, :]
            self.p = self.p[i, :]
            self.theta = self.theta[i]
            self._cards = []
            self._comments = []

    def get_stats(self):
        """Returns a one-line summary (empty list when no cards exist)."""
        msg = []
        if self.n:
            msg.append('  %-8s: %i' % ('PLOADX1', self.n))
        return msg

    def get_index(self, load_ids=None):
        """Returns the indices to write; currently all entries.

        NOTE(review): *load_ids* filtering is not implemented yet (see
        the commented-out searchsorted call).
        """
        #if load_ids:
        i = arange(self.n)
        #else:
        #    i = searchsorted(load_ids, self.load_id)
        return i

    def write_card(self, bdf_file, size=8, is_double=False, load_id=None):
        """
        Writes the PLOADX1 cards.

        Parameters
        ----------
        bdf_file : file-like
            open file object
        size : int; default=8
            card field width (8 or 16)
        is_double : bool; default=False
            unused here
        load_id : optional
            forwarded to get_index (currently ignored there)
        """
        if self.n:
            # bug fix: the original referenced the undefined name
            # `load_ids`, raising NameError on every call
            i = self.get_index(load_id)
            for (lid, eid, p, n, theta) in zip(self.load_id[i],
                    self.element_id[i], self.p[i], self.node_ids[i], self.theta[i]):
                card = ['PLOADX1', lid, eid, p[0], p[1], n[0], n[1], theta]
                if size == 8:
                    bdf_file.write(print_card_8(card))
                else:
                    bdf_file.write(print_card_16(card))
|
Canada pharmacy free shipping coupon code phenergan anti nausea medicine online pharmacy free viagra samples phenergan gel onset buy phenergan canada phenergan gel. Over the counter viagra type medicine naprosyn pill color wellbutrin generic best generic cialis canada online pharmacy. Is phenergan over the counter in canada acquistare viagra originale on line Phenergan 25mg $79.89 - $0.44 Per pill can you buy cialis otc in canada generic wellbutrin 150 mg. Online pharmacy with free viagra buy phenergan 25 mg online uk phenergan veterinary medicine cialis over the counter in canada. Phenergan medicine to buy brand name cialis canada cialis dosage in canada generic wellbutrin 150 xl generic wellbutrin 100mg. Generic cialis daily canada cialis price in canada cialis pills for sale in canada phenergan over the counter canada cialis pharmacy canada cheapest cialis online canada. Phenergan travel sickness medicine phenergan gel dosage pediatric over the counter type viagra generic cialis canada customs where can i buy phenergan in canada. Cialis discount canada buy cialis 5mg canada phenergan with codeine canada phenergan with codeine in canada. Comprare viagra originale on line cialis 20 mg price in canada phenergan nausea medicine phenergan topical gel oxford online pharmacy viagra. Cialis pills sale canada where can i buy phenergan medicine over the counter medicine like phenergan phenergan gel dose. Is viagra an over the counter medication cialis best price canada phenergan gel dosage phenergan topical gel dose. Cialis lowest price canada Amitriptyline 25mg cost naprosyn e pillola buy real cialis online canada is there an over the counter version of viagra. cost of phenergan with codeine can you get cialis over the counter in canada reliable online pharmacy viagra phenergan cream canada. Generic wellbutrin 300 mg phenergan over the counter in canada phenergan topical gel dosage buy phenergan medicine Isotane 10 is cialis over the counter in canada.
Augmentin cost without insurance tab augmentin cost can i buy strattera in canada diflucan pill dose augmentin drops price augmentin price in kuwait. Augmentin inj price augmentin 625 mg cost cost for augmentin 875 buy phenergan suppositories online augmentin 625 tablet price Buy toradol shot. Augmentin injection price syrup augmentin duo price diflucan 2 pills buy phenergan over the counter augmentin medicine price augmentin duo forte price. Augmentin price malaysia augmentin 1000 duo price in india phenergan tablets to buy online augmentin 1000 mg cost buy phenergan with codeine. Augmentin antibiotic prices augmentin 875 125 cost augmentin average cost phenergan with codeine syrup buy online. Co amoxiclav augmentin 625mg price augmentin 625 price augmentin 500mg price augmentin dds price augmentin tablets 625mg price. Augmentin 625 price singapore phenergan generic cost augmentin duo 625 mg price buy phenergan codeine online augmentin 600 price augmentin 500 price. Cheapest price for augmentin tab augmentin price augmentin price australia buy phenergan with codeine syrup augmentin duo price ireland promethazine dm generic for phenergan. Augmentin 875 mg price phenergan dm generic augmentin duo 1 gm price augmentin 457mg/5ml price tab augmentin 375 mg price augmentin tablet price in india. Augmentin cost price price of augmentin 1g cost of augmentin 500mg augmentin 500-125 price can you buy phenergan online augmentin low cost phenergan generic name. Augmentin 500mg price augmentin tablet cost augmentin 625 price uk how much does augmentin cost at walgreens. Augmentin duo 1000 price india diflucan pill for tinea versicolor augmentin antibiotic cost augmentin price mercury drug augmentin xr price. Augmentin price rite aid augmentin es 600 suspension price augmentin 375 mg price Xenical preзo e onde comprar phenergan buy online uk. Canada drug pharmacy free shipping augmentin retail price phenergan dm cost augmentin 875 mg tablets price phenergan buy online. 
Buy phenergan elixir can i buy phenergan online promethazine 25 mg generic for phenergan buy strattera australia augmentin 1000 mg tablet price augmentin 875 cash price.
Viagra ireland over the counter buy amoxicillin 250 mg phenergan veterinary medicine kamagra pills phenergan online ireland. Buy viagra online from ireland can you buy viagra online ireland buy phenergan travel sickness phenergan cream uk buy viagra online in ireland. Buy amoxicillin liquid viagra ireland price trazodone cost canada buy phenergan medicine flagyl for sale uk buy amoxicillin trihydrate. Kamagra pills dosage viagra for sale northern ireland buy amoxicillin london viagra ireland pharmacy kamagra in usa kaufen. Kamagra kaufen usa buy phenergan in uk order kamagra usa viagra ireland buy phenergan 10mg price uk buy amoxicillin clavulanic acid. Buy amoxicillin capsules phenergan over the counter ireland kamagra pills review kamagra pills amazon trazodone brand name canada buy viagra in pharmacy ireland. Kamagra for sale usa phenergan with codeine uk us customs kamagra buy phenergan tablets uk canada pharmacy express shipping kamagra fast delivery usa. Trazodone price canada buy phenergan 25mg uk viagra ireland kamagra pills wiki phenergan over counter ireland phenergan nausea medicine. Kamagra sildenafil 32 pills kamagra for sale in usa kamagra pills for sale can you buy phenergan over the counter in ireland Order cialis 5mg. Buy human amoxicillin flagyl online sale kamagra 100mg oral jelly usa viagra online ireland buy phenergan elixir uk kamagra online usa. Buy amoxicillin for humans can you buy viagra online in ireland order phenergan online uk buy amoxicillin from canada. Buy amoxicillin fast shipping kamagra shipped to us buy phenergan online uk.
Erythromycin generic phenergan over the counter in uk can you buy zithromax online how can i get viagra in canada. Can you purchase viagra over the counter in canada can you buy phenergan over the counter in the uk 2012 buying viagra in canada online. Can i buy viagra over the counter canada can i buy phenergan over the counter uk over the counter drug like phenergan tretinoin gel for sale. Low dose lisinopril hctz lisinopril 20 12.5 dosage Levitra generico farmacia italiana obagi tretinoin cream 0.05 on sale buy real zithromax online buy zithromax online overnight shipping. Phenergan tablets over the counter first medicine online pharmacy store where to buy viagra in canada online can i buy viagra over the counter in ontario phenergan over the counter uk. Para cuando cialis generico en españa tretinoin 1 cream for sale buy viagra from canadian pharmacy retin a tretinoin cream for sale can you buy phenergan over the counter in the uk. Lisinopril what dose buy pfizer zithromax online pictures phenergan pills where can i buy viagra over the counter in canada Cheap online pharmacy with prescription how do i buy viagra in canada. How much does phenergan with codeine cost comprar cialis generico por telefono en españa over the counter phenergan with codeine Buy zithromax overnight delivery. Lisinopril decreasing dosage where can i buy viagra in canada comprar generico de cialis en españa can i get viagra in canada buy zithromax suspension online. Erythromycin generic brand tretinoin 0.025 cream for sale buy phenergan pills is buying zithromax online safe. Low dose of lisinopril buy phenergan over the counter where can i buy phenergan over the counter uk buy zithromax online in usa. Is phenergan available over the counter in australia lisinopril 2.5mg dose can you buy phenergan tablets over the counter buy phenergan tablets uk lisinopril dose equivalent. Can i get viagra from canada erythromycin generic pharmacy.
|
import telebot
from telebot import types
import timetable as table
import weather
# Telegram Bot API token; replace the placeholder with a real token
# before running the bot.
token = 'your_token'
bot = telebot.TeleBot(token)
@bot.message_handler(commands=['functions'])
def functions_handler(message):
    """Replies to /functions with the list of supported features.

    NOTE: the feature names must match the literals dispatched on in
    main_activity ('Расписание', 'Погода'); the original text had the
    typo 'Распсание'.
    """
    bot.send_message(message.chat.id, '1 - Расписание \n'
                                      '2 - Погода \n')
@bot.message_handler(commands=['help'])
def show_info(message):
    """Handler for /help (not implemented yet).

    bug fix: telebot always calls handlers with the incoming message,
    so the parameter is required even while the body is a stub.
    """
    pass
@bot.message_handler(commands=['start'])
def show_start_info(message):
    """Handler for /start (not implemented yet).

    bug fix: telebot always calls handlers with the incoming message,
    so the parameter is required even while the body is a stub.
    """
    pass
@bot.message_handler(content_types=['text'])
def main_activity(message):
    """Routes free-text commands to the weather or timetable dialogs."""
    try:
        if message.text == 'Погода':
            current_weather = bot.send_message(message.chat.id, 'Введите город')
            bot.register_next_step_handler(current_weather, weather.weather_handler)
        elif message.text == 'Расписание':
            keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
            keyboard.add('Понедельник', 'Вторник', 'Среда', 'Четверг', 'Пятница')
            keyboard.one_time_keyboard = True
            timetable = bot.reply_to(message, 'Укажите день недели', reply_markup=keyboard)
            bot.register_next_step_handler(timetable, table.timetable_handler)
    except Exception as e:
        # bug fix: the original sent e.args (a tuple) as the message
        # text; send a readable string instead
        bot.send_message(message.chat.id, str(e))
if __name__ == '__main__':
    # Start long polling; none_stop keeps the loop alive across
    # transient API errors.
    bot.polling(none_stop=True)
|
Sunday morning, I made toast and scrambled eggs with onions and spinach for Ky, my 12 year old who was leaving for a week long camp at Blairsville, Georgia. His boy scouts troop plans to camp at the Woodruff camp grounds until noon next Sunday. As I looked down into the skillet sautéing the scrambles, I wondered what other moms and dads across Johns Creek (our town) were making for their own boys to get them up and running. So, I asked.
A boy ate oatmeal with blackberries while a mom ate “the good old fashioned 1960’s breakfast style” oatmeal.
A Korean mom gave her son an omelet with rice and kimchi, while an Indian boy ate vadas with rasam.
A boy ate an “everything bagel” and drank a smoothie, while a dad had a plain bagel with peanut butter and bottled water.
A boy ate a croissant, milk and banana, while another ate just a banana.
Eggs turned out to be the champions. Eggs were eaten by themselves, or were eaten with toast, or with bacon, or with bacon and biscuits, or with bacon and sausage, or with sausage and milk. Eggs were eaten with toast and they were eaten as omelets. A dad had scrambled eggs along with tea decaf and blue berry bagels.
A dad wished he could finish his quota of 3 coffees before 9 am — “but there’s just so much to do still” — so he had to make do with only half a cup for now. A mom was going to head back home after her boy was off, to fish out a Pop-Tart from her pantry.
A mom helped herself with scallion pancakes and tea and gave her son pancakes, bacon and kefir.
Some kids ate just pancakes, some ate just chocolate chip pancakes. And a few ate eggs and pancakes.
A dad ate a muffin while another wasn’t so sure what he had. “This morning? It’s been a while since I got up, hasn’t it?”, he asked his son, who shook his head and smiled.
One kid ate toast with butter while another had it with jelly.
Of all the kids who ate cereal, one kid ate Frosted flakes, “Tony the tiger” he air quoted and laughed, while one had Cinnamon Toast Crunch and another, the Special K one.
A lucky boy had two chocolate milks and a blueberry muffin and an even luckier one had 2 chocolate chip cookies and milk.
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from functools import partial
from multiprocessing import Pool
import os
import re
import cropper
import numpy as np
import tqdm
# ==============================================================================
# = param =
# ==============================================================================
# Command-line interface: paths, crop geometry and alignment options
# for the CelebA face-alignment script.
parser = argparse.ArgumentParser()
# main
parser.add_argument('--img_dir', dest='img_dir', default='./data/img_celeba/img_celeba')
parser.add_argument('--save_dir', dest='save_dir', default='./data/img_celeba/aligned')
parser.add_argument('--landmark_file', dest='landmark_file', default='./data/img_celeba/landmark.txt')
parser.add_argument('--standard_landmark_file', dest='standard_landmark_file', default='./data/img_celeba/standard_landmark_68pts.txt')
parser.add_argument('--crop_size_h', dest='crop_size_h', type=int, default=572)
parser.add_argument('--crop_size_w', dest='crop_size_w', type=int, default=572)
# move_h/move_w shift the standard landmark template before cropping
# (fractions; presumably of the crop size — TODO confirm in cropper)
parser.add_argument('--move_h', dest='move_h', type=float, default=0.25)
parser.add_argument('--move_w', dest='move_w', type=float, default=0.)
parser.add_argument('--save_format', dest='save_format', choices=['jpg', 'png'], default='jpg')
parser.add_argument('--n_worker', dest='n_worker', type=int, default=8)
# others
parser.add_argument('--face_factor', dest='face_factor', type=float, help='The factor of face area relative to the output image.', default=0.45)
parser.add_argument('--align_type', dest='align_type', choices=['affine', 'similarity'], default='similarity')
parser.add_argument('--order', dest='order', type=int, choices=[0, 1, 2, 3, 4, 5], help='The order of interpolation.', default=3)
parser.add_argument('--mode', dest='mode', choices=['constant', 'edge', 'symmetric', 'reflect', 'wrap'], default='edge')
args = parser.parse_args()
# ==============================================================================
# =                                opencv first                                =
# ==============================================================================
# Prefer OpenCV for image I/O; fall back to scikit-image when it is
# not installed.  Both paths save JPEGs at the same quality.
_DEAFAULT_JPG_QUALITY = 95
try:
    import cv2
    imread = cv2.imread
    imwrite = partial(cv2.imwrite, params=[int(cv2.IMWRITE_JPEG_QUALITY), _DEAFAULT_JPG_QUALITY])
    align_crop = cropper.align_crop_opencv
    print('Use OpenCV')
except ImportError:
    # bug fix: the bare `except:` also hid unrelated errors (e.g. a
    # missing attribute on cropper); only a failed cv2 import should
    # trigger the fallback
    import skimage.io as io
    imread = io.imread
    imwrite = partial(io.imsave, quality=_DEAFAULT_JPG_QUALITY)
    align_crop = cropper.align_crop_skimage
    print('Importing OpenCv fails. Use scikit-image')
# ==============================================================================
# =                                     run                                    =
# ==============================================================================

# count landmarks: each line is "<name> x1 y1 x2 y2 ...", so the number
# of landmarks is half the number of coordinate columns
with open(args.landmark_file) as f:
    line = f.readline()
    n_landmark = len(re.split('[ ]+', line)[1:]) // 2

# read data
# bug fix: the deprecated aliases np.str / np.float were removed in
# NumPy >= 1.24; the builtin str / float dtypes are equivalent
img_names = np.genfromtxt(args.landmark_file, dtype=str, usecols=0)
landmarks = np.genfromtxt(args.landmark_file, dtype=float, usecols=range(1, n_landmark * 2 + 1)).reshape(-1, n_landmark, 2)
standard_landmark = np.genfromtxt(args.standard_landmark_file, dtype=float).reshape(n_landmark, 2)
standard_landmark[:, 0] += args.move_w
standard_landmark[:, 1] += args.move_h

# data dir: encode the crop parameters into the output folder name
save_dir = os.path.join(args.save_dir, 'align_size(%d,%d)_move(%.3f,%.3f)_face_factor(%.3f)_%s' % (args.crop_size_h, args.crop_size_w, args.move_h, args.move_w, args.face_factor, args.save_format))
data_dir = os.path.join(save_dir, 'data')
if not os.path.isdir(data_dir):
    os.makedirs(data_dir)
def work(i):  # a single work
    """Aligns/crops image *i* and returns its landmark line, or None on
    failure (after printing a diagnostic).

    Each image is attempted up to three times so that rare transient
    I/O errors do not lose the sample.
    """
    for _ in range(3):  # try three times
        try:
            img = imread(os.path.join(args.img_dir, img_names[i]))
            img_crop, tformed_landmarks = align_crop(img,
                                                     landmarks[i],
                                                     standard_landmark,
                                                     crop_size=(args.crop_size_h, args.crop_size_w),
                                                     face_factor=args.face_factor,
                                                     align_type=args.align_type,
                                                     order=args.order,
                                                     mode=args.mode)
            name = os.path.splitext(img_names[i])[0] + '.' + args.save_format
            path = os.path.join(data_dir, name)
            if not os.path.isdir(os.path.split(path)[0]):
                os.makedirs(os.path.split(path)[0])
            imwrite(path, img_crop)
            # flatten to "x1 y1 x2 y2 ..." for the output landmark file
            tformed_landmarks.shape = -1
            return ('%s' + ' %.1f' * n_landmark * 2) % ((name, ) + tuple(tformed_landmarks))
        except Exception:
            # bug fix: the bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit; retry only on real errors
            pass
    print('%s fails!' % img_names[i])
    return None
if __name__ == '__main__':
    # Process all images in parallel; results arrive in input order
    # because Pool.imap preserves ordering, and tqdm shows progress.
    pool = Pool(args.n_worker)
    name_landmark_strs = list(tqdm.tqdm(pool.imap(work, range(len(img_names))), total=len(img_names)))
    pool.close()
    pool.join()

    # Write one "<name> x1 y1 x2 y2 ..." line per successfully cropped
    # image (failed images returned None and are skipped).
    landmarks_path = os.path.join(save_dir, 'landmark.txt')
    with open(landmarks_path, 'w') as f:
        for name_landmark_str in name_landmark_strs:
            if name_landmark_str:
                f.write(name_landmark_str + '\n')
|
CHICAGO, March 25, 2015 – The American Council on Education (ACE) announced today that Loyola University Chicago Quinlan School of Business professor Joan M. Phillips, PhD, MBA, has been named an ACE Fellow for academic year 2015-16.
The ACE Fellows Program, established in 1965, is designed to strengthen institutions and leadership in American higher education by identifying and preparing emerging leaders for senior positions in college and university administration. Forty-seven Fellows, nominated by the senior administration of their institutions, were selected this year following a rigorous application process.
ACE President Molly Corbett Broad noted that the Fellows Program is celebrating its 50th anniversary this academic year, and that over those five decades nearly 2,000 higher education leaders have participated, with more than 300 Fellows having served as chief executive officers of colleges or universities and more than 1,300 having served as provosts, vice presidents and deans.
Phillips joined Loyola in 2008 after serving on the faculties at University of Notre Dame, University of Kentucky, and Michigan State University. She holds a BA from State University of New York at Albany, an MBA from The University of Virginia’s Darden School of Business, and a PhD from University of Illinois at Urbana-Champaign.
Her research explores consumer decision-making, brand strategy, and the impact of marketing on society, and she teaches at the undergraduate, MBA, and executive levels.
As an ACE Fellow, Phillips will go through a combination of retreats, interactive learning opportunities, campus visits, and placement at another higher education institution to condense years of on-the-job experience and skills development into one year.
While working closely with a college or university president and other senior officers at a host institution, Phillips will focus on an issue of concern to Loyola.
ACE Fellows also attend three retreats on higher education issues organized by ACE and visit campuses and other higher education-related organizations across the United States and abroad as they work to read extensively in the field and engage in interactive learning opportunities to increase their understanding of higher education challenges and opportunities.
Founded in 1918, ACE is the major coordinating body for all the nation’s higher education institutions, representing more than 1,600 college and university presidents, and more than 200 related associations, nationwide. It provides leadership on key higher education issues and influences public policy through advocacy. For more information, please visit www.acenet.edu or follow ACE on Twitter @ACEducation.
|
from dependencies.dependency import ClassSecurityInfo
from dependencies.dependency import schemata
from dependencies.dependency import HoldingReference
from dependencies import atapi
from dependencies.dependency import *
from dependencies.dependency import getToolByName
from lims import bikaMessageFactory as _
from lims.browser.widgets import DateTimeWidget, ReferenceWidget
from lims.config import PROJECTNAME
from lims.content.bikaschema import BikaSchema
# Archetypes schema for an instrument validation record: the validation
# period, the people involved, and free-text descriptions of the work done.
schema = BikaSchema.copy() + Schema((
    # Date the validation report was issued.
    DateTimeField('DateIssued',
        with_time = 1,
        with_date = 1,
        widget = DateTimeWidget(
            label=_("Report Date"),
            description=_("Validation report date"),
        ),
    ),
    # Start of the validation period (instrument unavailable from here).
    DateTimeField('DownFrom',
        with_time = 1,
        with_date = 1,
        widget = DateTimeWidget(
            label=_("From"),
            description=_("Date from which the instrument is under validation"),
        ),
    ),
    # End of the validation period.
    DateTimeField('DownTo',
        with_time = 1,
        with_date = 1,
        widget = DateTimeWidget(
            label=_("To"),
            description=_("Date until the instrument will not be available"),
        ),
    ),
    # Free-text name of the analyst responsible for the validation.
    StringField('Validator',
        widget = StringWidget(
            label=_("Validator"),
            description=_("The analyst responsible of the validation"),
        )
    ),
    # Remarks to review before the validation starts.
    TextField('Considerations',
        default_content_type = 'text/plain',
        allowed_content_types= ('text/plain', ),
        default_output_type="text/plain",
        widget = TextAreaWidget(
            label=_("Considerations"),
            description=_("Remarks to take into account before validation"),
        ),
    ),
    # Description of the actions carried out during the validation.
    TextField('WorkPerformed',
        default_content_type = 'text/plain',
        allowed_content_types= ('text/plain', ),
        default_output_type="text/plain",
        widget = TextAreaWidget(
            label=_("Work Performed"),
            description=_("Description of the actions made during the validation"),
        ),
    ),
    # Lab contact who performed the task; the vocabulary is supplied by
    # InstrumentValidation.getLabContacts below (active contacts only).
    ReferenceField('Worker',
        vocabulary='getLabContacts',
        allowed_types=('LabContact',),
        relationship='LabContactInstrumentValidation',
        widget=ReferenceWidget(
            checkbox_bound=0,
            label=_("Performed by"),
            description=_("The person at the supplier who performed the task"),
            size=30,
            base_query={'inactive_state': 'active'},
            showOn=True,
            colModel=[{'columnName': 'UID', 'hidden': True},
                      {'columnName': 'JobTitle', 'width': '20', 'label': _('Job Title')},
                      {'columnName': 'Title', 'width': '80', 'label': _('Name')}
                     ],
        ),
    ),
    # External identification number of the validation report.
    StringField('ReportID',
        widget = StringWidget(
            label=_("Report ID"),
            description=_("Report identification number"),
        )
    ),
    # Free-text remarks about the validation.
    TextField('Remarks',
        default_content_type = 'text/plain',
        allowed_content_types= ('text/plain', ),
        default_output_type="text/plain",
        widget = TextAreaWidget(
            label=_("Remarks"),
        ),
    ),
))

# The inherited 'title' field is repurposed to hold the instrument's asset number.
schema['title'].widget.label = 'Asset Number'
class InstrumentValidation(BaseFolder):
    """A validation report filed against a laboratory instrument.

    Content type holding the validation period, the people involved and
    free-text notes, as declared by the module-level ``schema``.
    """
    security = ClassSecurityInfo()
    schema = schema
    displayContentsTab = False
    _at_rename_after_creation = True

    def _renameAfterCreation(self, check_auto_id=False):
        """Let the LIMS ID server assign this object's id after creation."""
        from lims.idserver import renameAfterCreation
        renameAfterCreation(self)

    def getLabContacts(self):
        """Vocabulary for the 'Worker' field: all active lab contacts."""
        catalog = getToolByName(self, 'bika_setup_catalog')
        brains = catalog(portal_type='LabContact',
                         inactive_state='active',
                         sort_on='sortable_title')
        return DisplayList([(brain.UID, brain.Title) for brain in brains])

atapi.registerType(InstrumentValidation, PROJECTNAME)
|
The Inmarsat FleetBroadband High-Usage airtime plans includes 4, 8, 20 or 40 Gigabytes of data allowance per month for high users. These plans offer affordable voice and data costs and worldwide connectivity.
3-Month Plan $0.60 per message.
12-Month Plan $0.50 per message.
**Shared Corporate Allowance Package (SCAP) available at a Service Fee of $1,940.00 per Month. Call us for more information about this plan.
**Shared Corporate Allowance Package (SCAP) available at a Service Fee of $2,500.00 per Month. Call us for more information about this plan.
There is a first-time $35.00 activation fee for new FleetBroadband postpaid accounts.
All high usage plans have an Allowance of 1 or 2 Sim Cards in same vessel.
In order to activate a new FleetBroadband airtime postpaid plan, we need a signed copy of the FleetBroadband airtime service agreement and the Vessel Information form.
The Inmarsat FleetBroadband service has the options below that can be added to your plan.
Static IP: $50.00 per month (billed 12 months in advance).
Dynamic IP: $40.00 per month (billed 12 months in advance).
Voice packages include voice to fixed and voice to cellular calls only. For all other call rates please refer to the plan. Minimum contract duration for a voice package is the same as the data plan it is activated on.
The voice plans are available in SCAP form. If applied to a SCAP, the voice package minutes can be used across the SCAP.
The FleetBroadband Streaming rates are for selectable Quality of Service Internet Protocol (IP) Streaming. When selected, these services deliver priority IP routing to the edge of the Inmarsat core network, benefiting Internet applications such as live video, large secure file transfers, and other data applications that require quick and ready bandwidth.
The FleetBroadband High Usage airtime service may be used worldwide over the global Inmarsat-4 satellite network.
All monthly plans require a one-time $35.00 activation fee. The monthly fee and usage will be billed monthly.
The Allowance in the Allowance Plans is Data (IP) only. All other services are charged in addition. Once all of the data allowance has been used, Out of Allowance rates apply.
The Allowance Plans can support up to 2 SIMs as long as they are both activated on the same vessel.
For the 1GB Allowance Plans only - Where the number of vessels activated on the same Allowance Plan is 5 or higher for one fleet, the allowance for the vessels can be shared across the entire fleet. Please note the slightly higher per SIM monthly SCAP subscription.
If in a given month the number of vessels activated on the same large allowance plan falls below 5 vessels then the pooling of traffic will stop.
A 24 months’ minimum contract duration is required to activate the 20GB and 40GB plans. A 3 months’ minimum contract duration is required to activate the 4GB and 8GB plans.
The provisioning of the Dual SIM plans and SCAPs requires Inmarsat to issue an activation ID for each vessel. A minimum of 3 business days' lead time should be allowed to ensure no delays.
The customer authorizes NorthernAxcess to charge the credit card specified in the Inmarsat FleetBroadband Monthly Airtime agreement for monthly subscription costs, calls, SMS, data & overages. By buying this product, the customer also agrees to NorthernAxcess Terms and Conditions.
Click the button below to add the Inmarsat FleetBroadband High-Usage Rate Plan to your wish list.
|
#!/usr/bin/python
# Copyright 2013 Peter Goetz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from os import listdir, makedirs, getcwd, chdir, remove, path
import os.path
from sys import argv
from subprocess import call
from shutil import rmtree
TEST_CASES_DIR = "test-cases"
DIFF_VIEWER = argv[1] if len(argv) > 1 else "meld"
SCRIPT_NAME = "vcf-compare"
def main():
assert os.path.exists(TEST_CASES_DIR)
num_errors = 0
num_tests = 0
for test_case in listdir(TEST_CASES_DIR):
num_tests += 1
success = run(test_case)
if not success: num_errors += 1
print "\n" + str(num_tests) + " Tests completed. ERRORS:", num_errors
def run(test_case):
    # Run a single test case; return True on success, False on failure.
    # Executes the vcf-compare script against the case's a.vcf/b.vcf inside a
    # scratch directory, then compares its stdout with the expected output.
    # The scratch directory is kept on failure so it can be inspected.
    print ".",
    test_run_data_dir = path.join("test-run-data", test_case)
    ensure_dir_exists_and_is_empty(test_run_data_dir)
    success = True
    with InDirectory(test_run_data_dir):
        # Capture the script's stdout/stderr into files in the scratch dir.
        with open("actual-output", "w") as actual_output_file, \
             open("stderr", "w") as stderr_file:
            rc = call([path.join(getcwd(), "..", "..", SCRIPT_NAME),
                       path.join("..", "..", TEST_CASES_DIR, test_case, "a.vcf"),
                       path.join("..", "..", TEST_CASES_DIR, test_case, "b.vcf")],
                      stdout= actual_output_file,
                      stderr=stderr_file)
        if rc != 0:
            # Non-zero exit code from the script under test is a failure.
            print error(test_case, "script returned error. RC = " + str(rc))
            success = False
        else:
            actual_output_filename = "actual-output"
            expected_output_filename = path.join("..", "..", TEST_CASES_DIR, test_case, "expected-output")
            with open(actual_output_filename) as actual_output_file, \
                 open(expected_output_filename) as expected_output_file:
                if actual_output_file.read() != expected_output_file.read():
                    success = False
                    # Show the mismatch in the configured diff viewer.
                    print error(test_case, "Files differ. Running diff\n")
                    call([DIFF_VIEWER, actual_output_filename, expected_output_filename])
                    print "\nEnd of Diff\n"
    # Only clean up on success so failing runs remain available for debugging.
    if success: rmtree(test_run_data_dir)
    return success
def ensure_dir_exists_and_is_empty(dir_path):
    # Recreate dir_path as an empty directory, removing any prior contents.
    # Parameter renamed from `path`, which shadowed the os.path module
    # imported at the top of this file (all call sites pass positionally).
    if os.path.exists(dir_path): rmtree(dir_path)
    makedirs(dir_path)
def error(test_case, message):
    """Format a test-failure message for printing."""
    return "\nIn %s: %s" % (test_case, message)
class InDirectory:
    """Context manager: chdir into ``new_path`` on entry, restore on exit."""

    def __init__(self, new_path):
        self.new_path = new_path

    def __enter__(self):
        self.saved_path = os.getcwd()  # remember where we were
        os.chdir(self.new_path)

    def __exit__(self, etype, value, traceback):
        # Always restore the original working directory, even on error.
        os.chdir(self.saved_path)
# Script entry point: run the suite only when executed directly.
if __name__ == "__main__":
    main()
|
These amazing thinning tools use the same principle as Smart Tails and Smart Manes, but have a wider blade width of 5". They are brilliant for removing the thick winter undercoat. The Smart Coats thinner has been tried and tested on ponies with Cushings and Donkeys and Shetland ponies who tend to be heavily coated all year round. If the Smart Coats thinner is used continuously during the non shedding season it will thin the coat by cutting. Using them in the Spring time on all other large animals as well, will allow removal of the winter coat to be done very quickly as the head is so wide.
These tools are available in coarse or fine. Takes replacement blades and is interchangeable.
They do not give a smooth clipped finish but do leave the coat tidy.
|
"""
simple_physicaloptics
performs optics calculations using physical optics
inputs: reads the heights profiles from tmpHeights.dat
file produced by dabam.py with detrending, e.g.:
python3 dabam.py 4
output: some plots
"""
# Module authorship metadata.
__author__ = "Manuel Sanchez del Rio"
__contact__ = "srio@esrf.eu"
# Fixed: was misspelled as `__copyright` (missing trailing underscores),
# so the conventional dunder attribute was never actually defined.
__copyright__ = "ESRF, 2015"
import numpy
from matplotlib import pylab as plt
def goFromTo(source, image, distance=1.0, lensF=None, slopeError=None, wavelength=1e-10):
    """Free-space propagator between two planes (Fresnel-Kirchhoff phase term).

    :param source: 1D array of source-plane coordinates [m]
    :param image: 1D array of image-plane coordinates [m]
    :param distance: propagation distance between the planes [m]
    :param lensF: if given, adds a thin lens at the image plane; note the
        callers pass 2*F here (lensF is in fact 2*F)
    :param slopeError: optional array of projected slope errors; adds an
        extra optical path of 2*slopeError
    :param wavelength: radiation wavelength [m]
    :return: complex 2D array of shape (source.size, image.size) with exp(i k r)
    """
    distance = numpy.array(distance)
    x1 = numpy.outer(source, numpy.ones(image.size))
    x2 = numpy.outer(numpy.ones(source.size), image)
    r = numpy.sqrt(numpy.power(x1 - x2, 2) + numpy.power(distance, 2))
    # Thin lens at the image plane (parabolic phase; lensF is 2*F).
    # FIX: use `is not None` -- `!= None` performs an elementwise comparison
    # on numpy arrays (slopeError is an array), making `if` raise ValueError.
    if lensF is not None:
        r = r - numpy.power(x1 - x2, 2) / lensF
    # Extra path length introduced by mirror slope errors.
    if slopeError is not None:
        r = r + 2 * slopeError
    wavenumber = numpy.pi * 2 / wavelength
    return numpy.exp(1.j * wavenumber * r)
def main():
    """Simulate focusing of X-rays by a mirror with measured figure errors.

    Loads a height-error profile from tmpHeights.dat (produced by dabam.py),
    derives slope errors, then performs two Fresnel-Kirchhoff propagations
    (source -> mirror with lens+slope errors, mirror -> detector) and writes
    the normalized detector intensity to tmpPhysicalOptics.dat.
    """
    #
    # y axis is horizontal
    # z axis is vertical
    #
    #
    #define focal distances
    #
    p = 30.0
    q = 10.0
    theta_grazing = 3e-3
    #
    #compute mirror radius
    #
    R = 2 / numpy.sin(theta_grazing) / (1/p + 1/q)
    F = 1 / (1/p + 1/q)
    print("Mirror radius of curvature set to: %.3f m (p=%.3f m, q=%.3f m, theta=%.2f mrad))"%(R,p,q,theta_grazing*1e3))
    print("Mirror focal length set to: %.3f m "%(F))
    #
    #load height profile
    #
    input_file = "tmpHeights.dat"
    a = numpy.loadtxt(input_file)
    hy0 = a[:,0]
    hz0 = a[:,1]
    #
    #interpolate to increase the number of points ans statistics
    #
    do_interpolate = 0
    if do_interpolate:
        mirror_length = (hy0.max() - hy0.min())
        npoints = 500 # hy0.size
        hy = numpy.linspace(-0.5*mirror_length,0.5*mirror_length,npoints)
        hz = numpy.interp(hy,hy0,hz0)
    else:
        hy = hy0
        hz = hz0
    #remove mean
    hz -= hz.mean()
    L = hy[-1]-hy[0]
    print("Mirror data from file: %s :"%input_file)
    print("    Mirror length is: %.3f m"%L)
    print("    Mirror aperture is: %.3f um"%(1e6*L*numpy.sin(theta_grazing)))
    N = hy.size
    print("    Mirror contains %d points"%N)
    # #
    # #load slopes profile
    # #
    # input_file = "tmpSlopes.dat"
    # a = numpy.loadtxt(input_file)
    # hy = a[:,0]
    # sz = a[:,1]
    #
    #compute slopes
    #
    # numerical derivative of the height profile (uniform spacing assumed)
    sz = numpy.gradient(hz,(hy[1]-hy[0]))
    slope_errors_rms = sz.std()
    print("    Mirror slope error RMS is %.3f urad = %.3f arcsec"%(slope_errors_rms*1e6,slope_errors_rms*180/numpy.pi*3600))
    #
    #project on optical axis
    #
    hy_projected = hy * numpy.sin(theta_grazing)
    # # dump to file
    # outFile = "tmpImage.dat"
    # if outFile != "":
    #     dd = numpy.concatenate( (bin_centers.reshape(-1,1), image_histogram.reshape(-1,1)),axis=1)
    #     dd[:,0] *= -1e6 # in microns, inverted to agree with shadow
    #     dd[:,1] /= dd[:,1].max()
    #     numpy.savetxt(outFile,dd)
    # sampling of the three planes: source, mirror aperture (slit), detector
    sourcepoints = 1000
    slitpoints = 1000
    detpoints = 1000
    wavelength = 1e-10
    aperture_diameter = 2 * hy_projected.max()
    # diffraction-limited (Airy) angular size and detector extent sized from it
    airy_disk_theta = 1.22 * wavelength / aperture_diameter
    detector_size = 50 * airy_disk_theta * q
    fwhm_theory = 2 * 2.35 * slope_errors_rms * q
    print("aperture _diameter = %f um "%(aperture_diameter*1e6))
    print("detector_size = %f um"%(detector_size*1e6))
    print("FWHM theory (2 sigma q) = %f um"%(fwhm_theory*1e6))
    print("Airy disk is: %f urad = %f um"%(airy_disk_theta*1e6,airy_disk_theta*q*1e6))
    if airy_disk_theta*q >= detector_size:
        detector_size = 5 * airy_disk_theta * q
        print("detector_size NEW = %f um"%(detector_size*1e6))
    # coordinates in the three planes (point source at the origin)
    position1x = numpy.linspace(0,0,sourcepoints)
    position2x = numpy.linspace(-aperture_diameter/2,aperture_diameter/2,slitpoints)
    position3x = numpy.linspace(-detector_size/2,detector_size/2,detpoints)
    # slope errors projected onto the optical axis and resampled on the slit grid
    sz_projected_interpolated = numpy.interp(position2x, hy, sz * numpy.sin(theta_grazing) )
    # sz_projected_interpolated = None
    # fields12 = goFromTo(position1x,position2x,q, wavelength=wavelength, lensF=2*F)
    # source -> mirror: includes lens term (2*F) and the slope-error phase
    fields12 = goFromTo(position1x,position2x,p, lensF=2*F, slopeError=sz_projected_interpolated, wavelength=wavelength)
    # mirror -> detector: free-space propagation only
    fields23 = goFromTo(position2x,position3x,q, lensF=None,wavelength=wavelength)
    # from 1 to 3, matrix multiplication
    fields13 = numpy.dot(fields12,fields23)
    print ("Shape of fields12, fields23, fields13: ",fields12.shape,fields23.shape,fields13.shape)
    #prepare results
    fieldComplexAmplitude = numpy.dot(numpy.ones(sourcepoints),fields13)
    print ("Shape of Complex U: ",fieldComplexAmplitude.shape)
    print ("Shape of position1x: ",position1x.shape)
    fieldIntensity = numpy.power(numpy.abs(fieldComplexAmplitude),2)
    fieldPhase = numpy.arctan2(numpy.real(fieldComplexAmplitude), \
                               numpy.imag(fieldComplexAmplitude))
    #
    # write spec formatted file
    #
    out_file = "" # "simple_physicaloptics.spec"
    if out_file != "":
        f = open(out_file, 'w')
        header="#F %s \n\n#S 1 fresnel-kirchhoff diffraction integral\n#N 3 \n#L X[m] intensity phase\n"%out_file
        f.write(header)
        for i in range(detpoints):
            out = numpy.array((position2x[i], fieldIntensity[i], fieldPhase[i]))
            f.write( ("%20.11e "*out.size+"\n") % tuple( out.tolist()) )
        f.close()
        print ("File written to disk: %s"%out_file)
    #
    # write two-column formatted file
    #
    # normalize intensity and center abscissas (in microns) on the peak
    fieldIntensity /= fieldIntensity.max()
    tmpAbscissas = position3x * 1e6
    outFile = "tmpPhysicalOptics.dat"
    itmp = numpy.argmax(fieldIntensity)
    tmpAbscissas = tmpAbscissas - tmpAbscissas[itmp]
    if outFile != "":
        dd=numpy.concatenate( (tmpAbscissas, fieldIntensity) ,axis=0).reshape(2,-1).transpose()
        numpy.savetxt(outFile,dd)
        print ("File "+outFile+" written to disk.\n")
    #
    #plots
    #
    do_plots = 0
    if do_plots:
        #
        #plots
        #
        from matplotlib import pylab as plt
        # plt.figure(1)
        # plt.plot(hy*1e3,hz*1e9)
        # plt.title("Profile used")
        # plt.xlabel("X [mm]")
        # plt.ylabel("Z [nm]")
        plt.figure(2)
        plt.plot(tmpAbscissas,fieldIntensity)
        plt.title("Fresnel-Kirchhoff Diffraction")
        plt.xlabel("X [um]")
        plt.ylabel("Intensity [a.u.]")
        plt.show()
#
# main program entry point (run only when executed as a script)
#
if __name__ == '__main__':
    main()
|
OVB Financial Literacy Leader Hope Roush shows students the value of paying more than the minimum payment when it comes to credit card debt.
OVB’s Jackie Hornsby and Jeff Smith share some of their experience when it comes to making financially sound decisions.
GALLIPOLIS — Financial education is essential when preparing students to become responsible young adults. Through their partnership with the Gallia Academy High School Afterschool Program (ASSET), OVB recently held an interactive budgeting event for participating students.
With the help of the Gallia 4-H Extension Office, the event, “Real Money Real World,” taught students the importance of budgeting and money management. “Real Money Real World” is a youth-focused financial literacy program from Ohio State University Extension. According to OSU, the program reaches ages 12-18, but is ideally suited for youth ages 13-16. Using an interactive format, “Real Money Real World” is a spending simulation that provides the opportunity to make lifestyle and budget choices similar to those made by adults.
Increase participants’ awareness of how education level and corresponding career choice influence personal income and financial security.
Increase participants’ knowledge of money management tools used in daily spending for cost-of-living decisions.
Increase participants’ awareness of how income and lifestyle choices affect the amount of money available for discretionary spending.
GAHS students achieved the program’s goals as they all finished the simulation with what organizers described as “great success and a better view of how education, income, and family situation can affect our lifestyles.” The different booths students were required to visit included housing, transportation, credit, student loans, food, clothing, entertainment, child care, communications, and financial advice. Students also visited a philanthropy booth that stressed the importance of giving back through means, such as volunteer work.
OVB Vice President, Corporate Communications Bryna Butler said she enjoyed volunteering at the event.
To schedule a financial education event at your school or group, contact Roush at hdroush@ovbc.com.
Information submitted by Ohio Valley Bank.
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for request_profiler.

    Replaces the boolean ``RuleSet.include_anonymous`` column with an
    integer ``RuleSet.user_filter_type`` column (default 0).
    """

    def forwards(self, orm):
        """Apply: drop include_anonymous, add user_filter_type."""
        # Deleting field 'RuleSet.include_anonymous'
        db.delete_column(u'request_profiler_ruleset', 'include_anonymous')

        # Adding field 'RuleSet.user_filter_type'
        db.add_column(u'request_profiler_ruleset', 'user_filter_type',
                      self.gf('django.db.models.fields.IntegerField')(default=0),
                      keep_default=False)

    def backwards(self, orm):
        """Revert: restore include_anonymous, drop user_filter_type."""
        # Adding field 'RuleSet.include_anonymous'
        db.add_column(u'request_profiler_ruleset', 'include_anonymous',
                      self.gf('django.db.models.fields.BooleanField')(default=True),
                      keep_default=False)

        # Deleting field 'RuleSet.user_filter_type'
        db.delete_column(u'request_profiler_ruleset', 'user_filter_type')

    # Frozen ORM snapshot used by South at migration time.
    # Auto-generated; do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'request_profiler.profilingrecord': {
            'Meta': {'object_name': 'ProfilingRecord'},
            'duration': ('django.db.models.fields.FloatField', [], {}),
            'end_ts': ('django.db.models.fields.DateTimeField', [], {}),
            'http_method': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'http_user_agent': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'remote_addr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'request_uri': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'response_status_code': ('django.db.models.fields.IntegerField', [], {}),
            'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'start_ts': ('django.db.models.fields.DateTimeField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'view_func_name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'request_profiler.ruleset': {
            'Meta': {'object_name': 'RuleSet'},
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'uri_regex': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
            'user_filter_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'user_group_filter': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'})
        }
    }

    complete_apps = ['request_profiler']
|
Vip4exam.com provide our candidates with the latest exam materials updates for PMI. To ensure about the relevance and accuracy of our resources, the study materials are constantly revised and updated by our expert team. We guarantee the materials with quality and reliability which will help you pass any PMI certification exam.
Vip4exam.com provide you free demo of all PMI Products. You can have a try of our PMI training products before you make your decisions to buy. You can download them easily from our website You may find sample PMI questions and answers for downloads on all the PMI exams pages. And all the demos are for free. Even though, our actual PMI products are more useful and comprehensive than their demo versions, it is something we urge you to try first. Informed choices are always the best choices.
|
#!/usr/bin/env python3
import logging
from pathlib import Path
import os
import sys
# Allow running this file directly (discouraged): emulate the package
# context by putting the parent directory on sys.path, importing the
# keysign package, and setting __package__ so the relative import below works.
if __name__ == "__main__" and __package__ is None:
    logging.getLogger().error("You seem to be trying to execute " +
                              "this script directly which is discouraged. " +
                              "Try python -m instead.")
    parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.sys.path.insert(0, parent_dir)
    import keysign
    #mod = __import__('keysign')
    #sys.modules["keysign"] = mod
    __package__ = str('keysign')
from .gpgmeh import export_uids, minimise_key
def escape_filename(fname):
    """Return *fname* with every non-alphanumeric character replaced by '_'."""
    safe_chars = []
    for ch in fname:
        safe_chars.append(ch if ch.isalnum() else '_')
    return ''.join(safe_chars)
def main():
    """Export each UID of the key given in sys.argv[1] to its own file.

    Reads the (optionally minimised) key data and writes one
    ``NN-<escaped uid>.pgp.asc`` file per UID into the current directory.
    """
    fname = sys.argv[1]
    # Fixed: the file was previously opened without ever being closed.
    with open(fname, 'rb') as key_file:
        keydata = key_file.read()
    minimise = True
    if minimise:
        keydata = minimise_key(keydata)
    for i, (uid, uid_bytes) in enumerate(export_uids(keydata), start=1):
        uid_file = Path('.') / ("{:02d}-".format(i) + escape_filename(uid) + ".pgp.asc")
        print(f"Writing {uid_file}...")
        uid_file.write_bytes(uid_bytes)
    print("Done!")
# Script entry point.
if __name__ == "__main__":
    main()
|
Listed as one of The London Times’ 50 of the World’s Best Design Blogs, This Is Glamorous features an array of articles including design, fashion, travel & the daily search for beautiful things. I love looking at their Travel page – they post pictures of dreamy and exotic locations.
This was the blog that changed everything for me. It was the first ever lifestyle blog that I had ever followed and each time I’m at the homepage, I feel happy. This blog reminds me of why I love movies like Breakfast at Tiffany’s and My Fair Lady.
A makeup & fashion blog that also features strong women of music, fashion and business. And I suppose best part about this blog is the giveaways! (Happy dance….) I haven’t been following this blog for long but I’m looking forward to seeing more posts by Maisha (owner & editor).
What attracted me the most to this blog is its tagline. Coincidentally, when I found this blog / online magazine, I was reading a book of a similar title written by Patricia Volk. This site embodies all the qualities that I aspire for my blog to have.
Now you might think that this blog is of a totally different context but actually it's not. This is one of my most favorite blogs. When I first got engaged (about 2 years ago – now already married), I asked myself, “What kind of wife do I wish to be?”. I’ve always known that I wanted to work from home, so I suppose that would also make me a housewife. But I refuse to believe that the only way to be a housewife is to wake up at the break of dawn, get breakfast ready, do chores all day and end up tired in bed dressed in nothing but a t-shirt and sweats. This blog gave me hope, not only telling me but also showing me that it’s possible to work from home, be a successful housewife and also be glamorous.
So there you go, my inspirations for the birth of this blog. Go ahead and visit those blogs, take a look around and say ‘Hi!’. I hope they inspire you as much as they did for me. And if there are similar blogs that you would like to share, go ahead and post the links in the comment section.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pywikibot
from pywikibot import Category
from pywikibot import pagegenerators
import re
from klasa import *
from main import *
def addOdczas(title):
    # For the Polish verb entry *title*, create stub entries for its verbal
    # noun (rzeczownik odczasownikowy) and the negated 'nie...' form.
    # Reads the '(z)robienie' parameter from the verb's conjugation template;
    # new pages are only saved when they do not already exist on the wiki.
    # Matches the '|robienie =' / '|zrobienie =' template parameter up to '}}' or newline.
    re_tabelkaAttr = re.compile(r'\|\s*?(z|)robienie\s*?=(.*?)(?=}}|\n)')
    word = Haslo(title)
    log = ''
    # word.type == 3: presumably a parsed multi-language entry -- TODO confirm against klasa.Haslo
    if word.type == 3 and word.listLangs:
        for sekcja in word.listLangs:
            if sekcja.lang == 'polski':
                sekcja.pola()
                # Only proceed if the section has a conjugation ('odmiana') field.
                try: sekcja.odmiana
                except AttributeError:
                    pass
                else:
                    s_tabelkaAttr = re.search(re_tabelkaAttr, sekcja.odmiana.text)
                    if s_tabelkaAttr:
                        odczasownikowy = s_tabelkaAttr.group(2).strip()
                        if odczasownikowy:
                            # Flag set when the noun does not end in -enie/-anie/-cie,
                            # in which case no declension table can be generated.
                            enieaniestop = 0
                            czasownik = sekcja.title
                            nowe = Haslo(odczasownikowy)
                            if nowe.type == 0:
                                # Target page is a redirect; just log it.
                                log += '*[[%s]] - redirect' % (odczasownikowy)
                            elif nowe.type == 1 and ' ' not in odczasownikowy:
                                # Build a new Polish noun section for the verbal noun.
                                nowaSekcja = LanguageSection(title=odczasownikowy, type=9, lang='język polski')
                                nowaSekcja.znaczeniaDetail.append(['\'\'rzeczownik, rodzaj nijaki\'\'', '\n: (1.1) {{rzecz}} {{odczas}} \'\'od\'\' [[%s]]' % czasownik])
                                if odczasownikowy[-4:] == 'enie' or odczasownikowy[-4:] == 'anie' or odczasownikowy[-3:] == 'cie':
                                    # Generate the singular declension table from the stem.
                                    pre = odczasownikowy[:-2]
                                    nowaSekcja.odmiana.text = '\n: (1.1) {{odmiana-rzeczownik-polski\n|Mianownik lp = %sie\n|Dopełniacz lp = %sia\n|Celownik lp = %siu\n|Biernik lp = %sie\n|Narzędnik lp = %siem\n|Miejscownik lp = %siu\n|Wołacz lp = %sie\n}}' % (pre, pre, pre, pre, pre, pre, pre)
                                else:
                                    enieaniestop = 1
                                if not enieaniestop:
                                    nowaSekcja.antonimy.text = '\n: (1.1) [[nie%s]]' % odczasownikowy
                                nowaSekcja.saveChanges()
                                page = pywikibot.Page(site, odczasownikowy)
                                # Only create the page if it does not exist yet.
                                try: page.get()
                                except pywikibot.NoPage:
                                    page.put(nowaSekcja.content, comment='dodanie hasła o rzeczowniku odczasownikowym na podstawie [[%s]]' % czasownik)
                            # Same procedure for the negated 'nie...' form.
                            nieodczasownikowy = 'nie' + odczasownikowy
                            nowe = Haslo(nieodczasownikowy)
                            if nowe.type == 0:
                                log += '*[[%s]] - redirect' % (nieodczasownikowy)
                            elif nowe.type == 1 and ' ' not in nieodczasownikowy:
                                nowaSekcja = LanguageSection(title=nieodczasownikowy, type=9, lang='język polski')
                                nowaSekcja.znaczeniaDetail.append(['\'\'rzeczownik, rodzaj nijaki\'\'', '\n: (1.1) {{rzecz}} {{odczas}} \'\'od\'\' [[nie]] [[%s]]' % czasownik])
                                if not enieaniestop:
                                    pre = nieodczasownikowy[:-3]
                                    nowaSekcja.odmiana.text = '\n: (1.1) {{odmiana-rzeczownik-polski\n|Mianownik lp = %snie\n|Dopełniacz lp = %snia\n|Celownik lp = %sniu\n|Biernik lp = %snie\n|Narzędnik lp = %sniem\n|Miejscownik lp = %sniu\n|Wołacz lp = %snie\n}}' % (pre, pre, pre, pre, pre, pre, pre)
                                    nowaSekcja.antonimy.text = '\n: (1.1) [[%s]]' % odczasownikowy
                                nowaSekcja.saveChanges()
                                page = pywikibot.Page(site, nieodczasownikowy)
                                try: page.get()
                                except pywikibot.NoPage:
                                    page.put(nowaSekcja.content, comment='dodanie hasła o rzeczowniku odczasownikowym na podstawie [[%s]]' % czasownik)
def main():
    """Initialise the shared bot state and process the hard-coded verb list."""
    global odmOlafa, site
    odmOlafa = OdmianaOlafa()
    site = pywikibot.Site()
    ndk_template = pywikibot.Page(site, 'Szablon:ndk')
    # Originally iterated over all pages transcluding the template:
    #lista = pagegenerators.ReferringPageGenerator(ndk_template, True, True, True)
    titles = ['poszukać']
    for title in titles:
        addOdczas(title)
if __name__ == '__main__':
    try:
        main()
    finally:
        # Always let pywikibot shut down cleanly, even if main() raised.
        pywikibot.stopme()
|
ST. PAUL, Minn. (AP) Minnesota Wild defenseman Ryan Suter has been the latest player on the team to come down with the mumps.
Suter's streak of 153 straight games played plus 18 in the playoffs ended Wednesday with his absence against Montreal, his first since joining the Wild in 2012.
Suter was held out of practice Thursday, and coach Mike Yeo confirmed afterward to reporters that the NHL's leader in ice time for three straight seasons has been diagnosed with the virus.
Yeo, however, declined to rule out Suter for the game Friday against Anaheim.
Suter is the fifth Wild player, all defensemen, believed to have been stricken by the mumps this season. The virus has caused absences ranging from two to eight games for Keith Ballard, Jonas Brodin, Christian Folin and Marco Scandella.
|
"""
Star Wars: Dark Forces
GOB Container Functions
"""
import string
import struct
# Sizes (in bytes) of the fixed on-disk structures of a GOB container.
GOB_HEADER_SIZE = 8
GOB_CATALOG_OFFSET_SIZE = 4
GOB_CATALOG_ENTRY_SIZE = 21
# Maximum GOB file size: largest signed 32-bit integer value.
GOB_MAX_SIZE = 2 ** 31 - 1
class GOBException(Exception):
    """Raised for errors while building or validating GOB containers."""
    pass
def is_valid_entry_name(filename):
    """Return whether a name is valid as an entry name.

    Checks a name against an assortment of DOS-like filename rules:
    non-empty, at most one '.' separator, 1-8 allowed characters in the
    name, the name is not a reserved DOS device name, and (if a separator
    is present) 1-3 allowed characters in the extension.

    :param filename: The name to check
    :return: bool
    """
    allowed = string.ascii_letters + string.digits + "_^$~!#%&-{}@`'()"
    reserved = ['CON', 'PRN', 'AUX', 'CLOCK$', 'NUL',
                'COM0', 'COM1', 'COM2', 'COM3', 'COM4', 'COM5', 'COM6', 'COM7', 'COM8', 'COM9',
                'LPT0', 'LPT1', 'LPT2', 'LPT3', 'LPT4', 'LPT5', 'LPT6', 'LPT7', 'LPT8', 'LPT9',
                'LST', 'KEYBD$', 'SCREEN$', '$IDLE$', 'CONFIG$']
    # Cannot be empty or None.
    if not filename:
        return False
    # Separator cannot appear more than once.
    if filename.count('.') > 1:
        return False
    # Split into name and extension.
    name, separator, extension = filename.partition('.')
    # Check name length.
    name_len_ok = (0 < len(name) <= 8)
    # Check name characters.
    name_char_ok = all(c in allowed for c in name)
    # Check name reservation.
    name_reserved_ok = (name.upper() not in reserved)
    # Default to valid extension checks.
    ext_len_ok = True
    ext_char_ok = True
    # Must have a valid extension if separator is present.
    if separator:
        # Check extension length.
        ext_len_ok = (0 < len(extension) <= 3)
        # Check extension characters.
        # Bug fix: this previously re-validated the characters of *name*,
        # so invalid extension characters were silently accepted.
        ext_char_ok = all(c in allowed for c in extension)
    # Reserved names do not apply to extensions.
    return ((name_len_ok and name_char_ok and name_reserved_ok)
            and (ext_len_ok and ext_char_ok))
def get_gob_size(entries):
    """Return a tuple of size information given a list of entries.

    Projects the meta data size and raw data size of a GOB if it were
    created with the given list of entries.

    :param entries: List of GOB entry tuples [(str, bytes), ...] where each
        tuple represents (name, data) of the entry
    :return: Tuple (meta_size, data_size) in bytes
    """
    # Meta data: header + catalog entry-count field + one catalog record
    # per entry.
    meta_size = (GOB_HEADER_SIZE + GOB_CATALOG_OFFSET_SIZE
                 + GOB_CATALOG_ENTRY_SIZE * len(entries))
    # Raw data: the concatenated payloads.
    data_size = sum(len(payload) for _, payload in entries)
    return (meta_size, data_size)
def read(filename):
    """Reads a GOB container and returns all stored files.

    :param filename: Path to the GOB to read
    :return: List of GOB entry tuples [(str, bytes), ...] where each tuple
        represents (name, data) of the entry, or None if the file does not
        start with the b'GOB\\n' magic
    """
    with open(filename, 'rb') as gob:
        if gob.read(4) != b'GOB\n':
            return
        (catalog_offset,) = struct.unpack('<i', gob.read(4))
        gob.seek(catalog_offset)
        (num_entries,) = struct.unpack('<i', gob.read(4))
        entries = []
        for index in range(num_entries):
            data_offset, data_length = struct.unpack('<ii', gob.read(8))
            raw_name = gob.read(13)
            terminator = raw_name.find(0)
            if terminator < 0:
                # Malformed catalog record: keep the full 13 bytes as the name
                # and warn, matching the original best-effort behaviour.
                name = raw_name.decode('ascii')
                print(f'catalog entry {index} has no null terminator in its filename "{name}"')
            else:
                name = raw_name[:terminator].decode('ascii')
            payload = b''
            if data_length > 0:
                # Remember the catalog position, fetch the payload, resume.
                resume = gob.tell()
                gob.seek(data_offset)
                payload = gob.read(data_length)
                gob.seek(resume)
            entries.append((name, payload))
    return entries
def write(filename, entries):
    """Writes a GOB container given a path and a list of GOB entries.

    :param filename: Path to write the GOB to
    :param entries: List of GOB entry tuples [(str, bytes), ...] where each
        tuple represents (name, data) of the entry
    :return: None
    :raises GOBException: if the container would exceed the maximum size or
        an entry name fails is_valid_entry_name()
    """
    meta_size, data_size = get_gob_size(entries)
    if meta_size + data_size > GOB_MAX_SIZE:
        raise GOBException('Cannot create GOB because it would exceed maximum size.')
    for name, _ in entries:
        if not is_valid_entry_name(name):
            raise GOBException('"' + name + '" is an invalid entry name.')
    with open(filename, 'wb') as gob:
        # Header: magic + offset of the catalog (which follows the raw data).
        gob.write(b'GOB\n')
        gob.write(struct.pack('<i', GOB_HEADER_SIZE + data_size))
        # Raw payloads, back to back, starting right after the header.
        for _, payload in entries:
            gob.write(payload)
        # Catalog: entry count, then one (offset, length, name) record each.
        gob.write(struct.pack('<i', len(entries)))
        position = GOB_HEADER_SIZE
        for name, payload in entries:
            gob.write(struct.pack('<ii', position, len(payload)))
            gob.write(struct.pack('13s', name.encode('ascii')))
            position += len(payload)
|
Congratulations! Jim Chim-in-ey is listed as one of the Top 3 Chimney sweeps in Bournemouth.
"We would like to Thank You for providing consistent high quality in your area of business. Our review team either approved or updated your business listing using our 50-Point Inspection which includes everything from checking reputation, history, complaints, ratings, satisfaction, nearness, trust and cost to the general excellence."
|
# -*- coding: utf-8 -*-
"""
pyap.source_CA.data
~~~~~~~~~~~~~~~~~~~~
This module provides regular expression definitions required for
detecting Canada addresses.
The module is expected to always contain 'full_address' variable containing
all address parsing definitions.
:copyright: (c) 2015 by Vladimir Goncharov.
:license: MIT, see LICENSE for more details.
"""
import re
''' Numerals from one to nine
Note: here and below we use syntax like '[Oo][Nn][Ee]'
instead of '(one)(?i)' to match 'One' or 'oNe' because
Python Regexps don't seem to support turning On/Off
case modes for subcapturing groups.
'''
# Spelled-out numerals, each terminated by an escaped space ('\ ').
# NOTE(review): despite the name, this alternation also covers "Ten"
# through "Nineteen". Some lines below end in a bare '\' before the
# newline; if upstream intended '\ ' and the trailing space was lost,
# that escape now matches a newline -- verify against upstream pyap.
zero_to_nine = r"""(?:
[Zz][Ee][Rr][Oo]\ |[Oo][Nn][Ee]\ |[Tt][Ww][Oo]\ |
[Tt][Hh][Rr][Ee][Ee]\ |[Ff][Oo][Uu][Rr]\ |
[Ff][Ii][Vv][Ee]\ |[Ss][Ii][Xx]\ |
[Ss][Ee][Vv][Ee][Nn]\ |[Ee][Ii][Gg][Hh][Tt]\ |
[Nn][Ii][Nn][Ee]\ |[Tt][Ee][Nn]\ |
[Ee][Ll][Ee][Vv][Ee][Nn]\ |
[Tt][Ww][Ee][Ll][Vv][Ee]\ |
[Tt][Hh][Ii][Rr][Tt][Ee][Ee][Nn]\ |
[Ff][Oo][Uu][Rr][Tt][Ee][Ee][Nn]\ |
[Ff][Ii][Ff][Tt][Ee][Ee][Nn]\ |
[Ss][Ii][Xx][Tt][Ee][Ee][Nn]\ |
[Ss][Ee][Vv][Ee][Nn][Tt][Ee][Ee][Nn]\ |
[Ee][Ii][Gg][Hh][Tt][Ee][Ee][Nn]\ |
[Nn][Ii][Nn][Ee][Tt][Ee][Ee][Nn]\
)
"""
# Numerals - 10, 20, 30 ... 90
# Both the correct "Forty" and the common misspelling "Fourty" are listed.
ten_to_ninety = r"""(?:
[Tt][Ee][Nn]\ |[Tt][Ww][Ee][Nn][Tt][Yy]\ |
[Tt][Hh][Ii][Rr][Tt][Yy]\ |
[Ff][Oo][Rr][Tt][Yy]\ |
[Ff][Oo][Uu][Rr][Tt][Yy]\ |
[Ff][Ii][Ff][Tt][Yy]\ |[Ss][Ii][Xx][Tt][Yy]\ |
[Ss][Ee][Vv][Ee][Nn][Tt][Yy]\ |
[Ee][Ii][Gg][Hh][Tt][Yy]\ |
[Nn][Ii][Nn][Ee][Tt][Yy]\
)"""
# One hundred
hundred = r"""(?:
[Hh][Uu][Nn][Dd][Rr][Ee][Dd]\
)"""
# One thousand
thousand = r"""(?:
[Tt][Hh][Oo][Uu][Ss][Aa][Nn][Dd]\
)"""
'''
Regexp for matching street number.
Street number can be written 2 ways:
1) Using letters - "One thousand twenty two"
2) Using numbers
a) - "1022"
b) - "85-1190"
"85 - 1190"
"85th - 1190"
c) - "85 1190"
'''
# Named group "street_number". {from_to} is substituted with '{1,5}'
# below, i.e. the repetition bounds for both spelled-out words and digit
# runs. NOTE(review): two lines below end in a bare '\' before the
# newline; if upstream intended '\ ' (escaped space) and the trailing
# space was lost, those escapes now match a newline -- verify upstream.
street_number = r"""(?<![\.0-9])(?P<street_number>
(?:
[Aa][Nn][Dd]\
|
{thousand}
|
{hundred}
|
{zero_to_nine}
|
{ten_to_ninety}
){from_to}
|
# 85th - 1190
(?:\d{from_to}(?:th)?
(?:\ ?\-?\ ?\d{from_to}(?:th)?)?\
)
|
# 45
(?:\d{from_to}(?=[\ ,]))
)
""".format(thousand=thousand,
           hundred=hundred,
           zero_to_nine=zero_to_nine,
           ten_to_ninety=ten_to_ninety,
           from_to='{1,5}')
'''
Regexp for matching street name.
In example below:
"Hoover Boulevard": "Hoover" is a street name
'''
# Named group "street_name": a word character followed by up to 30
# word/digit/apostrophe/hyphen/space/dot characters, matched lazily.
street_name = r"""(?P<street_name>
\w[\w0-9\'\-\ \.]{0,30}?
)
"""
# Direction after the street name, in English and French: full words,
# two-letter abbreviations, and single letters. {d} is substituted with
# '[\ ,]' below, so each form must be followed by a space or comma.
post_direction = r"""
(?P<post_direction>
(?:
# English
[Nn][Oo][Rr][Tt][Hh]{d}|
[Ss][Oo][Uu][Tt][Hh]{d}|
[Ee][Aa][Ss][Tt]{d}|
[Ww][Ee][Ss][Tt]{d}|
[Nn][Oo][Rr][Tt][Hh][Ee][Aa][Ss][Tt]{d}|
[Nn][Oo][Rr][Tt][Hh][Ww][Ee][Ss][Tt]{d}|
[Ss][Oo][Uu][Tt][Hh][Ee][Aa][Ss][Tt]{d}|
[Ss][Oo][Uu][Tt][Hh][Ww][Ee][Ss][Tt]{d}|
# French
[Ee][Ss][Tt]{d}|
[Nn][Oo][Rr][Dd]{d}|
[Nn][Oo][Rr][Dd]\-[Ee][Ss][Tt]{d}|
[Nn][Oo][Rr][Dd]\-[Oo][Uu][Ee][Ss][Tt]{d}|
[Ss][Uu][Dd]{d}|
[Ss][Uu][Dd]\-[Ee][Ss][Tt]{d}|
[Ss][Uu][Dd]\-[Oo][Uu][Ee][Ss][Tt]{d}|
[Oo][Uu][Ee][Ss][Tt]{d}
)
|
(?:
# English
NW{d}|NE{d}|SW{d}|SE{d}|
# French (missing above)
NO{d}|SO{d}
)
|
(?:
# English
N[\.\ ]|S[\.\ ]|E[\.\ ]|W[\.\ ]|
# French (missing above)
O[\.\ ]
)
)
""".format(d='[\ ,]')
# Regexp for matching street type
# According to
# https://www.canadapost.ca/tools/pg/manual/PGaddress-e.asp#1385939
# Named group "street_type" plus an optional named group "route_id".
# {div} allows up to two trailing dot/space/comma characters after each
# type. Fixed here: several character classes had mismatched letters that
# broke the [Xx]-per-letter case-insensitivity convention (e.g. [Er] in
# BOULEVARD, [Uo] in GROUNDS, [Gg][Gh] in HIGHWAY, [Lk] in WALK).
street_type = r"""
(?P<street_type>
[Aa][Bb][Bb][Ee][Yy]{div}|
[Aa][Cc][Rr][Ee][Ss]{div}|
[Aa][Ll][Ll][Éé][Ee]{div}|
[Aa][Ll][Ll][Ee][Yy]{div}|
[Aa][Uu][Tt][Oo][Rr][Oo][Uu][Tt][Ee]{div}|[Aa][Uu][Tt]{div}|
[Aa][Vv][Ee][Nn][Uu][Ee]{div}|[Aa][Vv][Ee]?{div}|
[Bb][Aa][Yy]{div}|
[Bb][Ee][Aa][Cc][Hh]{div}|
[Bb][Ee][Nn][Dd]{div}|
[Bb][Oo][Uu][Ll][Ee][Vv][Aa][Rr][Dd]{div}|[Bb][Ll][Vv][Dd]{div}|[Bb][Oo][Uu][Ll]{div}|
# Broadway
[Bb][Rr][Oo][Aa][Dd][Ww][Aa][Yy]{div}|
[Bb][Yy]\-?[Pp][Aa][Ss][Ss]{div}|
[Bb][Yy][Ww][Aa][Yy]{div}|
[Cc][Aa][Mm][Pp][Uu][Ss]{div}|
[Cc][Aa][Pp][Ee]{div}|
[Cc][Aa][Rr][Rr][EeÉé]{div}|[Cc][Aa][Rr]{div}|
[Cc][Aa][Rr][Rr][Ee][Ff][Oo][Uu][Rr]{div}|[Cc][Aa][Rr][Rr][Ee][Ff]{div}|
[Cc][Ee][Nn][Tt][Rr][Ee]{div}|[Cc][Tt][Rr]{div}|
[Cc][Ee][Rr][Cc][Ll][Ee]{div}|
[Cc][Hh][Aa][Ss][Ee]{div}|
[Cc][Hh][Ee][Mm][Ii][Nn]{div}|[Cc][Hh]{div}|
[Cc][Ii][Rr][Cc][Ll][Ee]{div}|[Cc][Ii][Rr]{div}|
[Cc][Ii][Rr][Cc][Uu][Ii][Tt]{div}|[Cc][Ii][Rr][Cc][Tt]{div}|
[Cc][Ll][Oo][Ss][Ee]{div}|
[Cc][Oo][Mm][Mm][Oo][Nn]{div}|
[Cc][Oo][Nn][Cc][Ee][Ss][Ss][Ii][Oo][Nn]{div}|[Cc][Oo][Nn][Cc]{div}|
[Cc][Oo][Rr][Nn][Ee][Rr][Ss]{div}|
[Cc][Ôô][Tt][Ee]{div}|
[Cc][Oo][Uu][Rr][Ss]{div}|
[Cc][Oo][Uu][Rr]{div}|
[Cc][Oo][Uu][Rr][Tt]{div}|[Cc][Rr][Tt]{div}|
[Cc][Oo][Vv][Ee]{div}|
[Cc][Rr][Ee][Ss][Cc][Ee][Nn][Tt]{div}|[Cc][Rr][Ee][Ss]{div}|
[Cc][Rr][Oo][Ii][Ss][Ss][Aa][Nn][Tt]{div}|[Cc][Rr][Oo][Ii][Ss]{div}|
[Cc][Rr][Oo][Ss][Ss][Ii][Nn][Gg]{div}|[Cc][Rr][Oo][Ss][Ss]{div}|
[Cc][Uu][Ll]\-[Dd][Ee]\-[Ss][Aa][Cc]{div}|[Cc][Dd][Ss]{div}|
[Dd][Aa][Ll][Ee]{div}|
[Dd][Ee][Ll][Ll]{div}|
[Dd][Ii][Vv][Ee][Rr][Ss][Ii][Oo][Nn]{div}|[Dd][Ii][Vv][Ee][Rr][Ss]{div}|
[Dd][Oo][Ww][Nn][Ss]{div}|
[Dd][Rr][Ii][Vv][Ee]{div}|[Dd][Rr]{div}|
[ÉéEe][Cc][Hh][Aa][Nn][Gg][Ee][Uu][Rr]{div}|[ÉéEe][Cc][Hh]{div}|
[Ee][Nn][Dd]{div}|
[Ee][Ss][Pp][Ll][Aa][Nn][Aa][Dd][Ee]{div}|[Ee][Ss][Pp][Ll]{div}|
[Ee][Ss][Tt][Aa][Tt][Ee][Ss]?{div}|
[Ee][Xx][Pp][Rr][Ee][Ss][Ss][Ww][Aa][Yy]{div}|[Ee][Xx][Pp][Yy]{div}|
[Ee][Xx][Tt][Ee][Nn][Ss][Ii][Oo][Nn]{div}|[Ee][Xx][Tt][Ee][Nn]{div}|
[Ff][Aa][Rr][Mm]{div}|
[Ff][Ii][Ee][Ll][Dd]{div}|
[Ff][Oo][Rr][Ee][Ss][Tt]{div}|
[Ff][Rr][Ee][Ee][Ww][Aa][Yy]{div}|[Ff][Ww][Yy]{div}|
[Ff][Rr][Oo][Nn][Tt]{div}|
[Gg][Aa][Rr][Dd][Ee][Nn][Ss]{div}|[Gg][Dd][Nn][Ss]{div}|
[Gg][Aa][Tt][Ee]{div}|
[Gg][Ll][Aa][Dd][Ee]{div}|
[Gg][Ll][Ee][Nn]{div}|
[Gg][Rr][Ee][Ee][Nn]{div}|
[Gg][Rr][Oo][Uu][Nn][Dd][Ss]{div}|[Gg][Rr][Nn][Dd][Ss]{div}|
[Gg][Rr][Oo][Vv][Ee]{div}|
[Hh][Aa][Rr][Bb][Oo][Uu][Rr]{div}|[Hh][Aa][Rr][Bb][Rr]{div}|
[Hh][Ee][Aa][Tt][Hh]{div}|
[Hh][Ee][Ii][Gg][Hh][Tt][Ss]{div}|[Hh][Tt][Ss]{div}|
[Hh][Ii][Gg][Hh][Ll][Aa][Nn][Dd][Ss]{div}|[Hh][Gg][Hh][Ll][Dd][Ss]{div}|
[Hh][Ii][Gg][Hh][Ww][Aa][Yy]{div}|[Hh][Ww][Yy]{div}|
[Hh][Ii][Ll][Ll]{div}|
[Hh][Oo][Ll][Ll][Oo][Ww]{div}|
[ÎîIi][Ll][Ee]{div}|
[Ii][Mm][Pp][Aa][Ss][Ss][Ee]{div}|[Ii][Mm][Pp]{div}|
[Ii][Nn][Ll][Ee][Tt]{div}|
[Ii][Ss][Ll][Aa][Nn][Dd]{div}|
[Kk][Ee][Yy]{div}|
[Kk][Nn][Oo][Ll][Ll]{div}|
[Ll][Aa][Nn][Dd][Ii][Nn][Gg]{div}|[Ll][Aa][Nn][Dd][Nn][Gg]{div}|
[Ll][Aa][Nn][Ee]{div}|
[Ll][Ii][Mm][Ii][Tt][Ss]{div}|[Ll][Mm][Tt][Ss]{div}|
[Ll][Ii][Nn][Ee]{div}|
[Ll][Ii][Nn][Kk]{div}|
[Ll][Oo][Oo][Kk][Oo][Uu][Tt]{div}|[Ll][Kk][Oo][Uu][Tt]{div}|
[Mm][Aa][Ii][Nn][Ww][Aa][Yy]{div}|
[Mm][Aa][Ll][Ll]{div}|
[Mm][Aa][Nn][Oo][Rr]{div}|
[Mm][Aa][Zz][Ee]{div}|
[Mm][Ee][Aa][Dd][Oo][Ww]{div}|
[Mm][Ee][Ww][Ss]{div}|
[Mm][Oo][Nn][Tt][Éé][Ee]{div}|
[Mm][Oo][Oo][Rr]{div}|
[Mm][Oo][Uu][Nn][Tt][Aa][Ii][Nn]{div}|[Mm][Tt][Nn]{div}|
[Mm][Oo][Uu][Nn][Tt]{div}|
[Oo][Rr][Cc][Hh][Aa][Rr][Dd]{div}|[Oo][Rr][Cc][Hh]{div}|
[Pp][Aa][Rr][Aa][Dd][Ee]{div}|
[Pp][Aa][Rr][Cc]{div}|
[Pp][Aa][Rr][Kk][Ww][Aa][Yy]{div}|[Pp][Kk][Yy]{div}|
[Pp][Aa][Rr][Kk]{div}|[Pp][Kk]{div}|
[Pp][Aa][Ss][Ss][Aa][Gg][Ee]{div}|[Pp][Aa][Ss][Ss]{div}|
[Pp][Aa][Tt][Hh]{div}|
[Pp][Aa][Tt][Hh][Ww][Aa][Yy]{div}|[Pp][Tt][Ww][Aa][Yy]{div}|
[Pp][Ii][Nn][Ee][Ss]{div}|
[Pp][Ll][Aa][Cc][Ee]{div}|[Pp][Ll]{div}|
[Pp][Ll][Aa][Tt][Ee][Aa][Uu]{div}|[Pp][Ll][Aa][Tt]{div}|
[Pp][Ll][Aa][Zz][Aa]{div}|
[Pp][Oo][Ii][Nn][Tt][Ee]{div}|
[Pp][Oo][Ii][Nn][Tt]{div}|[Pp][Tt]{div}|
[Pp][Oo][Rr][Tt]{div}|
[Pp][Rr][Ii][Vv][Aa][Tt][Ee]{div}|[Pp][Vv][Tt]{div}|
[Pp][Rr][Oo][Mm][Ee][Nn][Aa][Dd][Ee]{div}|[Pp][Rr][Oo][Mm]{div}|
[Qq][Uu][Aa][Ii]{div}|
[Qq][Uu][Aa][Yy]{div}|
[Rr][Aa][Mm][Pp]{div}|
[Rr][Aa][Nn][Gg][Ee]{div}|[Rr][Gg]{div}|
[Rr][Aa][Nn][Gg]{div}|
[Rr][Ii][Dd][Gg][Ee]{div}|
[Rr][Ii][Ss][Ee]{div}|
[Rr][Oo][Aa][Dd]{div}|[Rr][Dd]{div}|
[Rr][Oo][Nn][Dd]\-[Pp][Oo][Ii][Nn][Tt]{div}|[Rr][Dd][Pp][Tt]{div}|
[Rr][Oo][Uu][Tt][Ee]{div}|[Rr][Tt][Ee]{div}|
[Rr][Oo][Ww]{div}|
[Rr][Uu][Ee][Ll][Ll][Ee]{div}|[Rr][Ll][Ee]{div}|
[Rr][Uu][Ee]{div}|
[Rr][Uu][Nn]{div}|
[Ss][Ee][Nn][Tt][Ii][Ee][Rr]{div}|[Ss][Ee][Nn][Tt]{div}|
# Street
[Ss][Tt][Rr][Ee][Ee][Tt]{div}|[Ss][Tt](?![A-Za-z]){div}|
# Square
[Ss][Qq][Uu][Aa][Rr][Ee]{div}|[Ss][Qq]{div}|
[Ss][Uu][Bb][Dd][Ii][Vv][Ii][Ss][Ii][Oo][Nn]{div}|[Ss][Uu][Bb][Dd][Ii][Vv]{div}|
[Tt][Ee][Rr][Rr][Aa][Cc][Ee]{div}|[Tt][Ee][Rr][Rr]{div}|
[Tt][Ee][Rr][Rr][Aa][Ss][Ss][Ee]{div}|[Tt][Ss][Ss][Ee]{div}|
[Tt][Hh][Ii][Cc][Kk][Ee][Tt]{div}|[Tt][Hh][Ii][Cc][Kk]{div}|
[Tt][Oo][Ww][Ee][Rr][Ss]{div}|
[Tt][Oo][Ww][Nn][Ll][Ii][Nn][Ee]{div}|[Tt][Ll][Ii][Nn][Ee]{div}|
[Tt][Rr][Aa][Ii][Ll]{div}|
[Tt][Uu][Rr][Nn][Aa][Bb][Oo][Uu][Tt]{div}|[Tt][Rr][Nn][Aa][Bb][Tt]{div}|
[Vv][Aa][Ll][Ee]{div}|
[Vv][Ii][Aa]{div}|
[Vv][Ii][Ee][Ww]{div}|
[Vv][Ii][Ll][Ll][Aa][Gg][Ee]{div}|[Vv][Ii][Ll][Ll][Gg][Ee]{div}|
[Vv][Ii][Ll][Ll][Aa][Ss]{div}|
[Vv][Ii][Ss][Tt][Aa]{div}|
[Vv][Oo][Ii][Ee]{div}|
[Ww][Aa][Ll][Kk]{div}|
[Ww][Aa][Yy]{div}|
[Ww][Hh][Aa][Rr][Ff]{div}|
[Ww][Oo][Oo][Dd]{div}|
[Ww][Yy][Nn][Dd]{div}
)
(?P<route_id>
[\(\ \,]{route_symbols}
[Rr][Oo][Uu][Tt][Ee]\ [A-Za-z0-9]+[\)\ \,]{route_symbols}
)?
""".format(div="[\.\ ,]{0,2}", route_symbols='{0,3}')
# Floor designator, named group "floor": "2A. Floor" or "Floor 2A".
# NOTE(review): both alternatives end in a bare '\' before the newline;
# if upstream intended '\ ' (escaped space) and the trailing space was
# lost, that escape now matches a newline -- verify against upstream.
floor = r"""
(?P<floor>
(?:
\d+[A-Za-z]{0,2}\.?\ [Ff][Ll][Oo][Oo][Rr]\
)
|
(?:
[Ff][Ll][Oo][Oo][Rr]\ \d+[A-Za-z]{0,2}\
)
)
"""
# Building designator: "Building"/"Bldg" plus an optional short id.
# Not a named group here; full_street wraps it as (?P<building_id>...).
building = r"""
(?:
(?:
(?:[Bb][Uu][Ii][Ll][Dd][Ii][Nn][Gg])
|
(?:[Bb][Ll][Dd][Gg])
)
\ \d{0,2}[A-Za-z]?
)
"""
# Occupancy designator (suite/apartment/room/unit in English and French)
# with an optional identifier of 1-7 characters, or a '#'-prefixed id.
# Not a named group here; full_street wraps it as (?P<occupancy>...).
occupancy = r"""
(?:
(?:
(?:
#
# English
#
# Suite
[Ss][Uu][Ii][Tt][Ee]\ |[Ss][Tt][Ee]\.?\
|
# Apartment
[Aa][Pp][Tt]\.?\ |[Aa][Pp][Aa][Rr][Tt][Mm][Ee][Nn][Tt]\
|
# Room
[Rr][Oo][Oo][Mm]\ |[Rr][Mm]\.?\
|
# Unit
[Uu][Nn][Ii][Tt]\
|
#
# French
#
# Apartement
[Aa][Pp][Aa][Rr][Tt][Ee][Mm][Ee][Nn][Tt]\ |A[Pp][Pp]\
|
# Bureau
[Bb][Uu][Rr][Ee][Aa][Uu]\
|
# Unité
[Uu][Nn][Ii][Tt][Éé]\
)
(?:
[A-Za-z\#\&\-\d]{1,7}
)?
)
|
(?:
\#[0-9]{,3}[A-Za-z]{1}
)
)\ ?
"""
# Postal box in English ("PO Box 123") and French ("B.P."/"C.P."/"Case
# postale") forms, captured as named group "postal_box".
po_box = r"""
(?P<postal_box>
# English - PO Box 123
(?:[Pp]\.?\ ?[Oo]\.?\ [Bb][Oo][Xx]\ \d+)
|
# French - B.P. 123
(?:[Bb]\.?\ [Pp]\.?\ \d+)
|
# C.P. 123
(?:[Cc]\.?\ [Pp]\.?\ \d+)
|
# Case postale 123
(?:[Cc]ase\ [Pp][Oo][Ss][Tt][Aa][Ll][Ee]\ \d+)
|
# C.P. 123
(?:[Cc]\.[Pp]\.\ \d+)
)
"""
'''Define detection rules for a second type of address format
(the French one)
'''
# Clone each pattern with "_b"-suffixed group names so both address
# layouts can coexist in one compiled regex without duplicate group names.
street_number_b = re.sub('<([a-z\_]+)>', r'<\1_b>', street_number)
street_name_b = re.sub('<([a-z\_]+)>', r'<\1_b>', street_name)
street_type_b = re.sub('<([a-z\_]+)>', r'<\1_b>', street_type)
po_box_b = re.sub('<([a-z\_]+)>', r'<\1_b>', po_box)
post_direction_b = re.sub('<([a-z\_]+)>', r'<\1_b>', post_direction)
# Zero-width check that the next text is a postal box or a space/comma;
# used by full_street after the French-layout street name.
po_box_positive_lookahead = r"""
(?=
# English - PO Box 123
(?:[Pp]\.?\ ?[Oo]\.?\ [Bb][Oo][Xx]\ \d+)
|
# French - B.P. 123
(?:[Bb]\.?\ [Pp]\.?\ \d+)
|
# C.P. 123
(?:[Cc]\.?\ [Pp]\.?\ \d+)
|
# Case postale 123
(?:[Cc]ase\ [Pp][Oo][Ss][Tt][Aa][Ll][Ee]\ \d+)
|
# C.P. 123
(?:[Cc]\.[Pp]\.\ \d+)
|
(?:[\ \,])
)
"""
# Two alternative street layouts:
#  - French order (number, type, name), captured as "full_street_b";
#  - English order (number, name, type, direction, floor, building,
#    occupancy, postal box), captured as "full_street".
# {div} separates components with up to two space/comma characters.
full_street = r"""
(?:
# Format commonly used in French
(?P<full_street_b>
{street_number_b}{div}
{street_type_b}{div}
({street_name_b} {po_box_positive_lookahead})?\,?\ ?
{post_direction_b}?\,?\ ?
{po_box_b}?\,?\ ?
)
|
# Format commonly used in English
(?P<full_street>
{street_number}\,?\ ?
{street_name}?\,?\ ?
(?:(?<=[\ \,]){street_type})\,?\ ?
{post_direction}?\,?\ ?
{floor}?\,?\ ?
(?P<building_id>
{building}
)?\,?\ ?
(?P<occupancy>
{occupancy}
)?\,?\ ?
{po_box}?
)
)""".format(street_number=street_number,
            street_number_b=street_number_b,
            street_name=street_name,
            street_name_b=street_name_b,
            street_type=street_type,
            street_type_b=street_type_b,
            post_direction=post_direction,
            post_direction_b=post_direction_b,
            floor=floor,
            building=building,
            occupancy=occupancy,
            po_box=po_box,
            po_box_b=po_box_b,
            po_box_positive_lookahead=po_box_positive_lookahead,
            div='[\ ,]{1,2}',
            )
# region1 here is actually a "province"
# Named group "region1": English abbreviations, then full English names,
# then full French names. Fixed here: (a) [Eu] in BRITANNIQUE broke the
# [Xx]-per-letter case convention; (b) several multi-word names ended a
# line with a bare '\', which in a verbose pattern escapes the newline
# itself -- the escaped space now starts the continuation line instead,
# so e.g. "Newfoundland and Labrador" matches on one line.
region1 = r"""
(?P<region1>
(?:
# province abbreviations (English)
A\.?B\.?|B\.?C\.?|M\.?B\.?|N\.?B\.?|N\.?L\.?|
N\.?T\.?|N\.?S\.?|N\.?U\.?|O\.?N\.?|P\.?E\.?|
Q\.?C\.?|S\.?K\.?|Y\.?T\.?
)
|
(?:
# provinces full (English)
[Aa][Ll][Bb][Ee][Rr][Tt][Aa]|
[Bb][Rr][Ii][Tt][Ii][Ss][Hh]\ [Cc][Oo][Ll][Uu][Mm][Bb][Ii][Aa]|
[Mm][Aa][Nn][Ii][Tt][Oo][Bb][Aa]|
[Nn][Ee][Ww]\ [Bb][Rr][Uu][Nn][Ss][Ww][Ii][Cc][Kk]|
[Nn][Ee][Ww][Ff][Oo][Uu][Nn][Dd][Ll][Aa][Nn][Dd]
\ [Aa][Nn][Dd]\ [Ll][Aa][Bb][Rr][Aa][Dd][Oo][Rr]|
[Nn][Ee][Ww][Ff][Oo][Uu][Nn][Dd][Ll][Aa][Nn][Dd]
\ \&\ [Ll][Aa][Bb][Rr][Aa][Dd][Oo][Rr]|
[Nn][Oo][Rr][Tt][Hh][Ww][Ee][Ss][Tt]
\ [Tt][Ee][Rr][Rr][Ii][Tt][Oo][Rr][Ii][Ee][Ss]|
[Nn][Oo][Vv][Aa]\ [Ss][Cc][Oo][Tt][Ii][Aa]|
[Nn][Uu][Nn][Aa][Vv][Uu][Tt]|
[Oo][Nn][Tt][Aa][Rr][Ii][Oo]|
[Pp][Rr][Ii][Nn][Cc][Ee]\ [Ee][Dd][Ww][Aa][Rr][Dd]
\ [Ii][Ss][Ll][Aa][Nn][Dd]|
[Qq][Uu][Ee][Bb][Ee][Cc]|
[Ss][Aa][Ss][Kk][Aa][Tt][Cc][Hh][Ee][Ww][Aa][Nn]|
[Yy][Uu][Kk][Oo][Nn]|
# provinces full (French)
[Cc][Oo][Ll][Oo][Mm][Bb][Ii][Ee]\-
[Bb][Rr][Ii][Tt][Aa][Nn]{1,2}[Ii][Qq][Uu][Ee]|
[Nn][Oo][Uu][Vv][Ee][Aa][Uu]\-[Bb][Rr][Uu][Nn][Ss][Ww][Ii][Cc][Kk]|
[Tt][Ee][Rr][Rr][Ee]\-[Nn][Ee][Uu][Vv][Ee]\-
[Ee][Tt]\-[Ll][Aa][Bb][Rr][Aa][Dd][Oo][Rr]|
[Tt][Ee][Rr][Rr][Ii][Tt][Oo][Ii][Rr][Ee][Ss]\ [Dd][Uu]
\ [Nn][Oo][Rr][Dd]\-[Oo][Uu][Ee][Ss][Tt]|
[Nn][Oo][Uu][Vv][Ee][Ll][Ll][Ee]\-[ÉéEe][Cc][Oo][Ss][Ss][Ee]|
[ÎîIi][Ll][Ee]\-[Dd][Uu]\-[Pp][Rr][Ii][Nn][Cc][Ee]\-
[ÉéEe][Dd][Oo][Uu][Aa][Rr][Dd]|
[Qq][Uu][Éé][Bb][Ee][Cc]
)
)
"""
# Named group "city": a letter (not followed by a digit) and 2-20 more
# word/space/hyphen/quote/dot characters, delimited by space or comma on
# both sides. Fixed here: the first character class was [A-z], which also
# matched '[', '\', ']', '^', '_' and '`' (ASCII 91-96).
city = r"""
(?P<city>
(?<=[\, ])[A-Za-z]{1}(?![0-9]) # city second char should not be number
[\w\ \-\'\.]{2,20}?(?=[\, ])
)
"""
# Named group "postal_code": letter-digit-letter, optional space,
# digit-letter-digit; the leading class excludes D,F,I,O,Q,U,W,Z.
postal_code = r"""
(?P<postal_code>
(?:
[ABCEGHJKLMNPRSTVXY]\d[ABCEGHJKLMNPRSTVWXYZ]\ ?
\d[ABCEGHJKLMNPRSTVWXYZ]\d
)
)
"""
country = r"""
(?:
[Cc][Aa][Nn][Aa][Dd][Aa]
)
"""
# define detection rules for postal code placed in different parts of address
postal_code_b = re.sub('<([a-z\_]+)>', r'<\1_b>', postal_code)
postal_code_c = re.sub('<([a-z\_]+)>', r'<\1_c>', postal_code)
# Top-level pattern (group "full_address"): street, city, optional postal
# code before the province, the province (optionally parenthesised), then
# optional postal code and country. Written in verbose-regex style
# ('\ ' escaped spaces), so it must be compiled with re.VERBOSE.
full_address = r"""
(?P<full_address>
{full_street} {div}
{city} {div}
(?:{postal_code_c} {div})?
\(?{region1}[\)\.]? {div}
(?:
(?:
{postal_code}? {div} {country}?
(?:{div} {postal_code_b})?
)
)
)
""".format(
    full_street=full_street,
    div='[\, ]{,2}',
    city=city,
    region1=region1,
    country=country,
    country_b=country,
    postal_code=postal_code,
    postal_code_b=postal_code_b,
    postal_code_c=postal_code_c,
)
|
Mens peach Ralph Lauren Polo. Size medium.
My Posh Picks · Brand new Polo Ralph Lauren striped men's sweater NWT large men's Polo Ralph Lauren sweater.
My Posh Picks · Lauren Ralph Lauren Paisley Long Sleeve Blouse This beautiful blouse is in very good condition.
My Posh Picks · Men's Polo Ralph Lauren Long Sleeve Gray Tee Brand new with tags. Size small.
|
# pylint: disable=E0611
""" Module for printout out linux device neighbors
"""
from netshow.linux.netjson_encoder import NetEncoder
from netshow.linux import print_iface
import netshowlib.linux.cache as linux_cache
from collections import OrderedDict
import json
from tabulate import tabulate
from netshow.linux.common import _, legend_wrapped_cli_output
class ShowNeighbors(object):
    """
    Class responsible for printing out basic linux device neighbor info
    """
    def __init__(self, cl):
        # ``cl`` is the parsed CLI option dict; honor both long and short
        # spellings of the json and legend flags.
        self.use_json = cl.get('--json') or cl.get('-j')
        self.ifacelist = OrderedDict()
        self.cache = linux_cache
        self.print_iface = print_iface
        self.show_legend = False
        if cl.get('-l') or cl.get('--legend'):
            self.show_legend = True

    def run(self):
        """
        :return: basic neighbor information based on data obtained on netshow-lib
        """
        feature_cache = self.cache.Cache()
        feature_cache.run()
        # Build a print_iface wrapper for every interface that has LLDP data.
        for _ifacename in sorted(feature_cache.lldp.keys()):
            self.ifacelist[_ifacename] = self.print_iface.iface(_ifacename, feature_cache)
        if self.use_json:
            return json.dumps(self.ifacelist,
                              cls=NetEncoder, indent=4)
        return self.print_neighbor_info()

    def print_neighbor_info(self):
        """
        :return: cli output of netshow neighbor
        """
        _header = [_('local'), _('speed'), _('mode'), '',
                   _('remote'), _('sw/hostname'), _('summary')]
        _table = []
        for _iface in self.ifacelist.values():
            lldp_entries = _iface.iface.lldp
            # First LLDP entry gets the full row with local port details.
            first_entry = lldp_entries[0]
            _table.append([_iface.name, _iface.speed,
                           _iface.port_category,
                           '====',
                           first_entry.get('adj_port'),
                           first_entry.get('adj_hostname'),
                           ', '.join(_iface.summary)])
            # Bug fix: previously the first entry was deleted from the
            # interface's lldp list (``del _iface.iface.lldp[0]``), so
            # printing destroyed the underlying neighbor data. Iterate over
            # the remaining entries instead of mutating the list.
            for _entry in lldp_entries[1:]:
                _table.append(['', '', '', '====',
                               _entry.get('adj_port'),
                               _entry.get('adj_hostname')])
        return legend_wrapped_cli_output(tabulate(_table, _header), self.show_legend)
|
Great! Myres 3 Piece Conversation Set with Cushions excellent design By Beachcrest Home. Myres 3 Piece Conversation Set with Cushions very well made, sleek and simple. Complete your living room furniture with a modern Myres 3 Piece Conversation Set with Cushions. It's charming, sturdy, attractive and it looks expensive and a good value for the money. Myres 3 Piece Conversation Set with Cushions is one of the most homey, cozy, beautiful look and exotic Myres 3 Piece Conversation Set with Cushions especially for the price and made of superior products. Great quality, easy to assemble, delivery on time and in best condition. Myres 3 Piece Conversation Set with Cushions is good merchandise at fair prices and amazing free shipping. Guarantee damaged claim by offering to send parts or to keep the item at a discounted price. Great buy would definitely recommend. Shop with our low-price guarantee and find great deals on ##ptitle# and more! Reading the reviews helped you purchase.
Probably the most useful appliance of the home may be the feces. Bar stools are helpful wherever it's needed it may be in the kitchen area or other events at home. Stools could be run by hand that is simple in working. These bar stools could be adjusted by hand to the degree of the comfort zone. There are various kinds of club feces which consists of wood and metals, for this certain kinds of seat there might not have a flexible tool which may hot lower the height from the seat but rather designed in a well sited method. Barstools adds additional kitchen and dining chairs with stylish look. It keeps visitors lingering of the modern home. Finding bar seat with suitable design for house is the tough 1. You need to find the right style with right price and quality of the barstools to be bought. We have to find the ideal peak of the bar stools that matches our height, and need to find the main difference between the club feces and counter feces. Finally, you have to find stools in the right price, right quality with guarantee with needed sturdiness which must satisfy us for that spending.
This is definitely the cutest and ingeniously created loveseat. Contingent in the shade, it gives a disconcerting appearance for your workplace or rooms. The sofa unimaginably imperceptible for a early morning espresso or perhaps an animated journal studying carefully. It's obtainable in leather as well as in texture depending on what you support a brilliant look, or perhaps a beautiful, beautiful look at. The best part, it's very reasonable! The extremely stylish tufting is molded precious stone, which retains an ideal Chesterfield design. Probably the most lovable point about this couch is its form. Combined with the appeal, the rear style gives your back understanding of rest. It is cleverly designed so that smaller rooms consolidate luxury in a smaller sized space. It is a clever decision for the kids space, because it offers several sweet shades. They would like to strike them for reading the story or for a fantastic dream. No trouble for adults too for any comfortable night with Manga or Wonders.
When it comes to buying living room furniture, leather is always a smart option. It doesn't only look good with most designs, but its very durable (its the ideal materials for any household with kids or pets) and its extremely-simple to clean, too. The downside of leather Edwin Loveseat? It can have a much higher cost than fabric, micro-fiber or fake leather furniture. This reclining loveseat stays a budget-friendly choice due to 1 guru trick: its seating area is padded with leather, as the sides are upholstered with more inexpensive fake leather-based. Which means you receive the look and feel of the complete leather loveseat without the hefty price tag. Make use of the lever around the loveseats equip to relax and take it easy on its high-denseness foam filling up, and consider the money you saved. This specific loveseat posseses an additional reward: professional assembly is available in many areas of the nation for the next fee.
Just like its namesake, the camelback sofa has a difficulty or maybe two that highlights the primary outline of this sofa design. This is a traditional style which was made popular within the 1700s by Chippendale and households who wanted aristocratic, official furniture often chosen this style. Today, this sofa style gives a far more formal air to the room, especially when padded inside a formal fabric. A far more casual fabric option will make it suitable for a family room that doesnt need a great deal of formality. Either way, the camelback couch has some elements that characterize the design and style. The thighs are typically exposed, the sofa has no back again soft cushions also it usually has sq . or rolled hands.
This set includes a one left arm sofa established, two armless sofa models and one corner couch set. This provides sufficient space to accommodate your friends and relations. The fabric is 100% rayon for sufficient durability and comfort. The good thing about this sofa set is the mixing and matching of seats within the available space in the room for any ideal shape. As well as enables fitted even in little rooms. It requires just light assembling.It also functions drive cushions for maximum comfort. You might want to do this established. It works well.
This RTA loveseat offers different types of furniture with respect to the material. That significantly incorporates linen and velvet. Sophisticated shades, Marzipan, Stoneware-Light tan and Rye-Dark brown enhance design for the couch. Choose the shading and the material that will depend on the stylistic design of your room. Not only the covering, the material can also mirror the main difference in fashion. Make sure that the material you select matches the theme of the space. This loveseat ballots in favor of combining the present style with an exemplary design. It's been increased through the hands that begin to feed the couch. With this particular blend, you only witness enhanced comfort and stylish design when lighting the pads.
This reclining loveseat takes the 2nd spot because its an excellent affordable, super comfortable choice that may participate in most home decorations designs. Its wood frame is padded with coils (for many springtime) and high-denseness foam (for many soft), and padded with brownish faux leather which makes it an ultra-flexible furniture piece. Add it to the man cave for a comfy destination to watch the game (and do not worry about any spillsits vinyl furniture tends to make cleanup easy) or perhaps your living room with a few decorative throw pillows for that ideal spot to cozy up following a long-day. Real-life clients love this reclining loveseat because its comfortable, durable, simple to put together 1 rater even noted that she could assemble two units in under forty-five minutes! and shipping in the vendor is a breeze.
Usually now used in houses with a great room or open up layout, they are usually large configurations that chair plenty of visitors. In smaller areas, they works well for seating within an area that has a strange part or any other room restriction. The opportunity to combine corner units, finish models and lying areas according to room and individual choice makes this couch style extremely versatile. Sofa sofas also come in a wide variety of designs, from ultra modern or extremely luxe, to much more family-friendly modern designs.
This is the next couch on the checklist. It's additional comfy and incredibly well suited for small rooms or attic residing. It is upholstered in polyester material, potential customers inset control keys that provide an elegant gemstone-tufted design. It is made from durable supplies and the thighs are constructed with long lasting wooden to increase its durability. The loveseat comes with an espresso stained wooden thighs and non-tagging foot hats. It offers an appropriate froth padding and polyester material furniture which makes it very magnificent. It features a longue place that gives an exceptional room for relaxing.
This reclining loveseat takes the second place simply because its a super affordable, extremely comfortable choice that can fit in with most home dcor styles. Its solid wood body is padded with coils (for many springtime) and high-density foam (for some gentle), and upholstered with dark brown faux leather-based that makes it an ultra-flexible piece of furniture. Add it to the man cavern for a cozy destination to view the game (and do not worry about any spillsits vinyl fabric upholstery makes clean-up simple) or perhaps your living room with a couple of decorative toss cushions for the ideal place to cozy up following a long day. Actual-existence clients adore this lying loveseat because its comfortable, long lasting, easy to assemble 1 rater even mentioned that they could assemble two models in under 45 minutes! and delivery in the merchant is easy.
With the recent surge in interest in the Mid-Century Contemporary design genre, this couch design is a hot item. Regardless of whether accurate classic pieces, forgeries or new designs that incorporate Middle-Hundred years Modern elements, these are really versatile couches. Most often utilized in a minimalist or middle-hundred years design plan, they are wonderful for including a retro really feel to some space. The differentiating components range from the exposed thighs and straight line framework. Most mid-hundred years sofas but not every one of them may have some tufting around the back.
Copyright © Myres 3 Piece Conversation Set with Cushions By Beachcrest Home in Outdoor Sofas All right reserved.
|
# Create your views here.
from django.shortcuts import get_object_or_404, render_to_response, get_list_or_404
from django.template import RequestContext
from django.views.generic import TemplateView
from myauth.urls import *
"""
Workaround for migrating direct_to_template function to DirectTemplateView
"""
from django.views.generic import TemplateView
"""
Views for django-signup.
"""
from django.conf import settings
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template.loader import render_to_string
from django.core.mail import send_mail
from django.contrib.auth.models import User
from myauth.models import SignUpProfile, MyUser
from myauth.forms import SignUpForm, ActivateForm
import datetime
from django.contrib.sites.models import get_current_site
def index(request, template_name="myauth/index.html"):
    """Render the accounts index page."""
    # Pass an explicit context instead of locals(): only page_title is
    # template-facing data; locals() leaked request/template_name too.
    context = {'page_title': 'Accounts page'}
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request))
def login(request, template_name="myauth/login.html"):
    """Render the login page."""
    # Explicit context instead of locals(); see index() rationale.
    context = {'page_title': 'Login page'}
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request))
def logout(request, template_name="myauth/logout.html"):
    """Render the logout page."""
    # Explicit context instead of locals(); see index() rationale.
    context = {'page_title': 'Logout page'}
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request))
class profile(TemplateView):
    # Static profile page. NOTE(review): lowercase class name departs from
    # PEP 8; kept as-is because URLconfs may reference it by this name.
    template_name = "myauth/profile.html"
class DirectTemplateView(TemplateView):
    """TemplateView replacement for the removed ``direct_to_template``.

    Entries in ``extra_context`` are merged into the template context;
    callable values are invoked at render time.
    """
    extra_context = None

    def get_context_data(self, **kwargs):
        # Bug fix: ``super(self.__class__, self)`` resolves to the caller's
        # concrete class, causing infinite recursion in any subclass; name
        # this class explicitly instead.
        context = super(DirectTemplateView, self).get_context_data(**kwargs)
        if self.extra_context is not None:
            for key, value in self.extra_context.items():
                # Callables are evaluated lazily, per request.
                context[key] = value() if callable(value) else value
        return context
def _send_activation_email(profile):
    """Render and send the account-activation email for *profile*."""
    # Render activation email
    message = render_to_string('myauth/activation_email.txt',
                               {'signup_key': profile.signup_key,
                                'expiration_days': settings.SIGNUP_EXPIRY_DAYS,
                                # NOTE(review): get_current_site is passed
                                # *uncalled* (it normally takes a request), so
                                # the template receives the function object,
                                # not a Site instance -- confirm intent.
                                'site': get_current_site})
    subject = render_to_string('myauth/activation_email_subject.txt')
    # Email subject *must not* contain newlines
    subject = ''.join(subject.splitlines())
    # Send activation email
    send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [profile.email,],
              fail_silently=False)
def signup(request):
    """Display the sign-up form and, on a valid POST, create a sign-up
    profile and email the activation link."""
    submitted = request.method == 'POST'
    form = SignUpForm(request.POST) if submitted else SignUpForm()
    if submitted and form.is_valid():
        profile = form.save()
        # Generate and send activation email
        _send_activation_email(profile)
        return HttpResponseRedirect('/accounts/signup/checkyouremail')
    # Either a fresh form or a bound form carrying validation errors.
    return render_to_response('myauth/signup_form.html', {'form': form},
                              context_instance=RequestContext(request))
def activate(request, signup_key):
    """Activate an account from the emailed sign-up key.

    Looks up the matching SignUpProfile, and if it has not expired lets the
    user choose a password, creates the MyUser, and deletes the profile.
    Expired or unknown keys redirect to the invalid-key page.
    """
    # Try and get a sign up profile that matches activation key
    # Redirect to failure page if no match.
    # Fix: the original bare `except:` swallowed *every* error (including
    # programming errors); catch only the expected lookup failure.
    try:
        profile = SignUpProfile.objects.get(signup_key=signup_key)
    except SignUpProfile.DoesNotExist:
        return HttpResponseRedirect('/accounts/signup/key_invalid')
    # Check if profile has expired
    if profile.expiry_date > datetime.datetime.now():
        # related with USE_TZ in settings.py! (naive datetime comparison --
        # assumes USE_TZ is False)
        if request.method == 'POST':
            form = ActivateForm(request.POST)
            if form.is_valid():
                # Create a new User instance
                user = MyUser(email=profile.email)
                user.set_password(form.cleaned_data['password1'])
                user.save()
                # Delete the sign up profile
                profile.delete()
                return HttpResponseRedirect('/accounts/signup/success')
        else:
            form = ActivateForm()
    else:
        # Delete expired sign up profile and show invalid key page
        profile.delete()
        return HttpResponseRedirect('/accounts/signup/key_invalid')
    return render_to_response('myauth/activate_form.html', {'form': form, 'user': profile.email},
                              context_instance=RequestContext(request))
|
College of Agricultural and Environmental Sciences (CA&ES) offers an extensive network of advising for undergraduate students, working to assist them in getting the most out of their academic experiences. Students receive general and comprehensive advising along their paths to graduation provided by faculty, staff, and peer advisors.
Business Hours: 8 a.m.–12 p.m, 1 p.m.–5 p.m.
Drop-in Hours: 9 a.m.-12 p.m, 1:30 p.m.-4 p.m.
Advising Hours: 8am–11:30 a.m, 1 p.m.–4:30 p.m.
Students may schedule an appointment with a major advisor using the online Advising Appointment System or by visiting each department's respective website. Drop-in appointments may also be available, as well as peer advisors.
|
import logging
import rv.api
import sunvox
class SunSynth(object):
    """Loads a single SunVox synth module into a playback slot and routes
    MIDI note messages to it."""

    def __init__(self, filename, slot):
        """
        :param filename: path to a .sunsynth file readable by rv.
        :type slot: sunvox.Slot
        """
        logging.debug('filename=%r', filename)
        self.filename = filename
        synth = rv.api.read_sunvox_file(filename)
        # Wrap the loaded module in a fresh project wired to the output.
        self.project = rv.api.Project()
        self.project.attach_module(synth.module)
        self.module = synth.module
        synth.module >> self.project.output
        self.slot = slot
        slot.load(self.project)

    def process_midi(self, message):
        """Translate a mido-style MIDI message into a SunVox slot event.

        note_on with velocity 0 is treated as note_off, per MIDI convention.
        """
        if message.type == 'note_on' and message.velocity > 0:
            # SunVox NOTECMD values are offset by one from MIDI note numbers.
            note = sunvox.NOTECMD(message.note + 1)
            logging.debug('Note on: %r', note)
            logging.debug('Velocity: %r', message.velocity)
            self.slot.send_event(
                track_num=1,
                note=note,
                vel=message.velocity,
                module=self.module,
                ctl=0,
                ctl_val=0,
            )
        elif message.type == 'note_off' or \
                (message.type == 'note_on' and message.velocity == 0):
            # Fix: removed the unused `note = sunvox.NOTECMD(message.note)`
            # lookup -- its result was never sent (NOTE_OFF is), and it could
            # raise ValueError for notes outside the enum range.
            self.slot.send_event(
                track_num=1,
                note=sunvox.NOTECMD.NOTE_OFF,
                vel=0,
                module=self.module,
                ctl=0,
                ctl_val=0,
            )
|
Kelly's Bulk Peanut Brittle. Australian made. Roasted peanuts in delicious caramelised toffee. BIG Bulk box of 3 Kg.
|
# In England the currency is made up of pound, £, and pence, p, and there are eight coins in general circulation:
#
# 1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p).
#
# It is possible to make £2 in the following way:
#
# 1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p
#
# How many different ways can £2 be made using any number of coins?
#
# ans = 73682
#########################################################
import time
#########################################################
num_coins = 8
NO_COIN = 100  # sentinel meaning "no coin chosen yet" (not a valid index)
coin_values = [1, 2, 5, 10, 20, 50, 100, 200]  # values are in pence[p]
target_value = 200
#########################################################
def recursive_solve(coins, coin_id=NO_COIN, sum_coins=None):
    """Count the ways to reach target_value by adding coins.

    :param coins: list of counts per denomination (mutated copy per branch).
    :param coin_id: denomination just added, or NO_COIN at the root; later
        branches only use denominations >= coin_id, so each combination is
        counted once.
    :param sum_coins: running total in pence. Fix: the original recomputed
        the full dot product of counts and values on *every* call; the total
        is now carried through the recursion. ``None`` (the default, for
        external callers) derives it from ``coins`` once, keeping the public
        interface backward compatible.
    :returns: number of distinct combinations summing to target_value.
    """
    if coin_id != NO_COIN:
        coins[coin_id] += 1
        if sum_coins is not None:
            sum_coins += coin_values[coin_id]
    if sum_coins is None:
        sum_coins = sum(a * b for a, b in zip(coins, coin_values))
    if sum_coins == target_value:
        return 1
    elif sum_coins > target_value:
        return 0
    starting_coin = 0 if coin_id == NO_COIN else coin_id
    num_options = 0
    for next_coin in range(starting_coin, num_coins):
        # Copy the counts so sibling branches do not see this branch's coins.
        num_options += recursive_solve(coins[:], next_coin, sum_coins)
    return num_options
#########################################################
def euler_problem_31():
    # Project Euler problem 31: ways to make 200p from the 8 UK coins.
    # NOTE: Python 2 print statements -- this script targets Python 2.
    print "Problem 31:"
    ans = recursive_solve([0,0,0,0,0,0,0,0])
    print "ans = ", ans
#########################################################
# Run the solver once and report the wall-clock time (Python 2 print).
start_time = time.time()
euler_problem_31()
end_time = time.time()
print "total calculation time is ", (end_time - start_time), " [Sec]"
|
Absolutely Spotless, Inc. has served the families residing in Dayton NJ for more than twenty years. Our belief is to offer the highest quality mold remediation, mold removal, mold testing, mold inspection, air duct cleaning, dryer vent cleaning, chimney cleaning, window cleaning, gutter cleaning, floor waxing, power washing, carpet cleaning, pressure washing, chimney sweep Dayton NJ. You will feel secure knowing that we are fully licensed and insured, are Mold Removal certified, and set up to provide immediate service of your carpet, window, dryer vent power washing, chimney sweep, floor waxing, pressure washing, air duct, gutter, home house cleaning issues. Call us today!
Here at Absolutely Spotless, Inc. we believe that the client should have the ability to contract with one company that can handle all of your carpet, window, dryer vent power washing, chimney sweep, floor waxing, pressure washing, air duct, gutter, house home cleaning tasks for them. The days of calling numerous carpet, window, dryer vent power washing, chimney sweep, floor waxing, home, house, pressure washing, gutter, air duct cleaning contractors is over. All of the service technicians are trained and certified in the service that you hire us for. As a result you will be benefiting from the best value and work quality in the industry. Our customer service is also second to none. We answer the phones when you call and in the rare event that you find that it is necessary to leave a voice mail for our staff, know that someone will call you back promptly! So remember that Absolutely Spotless, Inc. will always be there for you when you need carpet, window, dryer vent power washing, air duct, chimney sweep, floor waxing, pressure washing, gutter, home house cleaning.
Call Absolutely Spotless for a FREE estimate!
|
#
# Copyright (c) 2015 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
class BasePageObject(object):
    """Shared Selenium helpers for functional-test page objects.

    All lookups wait for visibility (or invisibility) first, using explicit
    WebDriverWait conditions rather than implicit waits.
    """

    def __init__(self, context, timeout=10):
        # behave-style context object; the WebDriver lives at context.browser
        self.context = context
        # default explicit-wait timeout, in seconds
        self.timeout = timeout

    def _find_element_containing_text(self, text, element_type='*'):
        """Find the first element of *element_type* whose text contains *text*."""
        return self._find_element_by_xpath("//%s[contains(.,'%s')]" % (element_type, text))

    def _find_element_by_css_locator(self, locator, timeout=None, dom_context=None):
        """Wait for visibility, then locate within *dom_context* (or the page)."""
        locator_tuple = (By.CSS_SELECTOR, locator)
        self._wait_until(EC.visibility_of_element_located(locator_tuple), timeout or self.timeout)
        context = dom_context or self.context.browser
        return context.find_element_by_css_selector(locator)

    def _find_element_by_id(self, id):
        # No wait here -- immediate lookup by element id.
        return self.context.browser.find_element_by_id(id)

    def _find_elements_by_css_locator(self, locator, timeout=None):
        locator_tuple = (By.CSS_SELECTOR, locator)
        self._wait_until(EC.visibility_of_element_located(locator_tuple), timeout or self.timeout)
        return self.context.browser.find_elements_by_css_selector(locator)

    def _find_elements_by_xpath(self, xpath, timeout=None):
        locator_tuple = (By.XPATH, xpath)
        self._wait_until(EC.visibility_of_element_located(locator_tuple), timeout or self.timeout)
        return self.context.browser.find_elements_by_xpath(xpath)

    def _find_element_by_xpath(self, xpath, timeout=None, dom_context=None):
        locator_tuple = (By.XPATH, xpath)
        self._wait_until(EC.visibility_of_element_located(locator_tuple), timeout or self.timeout)
        context = dom_context or self.context.browser
        # Fix: search within the chosen context. The original ignored
        # dom_context and always searched self.context.browser.
        return context.find_element_by_xpath(xpath)

    def _wait_element_to_be_removed(self, locator, timeout=None):
        locator_tuple = (By.CSS_SELECTOR, locator)
        self._wait_until(EC.invisibility_of_element_located(locator_tuple), timeout or self.timeout)

    def _wait_element_to_be_removed_by_xpath(self, xpath, timeout=None):
        locator_tuple = (By.XPATH, xpath)
        self._wait_until(EC.invisibility_of_element_located(locator_tuple), timeout or self.timeout)

    def _wait_until(self, condition_function, timeout=None):
        wait = WebDriverWait(self.context.browser, timeout or self.timeout)
        wait.until(condition_function)

    def wait_until_element_is_visible_by_css_locator(self, locator):
        # These public waiters use a long fixed 60s timeout, not self.timeout.
        wait = WebDriverWait(self.context.browser, 60)
        wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, locator)))
        # Fix: WebDriver has no find_element_by_selector method; the original
        # raised AttributeError whenever the return value was used.
        return self.context.browser.find_element_by_css_selector(locator)

    def wait_until_elements_are_visible_by_css_locator(self, locator):
        wait = WebDriverWait(self.context.browser, 60)
        wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, locator)))

    def wait_until_element_is_visible_by_locator(self, locator):
        """*locator* is a (By.<strategy>, value) tuple; returns the element."""
        wait = WebDriverWait(self.context.browser, 60)
        by, locator = locator
        wait.until(EC.visibility_of_element_located((by, locator)))
        return self.context.browser.find_element(by, locator)

    def wait_until_element_is_invisible_by_locator(self, locator):
        wait = WebDriverWait(self.context.browser, 60)
        by, locator = locator
        wait.until(EC.invisibility_of_element_located((by, locator)))
|
White metal band Immortal Guardian signed with M-Theory Audio in April 2018. The band’s debut full-length album was due that summer. The band featured Gabriel Guardian and had issued several demos, including Revolution Part I and Super Metal, since formation in 2008. The band called its music ‘Super Metal.’ Immortal Guardian’s Age Of Revolution was out through M-Theory Audio. The artwork for the album was created by the group's vocalist Carlos Zema (ex-Outworld).
|
#!/usr/bin/env python
#
# Copyright (C) 2011, 2015 Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import numpy
import sherpa.all
# Override Sherpa's feature that binds model identifiers as local objects
import sherpa.ui
import sherpa.ui.utils
sherpa.ui.utils._assign_obj_to_main = lambda name, obj: None
import sherpa.astro.all
import sherpa.astro.ui as sherpaUI
import logging
logger = logging.getLogger(__name__)
info = logger.info
from sherpa_samp.utils import encode_string, decode_string
__all__ = ("SherpaSession", "check_for_nans")
def check_for_nans(ui):
    """Mask out NaN data points in every data set of *ui*'s Sherpa session.

    For statistics that use measurement errors (anything other than
    least-squares, C-stat, and Cash -- i.e. the chi-squared family), points
    with NaN or zero errors are masked out as well.
    """
    session = ui.session
    stat = session.get_stat()
    for ii in session.list_data_ids():
        # filter=False: inspect the raw, unfiltered columns.
        x, = session.get_indep(ii, filter=False)
        y = session.get_dep(ii, filter=False)
        err = session.get_staterror(ii, filter=False)
        mask = numpy.isnan(x)
        mask |= numpy.isnan(y)
        # No need to filter NaNs in flux error column
        # unless statistic actually uses errors
        # Least-squares, cash, c-stat do *not* use errors
        # chi-squared *does* use errors.
        if not isinstance(stat, (sherpa.stats.LeastSq, sherpa.stats.CStat, sherpa.stats.Cash)):
            mask |= numpy.isnan(err)
            # When using chi-squared, also automatically filter out
            # data points where the error bar is zero. The fit will
            # proceed with usable errors.
            mask |= err == 0
        # ignore=True: True entries in mask are *excluded* from the fit.
        session.set_filter(ii, mask, ignore=True)
#
## Sherpa Session Object
#
class SherpaSession(object):
    """Private Sherpa fitting session driven by SAMP messages (Python 2).

    Wraps a sherpa.ui Session and translates the dict-based "maps"
    (datamaps, modelmaps, statmap, methodmap, confidencemap) exchanged over
    SAMP into Sherpa calls, and Sherpa results back into string-encoded
    dicts.
    """

    def __init__(self, msg_id=None, mtype=None):
        # Build a private Session (not the global sherpa.astro.ui state) and
        # register the standard model types with it.
        session = sherpaUI.utils.Session()
        session._add_model_types(sherpa.models.basic)
        session._add_model_types(sherpa.models.template)
        session._add_model_types(sherpa.astro.models)
        session._add_model_types(sherpa.instrument,
                                 baselist=(sherpa.models.Model,))
        session._add_model_types(sherpa.astro.instrument)
        session._add_model_types(sherpa.astro.optical)
        #session._add_model_types(sherpa.astro.xspec,
        #                         (sherpa.astro.xspec.XSAdditiveModel,
        #                          sherpa.astro.xspec.XSMultiplicativeModel))
        self.session = session
        # SAMP message id / mtype this session is serving (may be None).
        self.msg_id = msg_id
        self.mtype = mtype
        # max_rstat of 3 is unhelpful in SED fitting.
        self.session.set_conf_opt("max_rstat", 1.e+38)
        # compute 90% confidence limits by default
        self.session.set_conf_opt("sigma", 1.6448536269514722)

    def set_data(self, datamaps):
        """Load each datamap dict as a Data1D data set (ids 0..n-1).

        String-encoded numeric columns are decoded in place first.
        """
        if not numpy.iterable(datamaps):
            raise TypeError("datamaps is not iterable")
        #keys = ["x", "y", "staterror", "syserror", "weights"]
        keys = ["x", "y", "staterror", "syserror"]
        for ii, data in enumerate(datamaps):
            for key in keys:
                if data.has_key(key):
                    data[key] = decode_string(data[key])
                    info('decoding' + key)
            self.session.set_data(ii, sherpa.data.Data1D(**data))
            d = self.session.get_data(ii)
            # Compact array printing for the log lines below.
            numpy.set_printoptions(precision=4, threshold=6)
            info("DataSet %i x: " % ii + numpy.array2string(d.x))
            info("DataSet %i y: " % ii + numpy.array2string(d.y))
            info("DataSet %i staterror: " % ii + numpy.array2string(d.staterror))

    def set_model(self, modelmaps):
        """Assign each modelmap's "name" expression as the model for data set ii."""
        for ii, model in enumerate(modelmaps):
            if model["name"].strip() == '':
                raise TypeError("Model expression not found")
            self.session.set_model(ii, model["name"])
            info("Model: " + str(ii) + str(self.session.get_source(ii)))

    def set_parameters(self, modelmaps, usermodels):
        """Create custom (table/template/user) models and set parameter values.

        First interprets *usermodels* entries (loading table models, template
        models, or exec'ing user-model Python files), then walks *modelmaps*
        updating every component's parameter values.
        """
        # If entries in usermodels dictionary, interpret them here
        for model_info in usermodels:
            # Check that model name is a string and can be split
            if (type(model_info["name"]) == type("str")):
                if (len(model_info["name"].split('.')) == 2):
                    model_type = None
                    model_name = None
                    try:
                        # The "name" is actually type.name
                        # eg. tablemodel.c1 so split it
                        model_type=model_info["name"].split('.')[0]
                        model_name=model_info["name"].split('.')[1]
                        if (model_type == "tablemodel"):
                            self.session.load_table_model(model_name,
                                                          model_info["file"].strip())
                        if (model_type == "template"):
                            # Template model fits can be done with continuous optimization methods in Sherpa >4.6
                            self.session.load_template_model(model_name,
                                                             model_info["file"].strip())
                            #self.session.set_method("gridsearch")
                            #tempmdl = self.session.get_model_component(model_name)
                            #self.session.set_method_opt("sequence", tempmdl.parvals)
                        if (model_type == "usermodel"):
                            # user_model_ref set by code in user model
                            # Python file
                            # NOTE(review): execfile/eval run arbitrary code
                            # from the referenced file -- trusted input only.
                            execfile(model_info["file"].strip())
                            # Get reference to user model function
                            func_ref = model_info["function"].strip()
                            func_ref = eval(func_ref)
                            self.session.load_user_model(func_ref,
                                                         model_name)
                            # Now, look in modelmaps for instance of user model
                            # That has a dictionary of parameters, so create
                            # user model parameters from entries in that dictionary
                            for ii, model in enumerate(modelmaps):
                                for component in model["parts"]:
                                    if (model_info["name"] == component["name"]):
                                        parnames = []
                                        parvals = []
                                        parmins = []
                                        parmaxs = []
                                        parfrozen = []
                                        for pardict in component["pars"]:
                                            parnames = parnames + [pardict["name"].split(".")[1].strip()]
                                            parvals = parvals + [float(pardict["val"])]
                                            parmins = parmins + [float(pardict["min"])]
                                            parmaxs = parmaxs + [float(pardict["max"])]
                                            parfrozen = parfrozen + [bool(int(pardict["frozen"]))]
                                        self.session.add_user_pars(model_name,
                                                                   parnames,
                                                                   parvals,
                                                                   parmins,
                                                                   parmaxs,
                                                                   None,
                                                                   parfrozen)
                                        break
                        # end of block to interpret user models
                    except Exception, e:
                        # On failure, tear down the half-created component,
                        # then re-raise the original error.
                        try:
                            if (model_name != None):
                                self.session.delete_model_component(model_name)
                        except:
                            raise e
        # end of block to interpret custom models
        # Now, update parameter values, and create *or* update
        # model components, for all components listed in modelmaps.
        # (If model was already created above, the code below merely
        # updates parameter values.)
        for ii, model in enumerate(modelmaps):
            for component in model["parts"]:
                if component["name"].strip() == '':
                    raise TypeError("Model expression not found")
                mdl = self.session._eval_model_expression(component["name"])
                for pardict in component["pars"]:
                    if pardict["name"].strip() == '':
                        raise TypeError("Model component name missing")
                    par = self.session.get_par(pardict["name"])
                    parname = pardict.pop("name").split(".")[1].strip()
                    # Specview sends parameter attributes as strings,
                    # convert to floats here.
                    #for attrname in ['val', 'min', 'max']:
                    for attrname in ['val']:
                        if pardict.has_key(attrname):
                            pardict[attrname] = float(pardict[attrname])
                    #pardict.pop('min', None)
                    #pardict.pop('max', None)
                    pardict.pop('alwaysfrozen', None)
                    attrname = 'frozen'
                    if pardict.has_key(attrname):
                        pardict[attrname] = bool(int(pardict[attrname]))
                    par.set(**pardict)
                    info('setting ' + parname + ' with ' + str(pardict))
                info(str(mdl))

    def set_stat(self, statmap):
        """Select the fit statistic named in *statmap*."""
        self.session.set_stat(statmap["name"])
        # FIXME: A kludge when Specview passes all zeros for staterror
        # for NED SEDs.
        # check for zeros in uncertainties when using leastsq
        if statmap["name"] == "leastsq":
            for ii in self.session.list_data_ids():
                data = self.session.get_data(ii)
                if(data.staterror is not None and
                   (True in (data.staterror <= 0.0))):
                    #data.staterror = numpy.ones_like(data.staterror)
                    data.staterror = numpy.ones_like(data.y)
        info(statmap["name"] + ": " + self.session.get_stat_name())

    def set_method(self, methodmap):
        """Select the optimization method and apply its config options."""
        self.session.set_method(methodmap["name"])
        info(methodmap["name"] + ": ")
        configdict = methodmap.get("config", None)
        if configdict is not None:
            info(methodmap["name"] + ": " + str(methodmap["config"]))
            for key in configdict:
                # 'INDEF' is Specview's marker for "use the default".
                if str(configdict[key]).startswith('INDEF'):
                    configdict[key] = None
                self.session.set_method_opt(key, configdict[key])
        info(str(self.session.get_method_opt()))

    def set_confidence(self, confidencemap):
        """Apply the options for the named confidence method (conf/proj/covar)."""
        methodname = confidencemap["name"].strip().lower()
        # e.g. name "conf" -> self.session.set_conf_opt
        method_opt = getattr(self.session, 'set_%s_opt' % methodname)
        info(confidencemap["name"] + ": ")
        configdict = confidencemap.get("config", None)
        if configdict is not None:
            info(confidencemap["name"] + ": " + str(confidencemap["config"]))
            for key in configdict:
                if str(configdict[key]).startswith('INDEF'):
                    configdict[key] = None
                val = None
                try:
                    val = float(configdict[key])
                except:
                    raise Exception("Sigma must be a valid floating-point value")
                if numpy.isnan(val):
                    raise Exception("Sigma must be a valid floating-point value")
                method_opt(key, val)
        method_opt = getattr(self.session, 'get_%s_opt' % methodname)
        info(str(method_opt()))

    def get_confidence(self, confidencemap):
        """Return the session's get_<method>() result for the named method."""
        methodname = confidencemap["name"].strip().lower()
        method = getattr(self.session, 'get_%s' % methodname)
        return method()

    def get_flux(self, fluxtype):
        """Return the calc_<fluxtype>_flux function (not its result)."""
        flux_func = getattr(self.session, 'calc_%s_flux' % fluxtype)
        return flux_func

    def run_confidence(self, confidencemap):
        """Run the named confidence analysis (e.g. session.conf())."""
        methodname = confidencemap["name"].strip().lower()
        method = getattr(self.session, methodname)
        method()

    def get_confidence_results(self, confidencemap, confidence_results=None):
        """Return confidence results as a dict of strings/encoded arrays."""
        if confidence_results is None:
            methodname = confidencemap["name"].strip().lower()
            method_result = getattr(self.session, 'get_%s_results' % methodname)
            confidence_results = method_result()
        results = {}
        # All numbers are repr'd so they survive SAMP string transport.
        results["sigma"] = repr(float(confidence_results.sigma))
        results["percent"] = repr(float(confidence_results.percent))
        results["parnames"] = list(confidence_results.parnames)
        results["parvals"] = encode_string(confidence_results.parvals)
        results["parmins"] = encode_string(confidence_results.parmins)
        results["parmaxes"] = encode_string(confidence_results.parmaxes)
        return results

    def get_fit_results(self, fit_results=None):
        """Return fit results as a dict of strings/encoded arrays."""
        if fit_results is None:
            fit_results = self.session.get_fit_results()
        results = {}
        results["succeeded"] = str(int(bool(fit_results.succeeded)))
        results["parvals"] = encode_string(fit_results.parvals)
        results["parnames"] = list(fit_results.parnames)
        results["statval"] = repr(float(fit_results.statval))
        results["numpoints"] = str(int(fit_results.numpoints))
        results["dof"] = repr(float(fit_results.dof))
        # qval/rstat can be None (e.g. for non-chi-squared statistics).
        results["qval"] = 'nan'
        if fit_results.qval is not None:
            results["qval"] = repr(float(fit_results.qval))
        results["rstat"] = 'nan'
        if fit_results.rstat is not None:
            results["rstat"] = repr(float(fit_results.rstat))
        results["nfev"] = str(int(fit_results.nfev))
        return results
|
SoletAer combines solar heat with a built-in heat pump and provides hot water that is good for both your economy and the environment.
SoletAer combines solar heat with a built-in heat pump and provides hot water that is good for both your economy and the environment. SoletAer is mounted on the outside wall which makes for an easy installation. It is connected to your existing water heater or to our SoletAer 260 L water heater.
Heating water for showers, dishwashing and laundry in an average single-family home consumes approximately 5000 kWh / year. This represents approximately 20% of the total energy use in a home.
We shower and take baths more frequently these days, and many people dream about having their own spa. Trends show that domestic hot water is an increasing part of the total energy consumption in households.
Our product SoletAer is a newly developed solar heat pump. SoletAer lowers the cost of domestic hot water by two-thirds. SoletAer uses solar energy to heat the water which also benefits the Environment.
INNENCO reduces energy use in every kind of building, with the potential to save up to 85% of a building's specific energy use. INNENCO provides a low-temperature thermal network adapted to viable city solutions that increases building value and indoor comfort.
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata: preview-status, community-supported module.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
# Module documentation rendered by ansible-doc; the string is parsed as YAML.
# Fixes two typos in the original text: "(admon)" -> "(adom)" and
# "execute the scipt" -> "execute the script".
DOCUMENTATION = '''
---
module: fmgr_script
version_added: "2.5"
author: Andrew Welsh
short_description: Add/Edit/Delete and execute scripts
description: Create/edit/delete scripts and execute the scripts on the FortiManager using jsonrpc API
options:
  adom:
    description:
      - The administrative domain (adom) the configuration belongs to
    required: true
  vdom:
    description:
      - The virtual domain (vdom) the configuration belongs to
  host:
    description:
      - The FortiManager's Address.
    required: true
  username:
    description:
      - The username to log into the FortiManager
    required: true
  password:
    description:
      - The password associated with the username account.
    required: false
  state:
    description:
      - The desired state of the specified object.
      - present - will create a script.
      - execute - execute the script.
      - delete - delete the script.
    required: false
    default: present
    choices: ["present", "execute", "delete"]
  script_name:
    description:
      - The name of the script.
    required: True
  script_type:
    description:
      - The type of script (CLI or TCL).
    required: false
  script_target:
    description:
      - The target of the script to be run.
    required: false
  script_description:
    description:
      - The description of the script.
    required: false
  script_content:
    description:
      - The script content that will be executed.
    required: false
  script_scope:
    description:
      - (datasource) The devices that the script will run on, can have both device member and device group member.
    required: false
  script_package:
    description:
      - (datasource) Policy package object to run the script against
    required: false
'''
EXAMPLES = '''
- name: CREATE SCRIPT
fmgr_script:
host: "{{inventory_hostname}}"
username: "{{ username }}"
password: "{{ password }}"
adom: "root"
script_name: "TestScript"
script_type: "cli"
script_target: "remote_device"
script_description: "Create by Ansible"
script_content: "get system status"
- name: EXECUTE SCRIPT
fmgr_script:
host: "{{inventory_hostname}}"
username: "{{ username }}"
password: "{{ password }}"
adom: "root"
script_name: "TestScript"
state: "execute"
script_scope: "FGT1,FGT2"
- name: DELETE SCRIPT
fmgr_script:
host: "{{inventory_hostname}}"
username: "{{ username }}"
password: "{{ password }}"
adom: "root"
script_name: "TestScript"
state: "delete"
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: string
"""
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.network.fortimanager.fortimanager import AnsibleFortiManager
# check for pyFMG lib
try:
from pyFMG.fortimgr import FortiManager
HAS_PYFMGR = True
except ImportError:
HAS_PYFMGR = False
def set_script(fmg, script_name, script_type, script_content, script_desc, script_target, adom):
    """Create or update a script in the given ADOM.

    Returns the raw FortiManager API response from fmg.set().
    """
    payload = {
        'name': script_name,
        'type': script_type,
        'content': script_content,
        'desc': script_desc,
        'target': script_target,
    }
    endpoint = '/dvmdb/adom/{adom}/script/'.format(adom=adom)
    return fmg.set(endpoint, payload)
def delete_script(fmg, script_name, adom):
    """Delete the named script from the given ADOM.

    Returns the raw FortiManager API response from fmg.delete().
    """
    endpoint = '/dvmdb/adom/{adom}/script/{script_name}'.format(
        adom=adom, script_name=script_name)
    return fmg.delete(endpoint, {'name': script_name})
def execute_script(fmg, script_name, scope, package, adom, vdom):
    """Run a script against the devices named in *scope*.

    *scope* is a comma-separated device list (spaces ignored); each device is
    paired with *vdom*. Returns the raw API response from fmg.execute().
    """
    targets = [{'name': device, 'vdom': vdom}
               for device in scope.replace(' ', '').split(',')]
    payload = {
        'adom': adom,
        'script': script_name,
        'package': package,
        'scope': targets,
    }
    endpoint = '/dvmdb/adom/{adom}/script/execute'.format(adom=adom)
    return fmg.execute(endpoint, payload)
def main():
    """Module entry point: parse arguments, log into the FortiManager, and
    add, execute, or delete the requested script per ``state``."""
    argument_spec = dict(
        adom=dict(required=False, type="str"),
        vdom=dict(required=False, type="str"),
        host=dict(required=True, type="str"),
        password=dict(fallback=(env_fallback, ["ANSIBLE_NET_PASSWORD"]), no_log=True),
        username=dict(fallback=(env_fallback, ["ANSIBLE_NET_USERNAME"])),
        state=dict(choices=["execute", "delete", "present"], type="str"),
        script_name=dict(required=True, type="str"),
        script_type=dict(required=False, type="str"),
        script_target=dict(required=False, type="str"),
        script_description=dict(required=False, type="str"),
        script_content=dict(required=False, type="str"),
        script_scope=dict(required=False, type="str"),
        script_package=dict(required=False, type="str"),
    )
    module = AnsibleModule(argument_spec, supports_check_mode=True,)
    # check if params are set
    if module.params["host"] is None or module.params["username"] is None:
        module.fail_json(msg="Host and username are required for connection")
    # check if login failed
    fmg = AnsibleFortiManager(module, module.params["host"], module.params["username"], module.params["password"])
    response = fmg.login()
    # NOTE(review): the triple-n "connnected" appears to match the literal
    # message produced by the pyFMG library -- confirm before "fixing" it.
    if "FortiManager instance connnected" not in str(response):
        module.fail_json(msg="Connection to FortiManager Failed")
    else:
        # Fill in defaults for optional parameters.
        adom = module.params["adom"]
        if adom is None:
            adom = "root"
        vdom = module.params["vdom"]
        if vdom is None:
            vdom = "root"
        state = module.params["state"]
        if state is None:
            state = "present"
        script_name = module.params["script_name"]
        script_type = module.params["script_type"]
        script_target = module.params["script_target"]
        script_description = module.params["script_description"]
        script_content = module.params["script_content"]
        script_scope = module.params["script_scope"]
        script_package = module.params["script_package"]
        # if state is present (default), then add the script
        if state == "present":
            results = set_script(fmg, script_name, script_type, script_content, script_description, script_target, adom)
            # pyFMG calls return a (status_code, data) tuple; 0 means success.
            if not results[0] == 0:
                if isinstance(results[1], list):
                    module.fail_json(msg="Adding Script Failed", **results)
                else:
                    module.fail_json(msg="Adding Script Failed")
        elif state == "execute":
            results = execute_script(fmg, script_name, script_scope, script_package, adom, vdom)
            if not results[0] == 0:
                module.fail_json(msg="Script Execution Failed", **results)
        elif state == "delete":
            results = delete_script(fmg, script_name, adom)
            if not results[0] == 0:
                module.fail_json(msg="Script Deletion Failed", **results)
        fmg.logout()
        # results is returned as a tuple
        return module.exit_json(**results[1])


if __name__ == "__main__":
    main()
|
Today is the first official World Oceans Day and as you can imagine this is a day for celebration at Shellbelle's Tiki Hut!
The Ocean Project has asked two things of us today. First, wear blue in honor of the ocean. Second, tell people two things they likely don't know about our ocean and how they can help.
Here in Florida we have mangrove trees and they play an important role in preserving our coastlines and the inhabitants of the world's oceans. There are three species of mangroves in Florida: Rhizophora mangle (Red Mangrove), Avicennia germinans (Black Mangrove) and Laguncularia racemosa (White Mangrove). Yesterday I was hiking at Weedon Island and took this picture of the distinctive roots of the red mangroves and the black mangroves.
Red mangroves live closest to the water and are actually in the water at high tide. The roots of the red mangrove drop down from their branches and embed themselves in the sediment below. These are called prop roots and they have the ability to take in water while keeping out the salt. The appearance of the prop roots of the red mangrove have given them the nickname of walking tree, because they appear to be walking on water. This tree takes in oxygen through pores on its bark called lentices.
Black mangrove roots are just as distinctive as those of the red mangrove. The black mangrove grows behind red mangroves and its root system sends out finger-like projections called pneumatophores, which extend upwards through the sediment, allowing the tree to take in oxygen. Black mangroves remove salt by excreting it through their leaves.
White mangroves cannot survive in the water, so they grow the furthest from the coastline. They have neither prop roots or pneumatophores, but their leaf stems have two bumps that remove the salt taken in by their roots.
So, now you're asking yourself what all this information has to do with World Oceans Day, right? All food chains start somewhere and one of these chains begins in mangroves. The leaves of mangrove trees fall into the water, where they decompose into a substance called detritus. Detritus is ingested by critters, such as small crabs and young shrimp, which in turn are eaten by fish, who are then eaten by bigger fish, who end up in the bellies of mammals, such as dolphins, thus completing the food chain.
The mighty mangrove doesn't stop at food chains in aiding our coastal ecosystem! The tangled roots of mangroves also provide a habitat for many marine organisms and protect the young of many larger species. Mangroves also serve as nesting sites for many of our beautiful birds. Mangroves protect our coastlines against erosion, they filter pollutants from river runoff and they're just darn beautiful! Unfortunately, mangroves are being lost. The good news? You can help!
Visit the Mangrove Action Project to help reverse the degradation and loss of mangrove ecosystems worldwide.
As I wrote earlier, mangroves play an important part in a food chain, but there is something that does not belong in our oceans — something that has been ingested by precious creatures and something that ultimately led to their deaths. What is this you ask? Garbage! We've all seen it on our beaches — plastic bottles, fast-food containers, tangled fishing lines and more. The garbage was thrown there or washed ashore from some unknown location. What are people thinking? Animals eat this crap or they get caught in it and they die. They die.
Did you know that an estimated 100,000 mammals and 2 million seabirds die every year after eating or getting caught in plastic? So, wherever you are or whatever you're doing, don't leave your trash behind. Take it with you and dispose of it properly. Please. Thank you.
Aim to be carbon neutral by reducing and offsetting your energy consumption.
Upgrade to Energy Star appliances and compact fluorescent light bulbs.
Rethink what you need, purchase “green” products, and buy locally grown foods and sustainable seafood.
Let your friends, family, colleagues, and the local media know about the impact of climate change on the ocean.
Walk, bike, carpool, and take public transportation to cut down your coral-reef harming carbon emissions.
Get active as a volunteer with a local watershed or ocean group.
Take part in a World Ocean Day activity near you or plan your own.
For more World Oceans Day posts visit CrazyCris @ Here There and Everywhere who is hosting the Oceanic Blog-A-Thon!
Thank you for all your "C" worthy words. There is such delight in this celebration. Thank you for stopping by. May your sails be set free this day in shared joy.
I love the seven "C"s - AWESOME!
Great post! Thanks for the all the great information. I'm certainly wearing blue today! I even told the video store guy what day it was today! Spread the word!
It's a sad testimony... Everywhere on the planet, life forms are suffering because of our careless actions. Thanks for your story. Happy Oceans Day!
It's nice to see the mangroves getting some appreciation! I had so much fun around them as a teenager visiting Veracruz and the Florida Keys... hiking around mangrove coasts is a very rewarding challenge, just teeming with wildlife!
great post! lived in FL for a few years. your cycle of life was a cool take on the day. each piece playing its part. happy Oceans Day!
What a lovely, enjoyable read. I can tell you put much care and thought into it. Thank you for the great call to action.
Thank you all for celebrating World Oceans Day with me and thank you Chris for hosting the Oceanic Blog-A-Thon.
I'm happy to see you all enjoyed learning more about Mangroves. Kathy, there are about 50 species worldwide, isn't that amazing?
You were right Rose, I did enjoy your post Sometimes Real Sometimes Pretend . I have always held seashells in awe, ever since my grandmother told me the mermaids left them on the shore while I was sleeping. They remind me of her and all that can be.
Hi Shellbelle, I love your blog, it is Awesome!
I also enjoyed this informative post about our world of oceans. They really are our lifeline and we need to protect and care for them.
Mangroves do play an important part in Florida, that is why they are protected from being cut down and uprooted.
Hi Rhonda, love your blog and I will be "following" you. I've not been in FL too long but learned alot about mangroves from your blog.
Noticed on the side about the Sand Scultpting Festival, I've already marked it in my calendar! There's another down in Ft. Myers in November, I believe. Thanks for all the info and a belated Happy World Oceans Day!
Thanks for visiting my blog and when I'm up your way I'll be visiting the Florida Botanical Gardens.
BTW Rhonda, where do I find Rose' blog and her post Sometimes Read, Sometimes Pretend. Sounds like it might have something to do with shells and I am getting into that.
I am so grateful for all the people who speak up and try to raise awareness about the ocean and the planet in general. It's so hard to read all this..., how we humans behave toward nature. There is such a disconnect... And it wasn't too long ago that I didn't even know about the great Pacific garbage patch.
Lovely themes, gorgeous photo and a detailed write up to celebrate the World Oceans Day. I can see you have put your heart and soul in making this fantastic, educative post.
|
"""
Django settings for boot project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; load it from an
# environment variable or a non-committed file before deploying.
SECRET_KEY = ')hlj1!*+&cv!9(sg3xfxtip($_c#pf4imb=_aw=stbc$n0lzn+'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty while developing; must list served hostnames once DEBUG is False.
ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'news',  # project-local app
]

# NOTE(review): Django 1.10 (which generated this file) introduced the
# new-style MIDDLEWARE setting; MIDDLEWARE_CLASSES is the legacy name and
# was removed in Django 2.0 — confirm the targeted Django version before
# upgrading.
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'boot.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates directory in addition to per-app ones.
        'DIRS': [os.path.join(BASE_DIR , 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'boot.wsgi.application'

# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'pytest',
        'USER': 'root',
        # NOTE(review): plaintext credentials committed to source control —
        # move to environment variables or a secrets store.
        'PASSWORD': 'azdingdb123',
        'HOST':'localhost',
        'PORT':'3306',
    }
}

# Password validation
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/

STATIC_URL = '/static/'
# NOTE(review): STATIC_PATH is not a setting Django itself reads; presumably
# referenced elsewhere in the project (e.g. for STATICFILES_DIRS) — verify
# usage before removing.
STATIC_PATH= os.path.join(os.path.dirname(__file__), '../static').replace('\\','/')
|
I hope everyone had a wonderful summer.
Yes, I know technically it’s still summer despite the fact that some retailers in the States are already selling Spiced Pumpkin Latte mixes and there are bloggers talking about Halloween.
I don’t understand this rush. Let’s enjoy this month and getting ready for fall. This is my favorite time of year/season. Not that I dislike the other three (well maybe winter, lack of sun and daylight is not my scene) but there’s something about fall that makes me happy. I was one of those geeky kids who couldn’t wait to buy back-to-school supplies. In my young mind there weren’t many things more exciting than a Mead Trapper Keeper notebook.
Unfortunately, back-to-school shopping for clothes wasn’t as fun as my Caribbean mom was not about that life. Every time I asked for something that EVERYONE was wearing, my mom would say that I was going to school to get an education, not for a fashion show. My dad backed her up as he didn’t know (or care) about the difference between Girbaud, or Guess, jeans and a random pair from wherever.
I still look at fall as a beginning even though I graduated from college back in the Stone Age. Like spring, it’s an opportunity to hit the reset button.
One of the color trends for Fall 2018 is this deep blue called Sargasso Sea.
It happens to be a color that I’m a fan of.
I’m feeling this cape but I might be too short for this lewk. Love the pop of color from Ms. Palermo’s shoes. Great combo.
This classic pattern from Schumacher is an excellent example of Hollywood Regency glamour.
There are so many beautiful colors to choose from but this one is in my top three, okay five.
I cannot write about September without listening to this old school favorite. It dropped in 1978 and if you play it at any wedding reception, party, or cookout now people will still dance.
Here’s to a great fall!
p.s. Here’s a fantastic piece from NPR regarding why this song is so popular years later.
I asked Jeffrey Peretz, a professor of music theory at New York University’s Clive Davis Institute, what makes that groove so powerful. He says a lot of it has to do with how the music unfolds. The song’s very structure is an endless cycle that keeps us dancing and wanting more.
The trigger for that yearning feeling, Peretz says, is the opening line. White asks, “Do you remember?” and we supply the memories. It’s a song that can bring all of the generations together, which makes it perfect for family gatherings. The true meaning is up to us — including, Allee Willis says, that strangely specific date.
“September” is an all time favorite song of mine as well! If I hear it at work, I can’t help but “chair dance” at my desk.
I too, don’t like this rush to Halloween/Autumn and don’t get me started with the craze over Pumpkin Spice! It’s everywhere, in the states.
The month of September is the best of both seasons, here in the California foothills; the cool mornings of Autumn with the warmth of Summer during the day. The light has a more golden quality to it, which is so scrumptious before the sunset. On the coast, it’s when the fog starts to fade and there’s hope of sighting the sun.
Enjoy this truly magical time of the year.
I love the light during this time of year. I hope to get out to the sea (it’s only a 30 minute drive or so from Rome) to see a sunset. If not, rooftop aperitivi will have to do.
I don’t drink Pumpkin Spice (or any) lattes so I really don’t understand the hype. I say this as someone who does enjoy pumpkin pie.
I am the proud owner of that exact blue Le Creuset. Enjoy your Fall – blistering hot here again.
All my friends in Los Angeles are complaining about the heat. Hope you get a break in the heatwave soon.
|
#
# Kiwi: a Framework and Enhanced Widgets for Python
#
# Copyright (C) 2002, 2003 Async Open Source
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# Author(s): Christian Reis <kiko@async.com.br>
# Lorenzo Gil Sanchez <lgs@sicem.biz>
# Johan Dahlin <jdahlin@async.com.br>
#
"""Defines the Delegate classes that are included in the Kiwi Framework."""
from kiwi.ui.views import SlaveView, BaseView
from kiwi.controllers import BaseController
from kiwi.python import deprecationwarn
class Delegate(BaseView, BaseController):
    """Combined view and controller that owns its own top-level window."""

    def __init__(self, toplevel=None, widgets=(), gladefile=None,
                 toplevel_name=None,
                 delete_handler=None, keyactions=None):
        """Create a new Delegate.

        ``keyactions`` is forwarded to L{kiwi.controllers.BaseController};
        every other argument is forwarded to L{kiwi.ui.views.BaseView}.
        """
        # Glade support moved to GladeDelegate; warn when the legacy
        # argument is still used.
        if gladefile:
            deprecationwarn(
                'gladefile is deprecated in Delegate, use GladeDelegate instead',
                stacklevel=3)
        BaseView.__init__(self, toplevel=toplevel, widgets=widgets,
                          gladefile=gladefile, toplevel_name=toplevel_name,
                          delete_handler=delete_handler)
        BaseController.__init__(self, view=self, keyactions=keyactions)
class GladeDelegate(BaseView, BaseController):
    """Glade-based combined view and controller owning a top-level window."""

    def __init__(self, gladefile=None, toplevel_name=None, domain=None,
                 delete_handler=None, keyactions=None):
        """Create a new GladeDelegate.

        ``keyactions`` is forwarded to L{kiwi.controllers.BaseController};
        every other argument is forwarded to L{kiwi.ui.views.BaseView}.
        """
        BaseView.__init__(self, gladefile=gladefile, domain=domain,
                          toplevel_name=toplevel_name,
                          delete_handler=delete_handler)
        BaseController.__init__(self, view=self, keyactions=keyactions)
class SlaveDelegate(SlaveView, BaseController):
    """Combined view and controller without a top-level window.

    Instances are meant to be plugged into a View or Delegate through
    attach_slave().
    """

    def __init__(self, toplevel=None, widgets=(), gladefile=None,
                 toplevel_name=None, keyactions=None):
        """``keyactions`` is forwarded to L{kiwi.controllers.BaseController};
        every other argument is forwarded to L{kiwi.ui.views.SlaveView}.
        """
        # Glade support moved to GladeSlaveDelegate; warn when the legacy
        # argument is still used.
        if gladefile:
            deprecationwarn(
                'gladefile is deprecated in Delegate, use GladeSlaveDelegate instead',
                stacklevel=3)
        # Forwarded positionally, matching the original call order.
        SlaveView.__init__(self, toplevel, widgets, gladefile, toplevel_name)
        BaseController.__init__(self, view=self, keyactions=keyactions)
class GladeSlaveDelegate(SlaveView, BaseController):
    """Glade-based view/controller combination without a top-level window.

    Instances are meant to be plugged into a View or Delegate through
    attach_slave().
    """

    def __init__(self, gladefile=None,
                 toplevel_name=None, domain=None,
                 keyactions=None):
        """``keyactions`` is forwarded to L{kiwi.controllers.BaseController};
        every other argument is forwarded to L{kiwi.ui.views.SlaveView}.
        """
        SlaveView.__init__(self, gladefile=gladefile, domain=domain,
                           toplevel_name=toplevel_name)
        BaseController.__init__(self, view=self, keyactions=keyactions)
class ProxyDelegate(Delegate):
    """Combines view, controller and proxy functionality into a single
    package. The class possesses a top-level window.

    @ivar model: the model instance attached to the proxy
    @ivar proxy: the proxy created for ``model``
    """
    def __init__(self, model, proxy_widgets=None, gladefile=None,
                 toplevel=None, widgets=(),
                 toplevel_name=None, domain=None, delete_handler=None,
                 keyactions=None):
        """Creates a new ProxyDelegate.

        @param model: instance to be attached to the proxy
        @param proxy_widgets: names of the widgets to hook up to the proxy

        The keyactions parameter is sent to L{kiwi.controllers.BaseController},
        the rest are sent to L{kiwi.ui.views.BaseView}
        """
        # NOTE(review): deliberately skips Delegate.__init__ and calls
        # BaseView.__init__ directly with positional arguments (including
        # domain, which Delegate itself does not accept) — confirm the
        # argument order against BaseView's signature.
        BaseView.__init__(self, toplevel, widgets, gladefile,
                          toplevel_name, domain,
                          delete_handler)
        self.model = model
        # add_proxy() — presumably provided by the view base class — binds
        # the model's attributes to the named widgets; verify against
        # kiwi.ui.views.
        self.proxy = self.add_proxy(model, proxy_widgets)
        # Route proxy change notifications to our overridable hook.
        self.proxy.proxy_updated = self.proxy_updated
        BaseController.__init__(self, view=self, keyactions=keyactions)

    def set_model(self, model):
        """Attach a new model to the proxy, replacing the current one.

        @param model: the new model instance
        """
        self.proxy.set_model(model)
        self.model = model

    def proxy_updated(self, widget, attribute, value):
        # Hook invoked by the proxy on updates; can be overriden in
        # subclasses.
        pass

    def update(self, attribute):
        # Refresh the widget bound to ``attribute`` from the model.
        self.proxy.update(attribute)
class ProxySlaveDelegate(GladeSlaveDelegate):
    """Combines view, controller and proxy functionality into a single
    package. It does not possess a top-level window, but is instead
    intended to be plugged in to a View or Delegate using attach_slave().

    @ivar model: the model instance attached to the proxy
    @ivar proxy: the proxy created for ``model``
    """
    def __init__(self, model, proxy_widgets=None, gladefile=None,
                 toplevel_name=None, domain=None, keyactions=None):
        """Creates a new ProxySlaveDelegate.

        @param model: instance to be attached to the proxy
        @param proxy_widgets: names of the widgets to hook up to the proxy

        The keyactions parameter is sent to L{kiwi.controllers.BaseController},
        the rest are sent to L{kiwi.ui.views.BaseView}
        """
        GladeSlaveDelegate.__init__(self, gladefile, toplevel_name,
                                    domain, keyactions)
        self.model = model
        # add_proxy() — presumably provided by the view base class — binds
        # the model's attributes to the named widgets; verify against
        # kiwi.ui.views.
        self.proxy = self.add_proxy(model, proxy_widgets)
        # Route proxy change notifications to our overridable hook.
        self.proxy.proxy_updated = self.proxy_updated

    def set_model(self, model):
        """Attach a new model to the proxy, replacing the current one.

        @param model: the new model instance
        """
        self.proxy.set_model(model)
        self.model = model

    def proxy_updated(self, widget, attribute, value):
        # Hook invoked by the proxy on updates; can be overriden in
        # subclasses.
        pass

    def update(self, attribute):
        # Refresh the widget bound to ``attribute`` from the model.
        self.proxy.update(attribute)
|
Brixton Hill Studios (“we”, “our” or “us”) promises to respect any personal data you share with us, or that we get from other organisations and keep it safe. We aim to be clear when we collect your data and not do anything you wouldn’t reasonably expect.
You may give us your information in order to make a booking or use one of our services, take a survey, purchase products or communicate with us. When you give us this information we take responsibility for looking after it.
In addition, the type of device you’re using to access our website and the settings on that device may provide us with information about your device, including what type of device it is, what specific device you have and what operating system you’re using. Your device manufacturer or operating system provider will have more details about what information your device makes available to us.
To place an order with us online, registration is required. At the point of registration, we request certain information including your name, delivery address and email address. This information is required to enable us to process your order and notify you of its progress. Once an order has been placed, we may contact you by email to confirm your order details and again once your order has been accepted and despatched. Should we need to contact you for any reason regarding your order, we will use the email address registered to your account, or the telephone number where provided. BHXS does not store any sensitive card data on our systems following online transactions. BHXS utilises payment processor PS Connect (Payment Sense Ltd) to handle these matters.
When you use our services we will use this information to provide the best possible standards of administration and communication.
When we allow third parties access to your information, we will always have complete control of what they see, what they are allowed to do with it and how long they can see it. We do not sell or share your personal information for other organisations to use.
We undertake regular reviews of who has access to information that we hold to ensure that your information is only accessible by appropriately trained staff and service providers.
Some of our suppliers run their operations outside the European Economic Area (EEA). Although they may not be subject to the same data protection laws as companies based in the UK, we will take steps to make sure they provide an adequate level of protection in accordance with UK data protection law. By submitting your personal information to us you agree to this transfer, storing or processing at a location outside the EEA.
We make it easy for you to tell us how you want us to communicate, in a way that suits you. If you don’t want to hear from us, that’s fine. Just let us know when you provide your data or contact us on 020 86745 3065 or email us at hello@brixtonhillstudios.com.
You have a right to ask us to stop processing your personal data, and if it’s not necessary for the purpose you provided it to us for (e.g. to provide our service to you) we will do so. Contact us on 020 86745 3065 or email us at hello@brixtonhillstudios.com if you have any concerns.
If you want to access your information, please contact 020 86745 3065 or email us at hello@brixtonhillstudios.com with a description of the information you want to see and a proof of your identity. If you have any questions please send these to Brixton Sonic Ltd, Unit 1, 126 Brixton Hill SW2 1RS or email hello@brixtonhillstudios.com and for further information see the Information Commissioner’s guidance here (link is external).
|
__author__ = 'abhishekanurag'

from django.conf.urls import patterns, url
# NOTE(review): TemplateView appears unused in this module.
from django.views.generic.base import TemplateView

from travel_log import views

# URL routes for the travel_log app.
# NOTE(review): patterns() was deprecated in Django 1.8 and removed in 1.10;
# a plain list of url() entries is the forward-compatible form — confirm the
# project's Django version before migrating.
urlpatterns = patterns('',
                       # /travel_log
                       url(r'^$', views.index, name='index'),
                       # /travel_log/userlogin
                       url(r'^userlogin/$', views.userlogin, name='userlogin'),
                       # /travel_log/userlogout
                       url(r'^userlogout/$', views.userlogout, name='userlogout'),
                       # /travel_log/signup
                       url(r'^signup/$', views.signup, name='signup'),
                       # /travel_log/home
                       url(r'^home/$', views.home, name='home'),
                       # /travel_log/<trip_id>/view
                       url(r'^(?P<trip_id>\d+)/view/$', views.trip_view, name='view'),
                       # /travel_log/edit : For new trips
                       # The two edit routes deliberately share the name
                       # 'edit'; reverse() selects whichever pattern matches
                       # the supplied arguments (no trip_id -> new trip).
                       url(r'^edit/$', views.trip_edit, name='edit'),
                       # /travel_log/<trip_id>/edit
                       url(r'^(?P<trip_id>\d+)/edit/$', views.trip_edit, name='edit'),
                       # /travel_log/<trip_id>/delete
                       url(r'^(?P<trip_id>\d+)/delete/$', views.trip_delete, name='delete'),
                       )
|
When Wolfgang and his friends learn about a secret cave where a baby dragon is growing they quickly run off to find it but, sadly, Wolfgang gets left behind. His bag is full of heavy worries that are making him slow and he just can’t let go of them. When he trips on a rock and falls, Spider shows him how to rest his busy mind and that worries aren’t so bad if you share them.
Nobody likes worries, so it’s good to know how to give a worried mind a rest!
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""APLog.py
logging script, taking snapshots of the system
dependencies:
sensor and actuator modules
"""
from sensors import DHT22, D18B20, HCSR04, LFM, BH1750
from dbControl import dbLog
import sqlite3 as lite
import datetime
from datetime import timedelta
import time
import sys, traceback
def main():
    """Take periodic snapshots of all configured sensors and log them.

    Reads the per-type sensor configuration and the snapshot interval from
    the SQLite database, then enters an endless loop that polls every sensor
    once per interval and stores the readings via each sensor module's
    ``logSensorValue``.  Sensor read failures are logged and the loop keeps
    running; any other exception is logged as critical and ends the script.
    """
    # Database that holds both the sensor configuration and the log tables.
    dbName = 'APDatabase.sqlite'
    dbLog.softwareLog(dbName, 'APLog.py', 'script started')
    time.sleep(1)

    # Fetch the configured sensors per type.  Each row of an options list
    # follows the Options_sensor table layout:
    #   id type gpio gpio_trig gpio_echo w1_id bed_cm increment_sec i2c_id
    DHT22List = DHT22.getSensorList(dbName)
    D18B20List = D18B20.getSensorList(dbName)
    HCSR04List = HCSR04.getSensorList(dbName)
    LFMList = LFM.getSensorList(dbName)
    BH1750List = BH1750.getSensorList(dbName)
    dbLog.softwareLog(
        dbName, 'APLog.py',
        'sensors imported (DHT22: %s, D18B20: %s, HCSR04: %s, LFM: %s, BH1750: %s)'
        % (len(DHT22List), len(D18B20List), len(HCSR04List), len(LFMList), len(BH1750List)))
    time.sleep(1)

    # First snapshot is due immediately.
    logTime = datetime.datetime.now()

    # Read the snapshot increment (minutes) from the database.  Without it
    # no snapshot can be scheduled, so log the problem and return instead of
    # crashing later with a NameError (as the unguarded code would have).
    snapInc = None
    db = None
    try:
        db = lite.connect(dbName)
        cursor = db.cursor()
        cursor.execute("SELECT VALUE FROM Opt_general WHERE PARAMETER='snapshotIncrement'")
        snapInc = int(cursor.fetchone()[0])
    except Exception as e:
        dbLog.softwareLog(dbName, 'APLog.py Opt_general', repr(e))
    finally:
        # Only close if the connection was actually opened.
        if db is not None:
            db.close()
    if snapInc is None:
        dbLog.softwareLog(dbName, 'APLog.py',
                          "missing 'snapshotIncrement' option, aborting")
        return

    # Endless logging loop; waits until the next snapshot time is due.
    dbLog.softwareLog(dbName, 'APLog.py', 'start endless loop...')
    time.sleep(1)
    try:
        while True:
            try:
                if logTime < datetime.datetime.now():
                    # D18B20 one-wire temperature sensors (w1_id at row[5]).
                    for row in D18B20List:
                        sensor_id = row[0]
                        temp = D18B20.readTemp(dbName, row[5])
                        D18B20.logSensorValue(dbName, sensor_id, round(temp, 2))
                    # DHT22 temperature/humidity sensors (gpio at row[2]).
                    # Read the sensor once per snapshot instead of twice:
                    # each readTempHum() call triggers a fresh hardware read.
                    for row in DHT22List:
                        sensor_id = row[0]
                        reading = DHT22.readTempHum(dbName, row[2])
                        temp = reading[0]
                        hum = reading[1]
                        if (hum is not None) and (temp is not None):
                            DHT22.logSensorValue(dbName, sensor_id,
                                                 round(temp, 2), round(hum, 2))
                    # HCSR04 ultrasonic level sensors
                    # (gpio_trig/gpio_echo/bed_cm at rows [3], [4], [6]).
                    for row in HCSR04List:
                        sensor_id = row[0]
                        level = HCSR04.readLevel(dbName, row[3], row[4], row[6])
                        HCSR04.logSensorValue(dbName, sensor_id, round(level, 2))
                    # LFM liquid flow meters (gpio at row[2]).
                    for row in LFMList:
                        sensor_id = row[0]
                        flow = LFM.readFlow(dbName, row[2])
                        LFM.logSensorValue(dbName, sensor_id, round(flow, 2))
                    # BH1750 light sensors (i2c_id at row[8]).
                    for row in BH1750List:
                        sensor_id = row[0]
                        light = BH1750.readLight(dbName, row[8])
                        BH1750.logSensorValue(dbName, sensor_id, round(light, 2))
                    # Schedule the next snapshot relative to the previous
                    # slot so the interval does not drift with processing
                    # time.
                    logTime = logTime + timedelta(minutes=snapInc)
            except TypeError:
                # A failed sensor read returned None and e.g. round() choked;
                # log it and keep the loop alive.
                err = repr(traceback.format_exception(*sys.exc_info()))
                dbLog.softwareLog(dbName, 'APLog.py', 'sensor error: ' + str(err))
            # Pause to limit CPU usage.
            time.sleep(0.1)
    except Exception:
        # Anything unexpected ends the script; record the traceback first.
        err = repr(traceback.format_exception(*sys.exc_info()))
        dbLog.softwareLog(dbName, 'APLog.py', 'critical error: ' + str(err))
# Run the logger only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
Dirigida por: M Night Shyamalan.
Actúan: Hugo Weaving, Stephen Lang, Robert Sheehan, Hera Hilmar, Colin Salmon.
Dirigida por: Rodney Rothman, Peter Ramsey, Bob Persichetti.
Actúan: Brian Tyree Henry, Shameik Moore, Mahershala Ali, Liev Schreiber.
|
#!/usr/bin/env python3
#
# Django REST Witchcraft documentation build configuration file, created by
# sphinx-quickstart on Sat Jun 10 09:20:17 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_rtd_theme
import django
import django.test.utils
from django.conf import settings
# Make the package under documentation and local extensions importable.
sys.path.insert(0, os.path.abspath(".."))
sys.path.insert(0, os.path.abspath("."))

# Configure a minimal (empty) Django settings module so autodoc can import
# the documented package outside a real Django project.
settings.configure()
# Call django.setup() where it exists (the attribute is absent on old
# Django versions); the getattr default makes this a harmless bool() no-op
# otherwise.
getattr(django, "setup", bool)()
django.test.utils.setup_test_environment()

here = os.path.abspath(os.path.dirname(__file__))

# Read the package version by executing __version__.py, without importing
# the package itself.
about = {}
with open(os.path.join(here, "..", "rest_witchcraft", "__version__.py")) as f:
    exec(f.read(), about)  # yapf: disable

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode", "sphinx.ext.napoleon", "sphinx.ext.coverage"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "Django REST Witchcraft"
copyright = "2017, Serkan Hosca"
author = "Serkan Hosca"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = about["__version__"]
# The full version, including alpha/beta/rc tags.
release = about["__version__"]

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = "DjangoRESTWitchcraftdoc"

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, "DjangoRESTWitchcraft.tex", "Django REST Witchcraft Documentation", "Serkan Hosca", "manual")
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "djangorestwitchcraft", "Django REST Witchcraft Documentation", [author], 1)]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "DjangoRESTWitchcraft",
        "Django REST Witchcraft Documentation",
        author,
        "DjangoRESTWitchcraft",
        # NOTE(review): this appears to be the sphinx-quickstart placeholder
        # description — replace with a real one-liner.
        "One line description of project.",
        "Miscellaneous",
    )
]
|
While Nova Scotia had its fair share of ice this week, a different kind of ice was being made into street art in Dartmouth this weekend.
On Saturday, six artists carved 15 blocks of ice into a wolf, rabbit, beaver and moose — to name a few — for the Downtown Dartmouth Ice Festival.
“February tends to be a quiet time,” said Tim Rissesco, executive director of the Downtown Dartmouth Business Commission.
But despite the icy weather in the province, the ice had to be brought in from Caraquet, N.B.
Rissesco said he looked to get blocks of ice in Nova Scotia but this was the closest place.
“It’s special ice made for carving,” Rissesco said, admitting he didn’t know what made the ice so special.
The man who does know is Richard Chiasson.
The chef from Caraquet, N.B. started sculpting ice in the 1980s and now runs his own ice business, and is believed to be the only producer of ice blocks in the Maritimes.
“It’s the clarity. It’s crystal clear,” he said.
Chiasson explained that an ice cube, for example, freezes from six sides.
“[It] doesn’t have room for expansion, so it cracks,” he said. His blocks, however, are frozen from the bottom up.
Chiasson also filters his water three or four times to make sure it freezes all the way through.
Saturday’s blistering winds and chilly temperatures didn’t faze Chiasson, who works in a food service freezer set at -5 C year-round.
While many people were bundled up and shivering, he worked away cheerfully on several ice sculptures throughout the afternoon and praised the “beautiful” weather.
Chiasson said working with ice is unique because unlike clay, for example, you can’t build it up.
The ice sculptures along Portland Street were done in flower planters, Rissesco said, to keep them safe from plows.
The rest of the event on Saturday evening includes music, free hot chocolate at several spots in downtown Dartmouth and collections to raise money for the United Way.
Rissesco said the sculptures will stay up as long as Mother Nature allows, but he’s hoping they’ll last at least two or three weeks.
|
# -*- coding: utf-8 -*-
"""
Chebfun module
==============
Vendorized version from:
https://github.com/pychebfun/pychebfun/blob/master/pychebfun
The rational for not including this library as a strict dependency is that
it has not been released.
.. moduleauthor :: Chris Swierczewski <cswiercz@gmail.com>
.. moduleauthor :: Olivier Verdier <olivier.verdier@gmail.com>
.. moduleauthor :: Gregory Potter <ghpotter@gmail.com>
The copyright notice (BSD-3 clause) is as follows:
Copyright 2017 Olivier Verdier
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import division
import operator
from functools import wraps
import numpy as np
import numpy.polynomial as poly
from numpy.polynomial.chebyshev import cheb2poly, Chebyshev
from numpy.polynomial.polynomial import Polynomial
import sys
emach = sys.float_info.epsilon # machine epsilon
global sp_fftpack_ifft
sp_fftpack_ifft = None
def fftpack_ifft(*args, **kwargs):
    """Proxy for ``scipy.fftpack.ifft`` that defers the scipy import until
    the first call, keeping module import time low when FFTs are unused."""
    global sp_fftpack_ifft
    if sp_fftpack_ifft is None:
        from scipy.fftpack import ifft
        sp_fftpack_ifft = ifft
    return sp_fftpack_ifft(*args, **kwargs)
global sp_fftpack_fft
sp_fftpack_fft = None
def fftpack_fft(*args, **kwargs):
    """Proxy for ``scipy.fftpack.fft`` that defers the scipy import until
    the first call, keeping module import time low when FFTs are unused."""
    global sp_fftpack_fft
    if sp_fftpack_fft is None:
        from scipy.fftpack import fft
        sp_fftpack_fft = fft
    return sp_fftpack_fft(*args, **kwargs)
global sp_eigvals
sp_eigvals = None
def eigvals(*args, **kwargs):
    """Proxy for ``scipy.linalg.eigvals``, imported lazily on first use."""
    global sp_eigvals
    if sp_eigvals is None:
        from scipy.linalg import eigvals as _eigvals
        sp_eigvals = _eigvals
    return sp_eigvals(*args, **kwargs)
global sp_toeplitz
sp_toeplitz = None
def toeplitz(*args, **kwargs):
    """Proxy for ``scipy.linalg.toeplitz``, imported lazily on first use."""
    global sp_toeplitz
    if sp_toeplitz is None:
        from scipy.linalg import toeplitz as _toeplitz
        sp_toeplitz = _toeplitz
    return sp_toeplitz(*args, **kwargs)
def build_pychebfun(f, domain, N=15):
    """Build a Chebfun approximation of the scalar callable *f* on *domain*
    using N interpolation points, vectorizing *f* point-by-point."""
    def _vectorized(xs):
        # chebfun samples whole arrays; evaluate the scalar f per point
        return [f(xi) for xi in xs]
    return chebfun(f=_vectorized, domain=domain, N=N)
def build_solve_pychebfun(f, goal, domain, N=15, N_max=100, find_roots=2):
    """Fit a chebfun to *f* and solve ``f(x) == goal`` on *domain*.

    The fit is refined (doubling ``N``) until at least *find_roots* roots are
    located or the interpolant reaches *N_max* points.  Returns the roots
    lying strictly inside the domain, together with the final chebfun.
    """
    cache = {}

    def cached_fun(x):
        # Memoise point evaluations: successive refinements re-sample many
        # of the same Chebyshev nodes, so almost half the points are cached.
        try:
            return cache[x]
        except KeyError:
            cache[x] = val = f(x)
            return val

    fun = build_pychebfun(cached_fun, domain, N=N)
    roots = (fun - goal).roots()
    while len(roots) < find_roots and len(fun._values) < N_max:
        N *= 2
        fun = build_pychebfun(cached_fun, domain, N=N)
        roots = (fun - goal).roots()
    low, high = domain[0], domain[1]
    roots = [r for r in roots if low < r < high]
    return roots, fun
def chebfun_to_poly(coeffs_or_fun, domain=None, text=False):
    """Convert a Chebyshev expansion into power-basis polynomial coefficients.

    Parameters
    ----------
    coeffs_or_fun : Chebfun, ChebTools ``ChebyshevExpansion``, or sequence
        Source of the Chebyshev coefficients.  When a plain coefficient
        sequence is given, `domain` must also be provided.
    domain : tuple(low, high), optional
        Interval the expansion is defined on; taken from the object itself
        for Chebfun/ChebyshevExpansion inputs.
    text : bool
        If True, return a ready-to-paste ``horner(...)`` expression instead
        of the coefficient list.

    Returns
    -------
    list or str
        Power-basis coefficients, highest order first, or the text form.
    """
    if isinstance(coeffs_or_fun, Chebfun):
        coeffs = coeffs_or_fun.coefficients()
        domain = coeffs_or_fun._domain
    elif hasattr(coeffs_or_fun, '__class__') and coeffs_or_fun.__class__.__name__ == 'ChebyshevExpansion':
        coeffs = coeffs_or_fun.coef()
        domain = coeffs_or_fun.xmin(), coeffs_or_fun.xmax()
    else:
        coeffs = coeffs_or_fun
    if domain is None:
        # Previously this fell through to an opaque "cannot unpack None"
        # TypeError; fail with an explicit message instead.
        raise ValueError("domain must be provided when passing raw coefficients")
    low, high = domain
    # cheb2poly yields lowest-order first; reverse for horner's convention
    poly_coeffs = cheb2poly(coeffs)[::-1].tolist()
    if not text:
        return poly_coeffs
    s = 'coeffs = %s\n' %poly_coeffs
    delta = high - low
    delta_sum = high + low
    # Generate an expression mapping x in [low, high] onto [-1, 1]
    s += 'horner(coeffs, %.18g*(x - %.18g))' %(2.0/delta, 0.5*delta_sum)
    # return the string
    return s
def cheb_to_poly(coeffs_or_fun, domain=None):
    """Convert a Chebyshev expansion into power-basis coefficients usable on
    the original interval (just call ``horner`` on the outputs!).

    Accepts a Chebfun, a ChebTools ``ChebyshevExpansion``, or a raw
    coefficient sequence (which then requires an explicit ``domain``).
    Returns the coefficients highest order first.
    """
    from fluids.numerics import horner as horner_poly
    if isinstance(coeffs_or_fun, Chebfun):
        coeffs = coeffs_or_fun.coefficients()
        domain = coeffs_or_fun._domain
    elif hasattr(coeffs_or_fun, '__class__') and coeffs_or_fun.__class__.__name__ == 'ChebyshevExpansion':
        coeffs = coeffs_or_fun.coef()
        domain = coeffs_or_fun.xmin(), coeffs_or_fun.xmax()
    else:
        coeffs = coeffs_or_fun
    if domain is None:
        # Previously this fell through to an opaque "cannot unpack None"
        # TypeError; fail with an explicit message instead.
        raise ValueError("domain must be provided when passing raw coefficients")
    low, high = domain
    coeffs = cheb2poly(coeffs)[::-1].tolist() # Convert to polynomial basis
    # Compose with the affine map t = (2x - (high+low))/(high-low) so the
    # returned coefficients apply directly on [low, high]
    my_poly = Polynomial([-0.5*(high + low)*2.0/(high - low), 2.0/(high - low)])
    poly_coeffs = horner_poly(coeffs, my_poly).coef[::-1].tolist()
    return poly_coeffs
def cheb_range_simplifier(low, high, text=False):
    '''Return the affine map taking [low, high] onto the canonical Chebyshev
    interval [-1, 1], either as a ``(constant, factor)`` pair or as a
    pastable ``chebval`` expression.

    >>> low, high = 0.0023046250851646434, 4.7088985707840125
    >>> cheb_range_simplifier(low, high, text=True)
    'chebval(0.42493574399544564724*(x + -2.3556015979345885647), coeffs)'
    '''
    shift = 0.5*(-low-high)
    scale = 2.0/(high-low)
    if text:
        return 'chebval(%.20g*(x + %.20g), coeffs)' %(scale, shift)
    return shift, scale
def cast_scalar(method):
    """Decorator that lets binary operators accept plain scalars.

    A scalar ``other`` is wrapped into a constant interpolant over
    ``self``'s domain so *method* always sees two Polyfun-like operands.
    """
    @wraps(method)
    def wrapper(self, other):
        operand = other
        if np.isscalar(operand):
            operand = type(self)([operand], self.domain())
        return method(self, operand)
    return wrapper
class Polyfun(object):
    """Construct a Lagrange interpolating polynomial over arbitrary points.
    Polyfun objects consist in essence of two components: 1) An interpolant
    on [-1,1], 2) A domain attribute [a,b]. These two pieces of information
    are used to define and subsequently keep track of operations upon Chebyshev
    interpolants defined on an arbitrary real interval [a,b].
    """
    # ----------------------------------------------------------------
    # Initialisation methods
    # ----------------------------------------------------------------
    class NoConvergence(Exception):
        """Raised when dichotomy does not converge."""
    class DomainMismatch(Exception):
        """Raised when there is an interval mismatch."""
    @classmethod
    def from_data(self, data, domain=None):
        """Initialise from interpolation values."""
        return self(data,domain)
    @classmethod
    def from_fun(self, other):
        """Initialise from another instance (copies values and domain)."""
        return self(other.values(),other.domain())
    @classmethod
    def from_coeff(self, chebcoeff, domain=None, prune=True, vscale=1.):
        """
        Initialise from provided coefficients
        prune: Whether to prune the negligible coefficients
        vscale: the scale to use when pruning
        """
        coeffs = np.asarray(chebcoeff)
        if prune:
            # drop trailing coefficients below the trim threshold
            N = self._cutoff(coeffs, vscale)
            pruned_coeffs = coeffs[:N]
        else:
            pruned_coeffs = coeffs
        # convert the (pruned) coefficients back to point values
        values = self.polyval(pruned_coeffs)
        return self(values, domain, vscale)
    @classmethod
    def dichotomy(self, f, kmin=2, kmax=12, raise_no_convergence=True,):
        """Compute the coefficients for a function f by dichotomy.
        kmin, kmax: log2 of number of interpolation points to try
        raise_no_convergence: whether to raise an exception if the dichotomy does not converge
        """
        for k in range(kmin, kmax):
            N = pow(2, k)
            sampled = self.sample_function(f, N)
            coeffs = self.polyfit(sampled)
            # 3) Check for negligible coefficients
            # If within bound: the last coefficients are negligible and we break
            bnd = self._threshold(np.max(np.abs(coeffs)))
            last = abs(coeffs[-2:])
            if np.all(last <= bnd):
                break
        else:
            # loop exhausted without the tail dropping below the threshold
            if raise_no_convergence:
                raise self.NoConvergence(last, bnd)
        return coeffs
    @classmethod
    def from_function(self, f, domain=None, N=None):
        """Initialise from a function to sample.
        N: optional parameter which indicates the range of the dichotomy
        """
        # rescale f to the unit domain
        domain = self.get_default_domain(domain)
        a,b = domain[0], domain[-1]
        map_ui_ab = lambda t: 0.5*(b-a)*t + 0.5*(a+b)
        args = {'f': lambda t: f(map_ui_ab(t))}
        if N is not None: # N is provided
            # constrain the dichotomy to a single size just above N
            nextpow2 = int(np.log2(N))+1
            args['kmin'] = nextpow2
            args['kmax'] = nextpow2+1
            args['raise_no_convergence'] = False
        else:
            args['raise_no_convergence'] = True
        # Find out the right number of coefficients to keep
        coeffs = self.dichotomy(**args)
        return self.from_coeff(coeffs, domain)
    @classmethod
    def _threshold(self, vscale):
        """Compute the threshold at which coefficients are trimmed."""
        bnd = 128*emach*vscale
        return bnd
    @classmethod
    def _cutoff(self, coeffs, vscale):
        """Compute cutoff index after which the coefficients are deemed
        negligible."""
        bnd = self._threshold(vscale)
        inds = np.nonzero(abs(coeffs) >= bnd)
        if len(inds[0]):
            # keep up to (and including) the last significant coefficient
            N = inds[0][-1]
        else:
            N = 0
        return N+1
    def __init__(self, values=0., domain=None, vscale=None):
        """Init an object from values at interpolation points.
        values: Interpolation values
        vscale: The actual vscale; computed automatically if not given
        """
        avalues = np.asarray(values,)
        avalues1 = np.atleast_1d(avalues)
        N = len(avalues1)
        points = self.interpolation_points(N)
        self._values = avalues1
        if vscale is not None:
            self._vscale = vscale
        else:
            self._vscale = np.max(np.abs(self._values))
        self.p = self.interpolator(points, avalues1)
        domain = self.get_default_domain(domain)
        self._domain = np.array(domain)
        a,b = domain[0], domain[-1]
        # maps from [-1,1] <-> [a,b]
        self._ab_to_ui = lambda x: (2.0*x-a-b)/(b-a)
        self._ui_to_ab = lambda t: 0.5*(b-a)*t + 0.5*(a+b)
    def same_domain(self, fun2):
        """Returns True if the domains of two objects are the same."""
        return np.allclose(self.domain(), fun2.domain(), rtol=1e-14, atol=1e-14)
    # ----------------------------------------------------------------
    # String representations
    # ----------------------------------------------------------------
    def __repr__(self):
        """Display method."""
        a, b = self.domain()
        vals = self.values()
        return (
            '%s \n '
            ' domain length endpoint values\n '
            ' [%5.1f, %5.1f] %5d %5.2f %5.2f\n '
            'vscale = %1.2e') % (
                str(type(self)).split('.')[-1].split('>')[0][:-1],
                a,b,self.size(),vals[-1],vals[0],self._vscale,)
    def __str__(self):
        return "<{0}({1})>".format(
            str(type(self)).split('.')[-1].split('>')[0][:-1],self.size(),)
    # ----------------------------------------------------------------
    # Basic Operator Overloads
    # ----------------------------------------------------------------
    def __call__(self, x):
        """Evaluate the interpolant at x (scalar or array) in [a, b]."""
        return self.p(self._ab_to_ui(x))
    def __getitem__(self, s):
        """Components s of the fun."""
        return self.from_data(self.values().T[s].T)
    def __bool__(self):
        """Test for difference from zero (up to tolerance)"""
        return not np.allclose(self.values(), 0)
    __nonzero__ = __bool__
    def __eq__(self, other):
        # equal iff the difference is (numerically) the zero function
        return not(self - other)
    def __ne__(self, other):
        return not (self == other)
    @cast_scalar
    def __add__(self, other):
        """Addition."""
        if not self.same_domain(other):
            raise self.DomainMismatch(self.domain(),other.domain())
        ps = [self, other]
        # length difference
        diff = other.size() - self.size()
        # determine which of self/other is the smaller/bigger
        big = diff > 0
        small = not big
        # pad the coefficients of the small one with zeros
        small_coeffs = ps[small].coefficients()
        big_coeffs = ps[big].coefficients()
        padded = np.zeros_like(big_coeffs)
        padded[:len(small_coeffs)] = small_coeffs
        # add the values and create a new object with them
        chebsum = big_coeffs + padded
        new_vscale = np.max([self._vscale, other._vscale])
        return self.from_coeff(
            chebsum, domain=self.domain(), vscale=new_vscale
        )
    __radd__ = __add__
    @cast_scalar
    def __sub__(self, other):
        """Subtraction."""
        return self + (-other)
    def __rsub__(self, other):
        return -(self - other)
    def __rmul__(self, other):
        # multiplication is commutative; __mul__ is attached by _add_operator
        return self.__mul__(other)
    def __rtruediv__(self, other):
        # __rdiv__ is attached by _add_operator (via the rdiv helper)
        return self.__rdiv__(other)
    def __neg__(self):
        """Negation."""
        return self.from_data(-self.values(),domain=self.domain())
    def __abs__(self):
        return self.from_function(lambda x: abs(self(x)),domain=self.domain())
    # ----------------------------------------------------------------
    # Attributes
    # ----------------------------------------------------------------
    def size(self):
        """Number of interpolation points."""
        return self.p.n
    def coefficients(self):
        """Chebyshev coefficients computed from the stored values."""
        return self.polyfit(self.values())
    def values(self):
        """Values at the interpolation points."""
        return self._values
    def domain(self):
        """The interval [a, b] the interpolant is defined on."""
        return self._domain
    # ----------------------------------------------------------------
    # Integration and differentiation
    # ----------------------------------------------------------------
    def integrate(self):
        # implemented by subclasses (see Chebfun.integrate)
        raise NotImplementedError()
    def differentiate(self):
        # implemented by subclasses (see Chebfun.differentiate)
        raise NotImplementedError()
    def dot(self, other):
        r"""Return the Hilbert scalar product :math:`\int f.g`."""
        prod = self * other
        return prod.sum()
    def norm(self):
        """
        Return: square root of scalar product with itself.
        """
        norm = np.sqrt(self.dot(self))
        return norm
    # ----------------------------------------------------------------
    # Miscellaneous operations
    # ----------------------------------------------------------------
    def restrict(self,subinterval):
        """Return a Polyfun that matches self on subinterval."""
        if (subinterval[0] < self._domain[0]) or (subinterval[1] > self._domain[1]):
            raise ValueError("Can only restrict to subinterval")
        return self.from_function(self, subinterval)
    # ----------------------------------------------------------------
    # Class method aliases
    # ----------------------------------------------------------------
    diff = differentiate
    cumsum = integrate
class Chebfun(Polyfun):
    """Eventually set this up so that a Chebfun is a collection of Chebfuns.
    This will enable piecewise smooth representations à la Matlab Chebfun v2.0.
    """
    # ----------------------------------------------------------------
    # Standard construction class methods.
    # ----------------------------------------------------------------
    @classmethod
    def get_default_domain(self, domain=None):
        """Default to the canonical Chebyshev interval [-1, 1]."""
        if domain is None:
            return [-1., 1.]
        else:
            return domain
    @classmethod
    def identity(self, domain=[-1., 1.]):
        """The identity function x -> x."""
        # NOTE(review): mutable default argument; it is not mutated here,
        # but callers should not rely on its identity
        return self.from_data([domain[1],domain[0]], domain)
    @classmethod
    def basis(self, n):
        """Chebyshev basis functions T_n."""
        if n == 0:
            return self(np.array([1.]))
        # T_n at the Chebyshev points alternates between +1 and -1
        vals = np.ones(n+1)
        vals[1::2] = -1
        return self(vals)
    # ----------------------------------------------------------------
    # Integration and differentiation
    # ----------------------------------------------------------------
    def sum(self):
        """Evaluate the integral over the given interval using Clenshaw-Curtis
        quadrature."""
        ak = self.coefficients()
        # only even-order Chebyshev polynomials have nonzero integrals
        ak2 = ak[::2]
        n = len(ak2)
        Tints = 2/(1-(2*np.arange(n))**2)
        val = np.sum((Tints*ak2.T).T, axis=0)
        a_, b_ = self.domain()
        # rescale from [-1, 1] to the actual interval width
        return 0.5*(b_-a_)*val
    def integrate(self):
        """Return the object representing the primitive of self over the domain.
        The output starts at zero on the left-hand side of the domain.
        """
        coeffs = self.coefficients()
        a,b = self.domain()
        int_coeffs = 0.5*(b-a)*poly.chebyshev.chebint(coeffs)
        antiderivative = self.from_coeff(int_coeffs, domain=self.domain())
        # anchor the antiderivative at zero on the left endpoint
        return antiderivative - antiderivative(a)
    def differentiate(self, n=1):
        """n-th derivative, default 1."""
        ak = self.coefficients()
        a_, b_ = self.domain()
        for _ in range(n):
            ak = self.differentiator(ak)
        # each derivative picks up a chain-rule factor of 2/(b-a)
        return self.from_coeff((2./(b_-a_))**n*ak, domain=self.domain())
    # ----------------------------------------------------------------
    # Roots
    # ----------------------------------------------------------------
    def roots(self):
        """Utilises Boyd's O(n^2) recursive subdivision algorithm.
        The chebfun
        is recursively subsampled until it is successfully represented to
        machine precision by a sequence of piecewise interpolants of degree
        100 or less. A colleague matrix eigenvalue solve is then applied to
        each of these pieces and the results are concatenated.
        See:
        J. P. Boyd, Computing zeros on a real interval through Chebyshev
        expansion and polynomial rootfinding, SIAM J. Numer. Anal., 40
        (2002), pp. 1666–1682.
        """
        if self.size() == 1:
            # a constant has no (isolated) roots
            return np.array([])
        elif self.size() <= 100:
            # small enough: build the colleague matrix and take eigenvalues
            ak = self.coefficients()
            v = np.zeros_like(ak[:-1])
            v[1] = 0.5
            C1 = toeplitz(v)
            C2 = np.zeros_like(C1)
            C1[0,1] = 1.
            C2[-1,:] = ak[:-1]
            C = C1 - .5/ak[-1] * C2
            eigenvalues = eigvals(C)
            # keep only (numerically) real eigenvalues inside [-1, 1]
            roots = [eig.real for eig in eigenvalues
                    if np.allclose(eig.imag,0,atol=1e-10)
                    and np.abs(eig.real) <=1]
            scaled_roots = self._ui_to_ab(np.array(roots))
            return scaled_roots
        else:
            try:
                # divide at a close-to-zero split-point
                split_point = self._ui_to_ab(0.0123456789)
                return np.concatenate(
                    (self.restrict([self._domain[0],split_point]).roots(),
                     self.restrict([split_point,self._domain[1]]).roots()))
            except:
                # NOTE(review): deliberately broad except - subdivision can
                # fail in several ways; fall back to numpy's rootfinder.
                # Seems to have many fake roots for high degree fits
                coeffs = self.coefficients()
                domain = self._domain
                possibilities = Chebyshev(coeffs, domain).roots()
                return np.array([float(i.real) for i in possibilities if i.imag == 0.0])
    # ----------------------------------------------------------------
    # Interpolation and evaluation (go from values to coefficients)
    # ----------------------------------------------------------------
    @classmethod
    def interpolation_points(self, N):
        """N Chebyshev points in [-1, 1], boundaries included."""
        if N == 1:
            return np.array([0.])
        return np.cos(np.arange(N)*np.pi/(N-1))
    @classmethod
    def sample_function(self, f, N):
        """Sample a function on N+1 Chebyshev points."""
        x = self.interpolation_points(N+1)
        return f(x)
    @classmethod
    def polyfit(self, sampled):
        """Compute Chebyshev coefficients for values located on Chebyshev
        points.
        sampled: array; first dimension is number of Chebyshev points
        """
        asampled = np.asarray(sampled)
        if len(asampled) == 1:
            return asampled
        # mirror the data so the DCT (via FFT) yields the coefficients
        evened = even_data(asampled)
        coeffs = dct(evened)
        return coeffs
    @classmethod
    def polyval(self, chebcoeff):
        """Compute the interpolation values at Chebyshev points.
        chebcoeff: Chebyshev coefficients
        """
        N = len(chebcoeff)
        if N == 1:
            return chebcoeff
        # inverse of polyfit: undo the endpoint halving, mirror, and ifft
        data = even_data(chebcoeff)/2
        data[0] *= 2
        data[N-1] *= 2
        fftdata = 2*(N-1)*fftpack_ifft(data, axis=0)
        complex_values = fftdata[:N]
        # convert to real if input was real
        if np.isrealobj(chebcoeff):
            values = np.real(complex_values)
        else:
            values = complex_values
        return values
    @classmethod
    def interpolator(self, x, values):
        """Returns a polynomial with vector coefficients which interpolates the
        values at the Chebyshev points x."""
        # hacking the barycentric interpolator by computing the weights in advance
        # NOTE(review): writes the private attributes wi/xi of scipy's
        # BarycentricInterpolator; fragile across scipy versions
        from scipy.interpolate import BarycentricInterpolator as Bary
        p = Bary([0.])
        N = len(values)
        # closed-form barycentric weights for Chebyshev extreme points
        weights = np.ones(N)
        weights[0] = .5
        weights[1::2] = -1
        weights[-1] *= .5
        p.wi = weights
        p.xi = x
        p.set_yi(values)
        return p
    # ----------------------------------------------------------------
    # Helper for differentiation.
    # ----------------------------------------------------------------
    @classmethod
    def differentiator(self, A):
        """Differentiate a set of Chebyshev polynomial expansion coefficients
        Originally from http://www.scientificpython.net/pyblog/chebyshev-
        differentiation.
        + (lots of) bug fixing + pythonisation
        """
        m = len(A)
        SA = (A.T* 2*np.arange(m)).T
        DA = np.zeros_like(A)
        if m == 1: # constant
            return np.zeros_like(A[0:1])
        if m == 2: # linear
            return A[1:2,]
        # backward recurrence for the derivative coefficients
        DA[m-3:m-1,] = SA[m-2:m,]
        for j in range(m//2 - 1):
            k = m-3-2*j
            DA[k] = SA[k+1] + DA[k+2]
            DA[k-1] = SA[k] + DA[k+1]
        DA[0] = (SA[1] + DA[2])*0.5
        return DA
# ----------------------------------------------------------------
# General utilities
# ----------------------------------------------------------------
def even_data(data):
    """
    Construct Extended Data Vector (equivalent to creating an
    even extension of the original function)
    Return: array of length 2(N-1)
    For instance, [0,1,2,3,4] --> [0,1,2,3,4,3,2,1]
    """
    # append the interior points in reverse order to mirror the data
    mirrored = data[1:-1][::-1]
    return np.concatenate([data, mirrored])
def dct(data):
    """Compute the DCT via a plain FFT of the mirrored data.

    Used by ``Chebfun.polyfit`` to turn values at Chebyshev points into
    Chebyshev coefficients; preserves realness of the input.
    """
    N = len(data)//2
    spectrum = fftpack_fft(data, axis=0)[:N+1]
    spectrum /= N
    # the two endpoint modes are shared between the mirrored halves
    spectrum[0] /= 2.
    spectrum[-1] /= 2.
    if np.isrealobj(data):
        return np.real(spectrum)
    return spectrum
# ----------------------------------------------------------------
# Add overloaded operators
# ----------------------------------------------------------------
def _add_operator(cls, op):
    """Attach binary operator *op* to *cls* as a dunder method that applies
    ``op`` pointwise and (via cast_scalar) accepts scalar operands."""
    def method(self, other):
        if not self.same_domain(other):
            raise self.DomainMismatch(self.domain(), other.domain())
        return self.from_function(
            lambda x: op(self(x).T, other(x).T).T, domain=self.domain(), )
    dunder = '__' + op.__name__ + '__'
    bound = cast_scalar(method)
    bound.__name__ = dunder
    bound.__doc__ = "operator {}".format(dunder)
    setattr(cls, dunder, bound)
def rdiv(a, b):
    """Reflected division helper: returns ``b / a`` (so registering it via
    _add_operator produces ``__rdiv__`` semantics)."""
    return b / a
# Attach the pointwise arithmetic operators to every Polyfun; rdiv is the
# reflected-division helper that becomes __rdiv__.
for _op in [operator.mul, operator.truediv, operator.pow, rdiv]:
    _add_operator(Polyfun, _op)
# ----------------------------------------------------------------
# Add numpy ufunc delegates
# ----------------------------------------------------------------
def _add_delegate(ufunc, nonlinear=True):
    """Expose numpy *ufunc* as a Polyfun method that applies it pointwise
    (re-fitting the result on the same domain)."""
    name = ufunc.__name__
    def method(self):
        return self.from_function(lambda x: ufunc(self(x)), domain=self.domain())
    method.__name__ = name
    method.__doc__ = "delegate for numpy's ufunc {}".format(name)
    setattr(Polyfun, name, method)
# Following list generated from:
# https://github.com/numpy/numpy/blob/master/numpy/core/code_generators/generate_umath.py
# Register each elementwise numpy function as a Polyfun method.
for func in [np.arccos, np.arccosh, np.arcsin, np.arcsinh, np.arctan, np.arctanh, np.cos, np.sin, np.tan, np.cosh, np.sinh, np.tanh, np.exp, np.exp2, np.expm1, np.log, np.log2, np.log1p, np.sqrt, np.ceil, np.trunc, np.fabs, np.floor, ]:
    _add_delegate(func)
# ----------------------------------------------------------------
# General Aliases
# ----------------------------------------------------------------
## chebpts = interpolation_points
# ----------------------------------------------------------------
# Constructor inspired by the Matlab version
# ----------------------------------------------------------------
def chebfun(f=None, domain=(-1, 1), N=None, chebcoeff=None,):
    """Create a Chebyshev polynomial approximation of the function $f$ on the
    interval :math:`[-1, 1]`.

    :param f: Python, Numpy, or Sage function; or an existing Polyfun;
        or a scalar / iterable of interpolation values
    :param domain: interval of approximation (default ``(-1, 1)``; a tuple
        rather than a list so the default cannot be mutated between calls)
    :param int N: (default = None) specify number of interpolating points
    :param np.array chebcoeff: (default = np.array(0)) specify the coefficients
    :raises TypeError: if *f* is none of the supported kinds
    """
    # Chebyshev coefficients
    if chebcoeff is not None:
        return Chebfun.from_coeff(chebcoeff, domain)
    # another instance
    if isinstance(f, Polyfun):
        return Chebfun.from_fun(f)
    # callable
    if hasattr(f, '__call__'):
        return Chebfun.from_function(f, domain, N)
    # from here on, assume that f is None, or iterable
    if np.isscalar(f):
        f = [f]
    try:
        iter(f) # interpolation values provided
    except TypeError:
        pass
    else:
        return Chebfun(f, domain)
    raise TypeError('Impossible to initialise the object from an object of type {}'.format(type(f)))
|
Ransomed Roads: Poll Results (Fave US Parts) & New Poll Announced!
1st - The West Coast at 47% with 8 out of 17!
2nd - This was a 3-way tie between New England, the Rocky Mountain area, and Hawaii. They each took 6 votes (35%).
3rd - The Southwest came in right behind with 5 votes (29%).
All other choices were under the 25% mark, with Alaska coming in dead last with only one vote.
So, what does this mean? Well, that's where the data takes a bit of a turn. After all, what states constitute each of the areas listed in the poll? I realized that I hadn't gone and specifically researched that beforehand, and just did a little searching.
What I found is that no one really knows. For each area, there is a great deal of overlap. The Southwest often includes Texas, which makes sense, though Texas could also be claimed by the Deep South. The Southwest then sometimes goes farther than Arizona and New Mexico (which is what I thought of), and claims parts of Nevada, Utah, Colorado, and Oklahoma. To complicate matters more, the Rocky Mountains region claims not only the Northern Rockies states, but also the aforementioned (excluding Oklahoma). And don't get me started on what is considered "Mid"West.
This month, I'm trying to focus on some geographical and physical aspects of traveling on this rock we call Earth. At times, it will be about boundaries (like last week), climate, and (as with this week) types of terrain.
I find that we are all drawn to different types of places. I could name each of these as equally interesting, and each for their own reason. For that very reason, I made this one a "choose only one" poll, forcing us all to dig deep and see what really drives us. I expect it will be painful. I'm a Pisces, after all.
I did a bit more research on this one, as well, to go beyond what my brain initially thought of as "terrain." Although not listed under that heading, I reminded myself that cities and towns can also be a draw for people. That, of course, opens up the question of what constitutes proper terrain, topography, etc., and now we're down the rabbit hole. So feel free to comment below or on Facebook. Let's get a discussion going, and straighten out this conundrum once and for all...or at least leave it firmly crooked.
Let me know if you have any more troubles; I'm not trying to make this difficult for you...or am I?
*NOTE: I realized as I was posting, that swamps were left out and decided to put that under the category of "Wetlands" with Rainforests.
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import pipes
import subprocess
import sys
import time
from jinja2.runtime import Undefined
from six import iteritems
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleConnectionFailure
from ansible.playbook.conditional import Conditional
from ansible.playbook.task import Task
from ansible.template import Templar
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.unicode import to_unicode
from ansible.vars.unsafe_proxy import UnsafeProxy
from ansible.utils.debug import debug
__all__ = ['TaskExecutor']
class TaskExecutor:
'''
This is the main worker class for the executor pipeline, which
handles loading an action plugin to actually dispatch the task to
a given host. This class roughly corresponds to the old Runner()
class.
'''
# Modules that we optimize by squashing loop items into a single call to
# the module
SQUASH_ACTIONS = frozenset(C.DEFAULT_SQUASH_ACTIONS)
def __init__(self, host, task, job_vars, play_context, new_stdin, loader, shared_loader_obj):
self._host = host
self._task = task
self._job_vars = job_vars
self._play_context = play_context
self._new_stdin = new_stdin
self._loader = loader
self._shared_loader_obj = shared_loader_obj
try:
from __main__ import display
self._display = display
except ImportError:
from ansible.utils.display import Display
self._display = Display()
    def run(self):
        '''
        The main executor entrypoint, where we determine if the specified
        task requires looping and either runs the task once per loop item
        or executes it directly, returning the aggregated result dict.
        '''
        debug("in run()")
        try:
            # lookup plugins need to know if this task is executing from
            # a role, so that it can properly find files/templates/etc.
            roledir = None
            if self._task._role:
                roledir = self._task._role._role_path
            self._job_vars['roledir'] = roledir
            items = self._get_loop_items()
            if items is not None:
                if len(items) > 0:
                    item_results = self._run_loop(items)
                    # loop through the item results, and remember the changed/failed
                    # result flags based on any item there.
                    changed = False
                    failed = False
                    for item in item_results:
                        if 'changed' in item and item['changed']:
                            changed = True
                        if 'failed' in item and item['failed']:
                            failed = True
                    # create the overall result item, and set the changed/failed
                    # flags there to reflect the overall result of the loop
                    res = dict(results=item_results)
                    if changed:
                        res['changed'] = True
                    if failed:
                        res['failed'] = True
                        res['msg'] = 'One or more items failed'
                    else:
                        res['msg'] = 'All items completed'
                else:
                    # an empty loop is a no-op, reported as skipped
                    res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
            else:
                # no loop: a single direct execution
                debug("calling self._execute()")
                res = self._execute()
                debug("_execute() done")
            # make sure changed is set in the result, if it's not present
            if 'changed' not in res:
                res['changed'] = False
            def _clean_res(res):
                # recursively unwrap UnsafeProxy objects (containers are
                # cleaned in place) so the result can be serialized
                if isinstance(res, dict):
                    for k in res.keys():
                        res[k] = _clean_res(res[k])
                elif isinstance(res, list):
                    for idx,item in enumerate(res):
                        res[idx] = _clean_res(item)
                elif isinstance(res, UnsafeProxy):
                    return res._obj
                return res
            debug("dumping result to json")
            res = _clean_res(res)
            debug("done dumping result, returning")
            return res
        except AnsibleError as e:
            # task-level errors become a failed result, not a crash
            return dict(failed=True, msg=to_unicode(e, nonstring='simplerepr'))
        finally:
            try:
                self._connection.close()
            except AttributeError:
                # no connection was ever established for this task
                pass
            except Exception as e:
                debug("error closing connection: %s" % to_unicode(e))
def _get_loop_items(self):
'''
Loads a lookup plugin to handle the with_* portion of a task (if specified),
and returns the items result.
'''
# create a copy of the job vars here so that we can modify
# them temporarily without changing them too early for other
# parts of the code that might still need a pristine version
vars_copy = self._job_vars.copy()
# now we update them with the play context vars
self._play_context.update_vars(vars_copy)
templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=vars_copy)
items = None
if self._task.loop:
if self._task.loop in self._shared_loader_obj.lookup_loader:
#TODO: remove convert_bare true and deprecate this in with_
try:
loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=True, convert_bare=True)
except AnsibleUndefinedVariable as e:
if 'has no attribute' in str(e):
loop_terms = []
self._display.deprecated("Skipping task due to undefined attribute, in the future this will be a fatal error.")
else:
raise
items = self._shared_loader_obj.lookup_loader.get(self._task.loop, loader=self._loader, templar=templar).run(terms=loop_terms, variables=vars_copy)
else:
raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop)
if items:
from ansible.vars.unsafe_proxy import UnsafeProxy
for idx, item in enumerate(items):
if item is not None and not isinstance(item, UnsafeProxy):
items[idx] = UnsafeProxy(item)
return items
    def _run_loop(self, items):
        '''
        Runs the task with the loop items specified and collates the result
        into an array named 'results' which is inserted into the final result
        along with the item for which the loop ran.
        '''
        results = []
        # make copies of the job vars and task so we can add the item to
        # the variables and re-validate the task with the item variable
        task_vars = self._job_vars.copy()
        # certain modules (e.g. package managers) may squash all items
        # into a single invocation; see _squash_items
        items = self._squash_items(items, task_vars)
        for item in items:
            task_vars['item'] = item
            try:
                tmp_task = self._task.copy()
                tmp_play_context = self._play_context.copy()
            except AnsibleParserError as e:
                # a failed copy counts as a failed item; keep looping
                results.append(dict(failed=True, msg=str(e)))
                continue
            # now we swap the internal task and play context with their copies,
            # execute, and swap them back so we can do the next iteration cleanly
            (self._task, tmp_task) = (tmp_task, self._task)
            (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
            res = self._execute(variables=task_vars)
            (self._task, tmp_task) = (tmp_task, self._task)
            (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
            # now update the result with the item info, and append the result
            # to the list of results
            res['item'] = item
            results.append(res)
        return results
def _squash_items(self, items, variables):
'''
Squash items down to a comma-separated list for certain modules which support it
(typically package management modules).
'''
if len(items) > 0 and self._task.action in self.SQUASH_ACTIONS:
final_items = []
name = self._task.args.pop('name', None) or self._task.args.pop('pkg', None)
for item in items:
variables['item'] = item
templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
if self._task.evaluate_conditional(templar, variables):
if templar._contains_vars(name):
new_item = templar.template(name)
final_items.append(new_item)
else:
final_items.append(item)
joined_items = ",".join(final_items)
self._task.args['name'] = joined_items
return [joined_items]
else:
return items
    def _execute(self, variables=None):
        '''
        The primary workhorse of the executor system: runs the task on the
        specified host (which may be the delegated_to host) and handles the
        conditional evaluation, async polling and retry/until logic around
        the action handler invocation.

        :param variables: the variable dict to run with; defaults to the job
            vars captured when this executor was constructed.
        :returns: a result dict from the module/action, possibly augmented
            with keys such as 'attempts', 'failed', 'changed', 'skipped',
            'include'/'include_variables' or '_ansible_notify'.
        '''
        if variables is None:
            variables = self._job_vars

        templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)

        # apply the given task's information to the connection info,
        # which may override some fields already set by the play or
        # the options specified on the command line
        self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar)

        # fields set from the play/task may be based on variables, so we have to
        # do the same kind of post validation step on it here before we use it.
        # We also add "magic" variables back into the variables dict to make sure
        # a certain subset of variables exist.
        self._play_context.update_vars(variables)
        self._play_context.post_validate(templar=templar)

        # Evaluate the conditional (if any) for this task, which we do before running
        # the final task post-validation. We do this before the post validation due to
        # the fact that the conditional may specify that the task be skipped due to a
        # variable not being present which would otherwise cause validation to fail
        if not self._task.evaluate_conditional(templar, variables):
            debug("when evaulation failed, skipping this task")
            return dict(changed=False, skipped=True, skip_reason='Conditional check failed')

        # Now we do final validation on the task, which sets all fields to their final values.
        # In the case of debug tasks, we save any 'var' params and restore them after validating
        # so that variables are not replaced too early.
        prev_var = None
        if self._task.action == 'debug' and 'var' in self._task.args:
            prev_var = self._task.args.pop('var')

        # keep the pre-validation (untemplated) args around: 'include' tasks
        # return the raw arguments below
        original_args = self._task.args.copy()
        self._task.post_validate(templar=templar)
        if '_variable_params' in self._task.args:
            variable_params = self._task.args.pop('_variable_params')
            if isinstance(variable_params, dict):
                self._display.deprecated("Using variables for task params is unsafe, especially if the variables come from an external source like facts")
                variable_params.update(self._task.args)
                self._task.args = variable_params

        if prev_var is not None:
            self._task.args['var'] = prev_var

        # if this task is a TaskInclude, we just return now with a success code so the
        # main thread can expand the task list for the given host
        if self._task.action == 'include':
            include_variables = original_args
            include_file = include_variables.get('_raw_params')
            del include_variables['_raw_params']
            return dict(include=include_file, include_variables=include_variables)

        # get the connection and the handler for this execution
        self._connection = self._get_connection(variables)
        self._connection.set_host_overrides(host=self._host)

        self._handler = self._get_action_handler(connection=self._connection, templar=templar)

        # And filter out any fields which were set to default(omit), and got the omit token value
        omit_token = variables.get('omit')
        if omit_token is not None:
            self._task.args = dict((i[0], i[1]) for i in iteritems(self._task.args) if i[1] != omit_token)

        # Read some values from the task, so that we can modify them if need be
        retries = self._task.retries
        if retries <= 0:
            retries = 1

        delay = self._task.delay
        if delay < 0:
            delay = 1

        # make a copy of the job vars here, in case we need to update them
        # with the registered variable value later on when testing conditions
        vars_copy = variables.copy()

        debug("starting attempt loop")
        result = None
        for attempt in range(retries):
            if attempt > 0:
                # FIXME: this should use the callback/message passing mechanism
                print("FAILED - RETRYING: %s (%d retries left). Result was: %s" % (self._task, retries-attempt, result))
                result['attempts'] = attempt + 1

            debug("running the handler")
            try:
                result = self._handler.run(task_vars=variables)
            except AnsibleConnectionFailure as e:
                return dict(unreachable=True, msg=str(e))
            debug("handler run complete")

            if self._task.async > 0:
                # the async_wrapper module returns dumped JSON via its stdout
                # response, so we parse it here and replace the result
                try:
                    result = json.loads(result.get('stdout'))
                except (TypeError, ValueError) as e:
                    return dict(failed=True, msg="The async task did not return valid JSON: %s" % str(e))

                if self._task.poll > 0:
                    result = self._poll_async_result(result=result, templar=templar)

            # update the local copy of vars with the registered value, if specified,
            # or any facts which may have been generated by the module execution
            if self._task.register:
                vars_copy[self._task.register] = result

            if 'ansible_facts' in result:
                vars_copy.update(result['ansible_facts'])

            # create a conditional object to evaluate task conditions
            cond = Conditional(loader=self._loader)

            # helpers that apply the task's changed_when/failed_when overrides
            # to the current result, evaluated against the updated vars_copy
            def _evaluate_changed_when_result(result):
                if self._task.changed_when is not None:
                    cond.when = [ self._task.changed_when ]
                    result['changed'] = cond.evaluate_conditional(templar, vars_copy)

            def _evaluate_failed_when_result(result):
                if self._task.failed_when is not None:
                    cond.when = [ self._task.failed_when ]
                    failed_when_result = cond.evaluate_conditional(templar, vars_copy)
                    result['failed_when_result'] = result['failed'] = failed_when_result
                    return failed_when_result
                return False

            if self._task.until:
                # 'until' takes precedence: retry until the condition holds
                # (or retries are exhausted)
                cond.when = self._task.until
                if cond.evaluate_conditional(templar, vars_copy):
                    _evaluate_changed_when_result(result)
                    _evaluate_failed_when_result(result)
                    break
            elif (self._task.changed_when is not None or self._task.failed_when is not None) and 'skipped' not in result:
                _evaluate_changed_when_result(result)
                if _evaluate_failed_when_result(result):
                    break
            elif 'failed' not in result:
                if result.get('rc', 0) != 0:
                    result['failed'] = True
                else:
                    # if the result is not failed, stop trying
                    break

            if attempt < retries - 1:
                time.sleep(delay)
            else:
                # last attempt: still apply any changed_when/failed_when overrides
                _evaluate_changed_when_result(result)
                _evaluate_failed_when_result(result)

        # do the final update of the local variables here, for both registered
        # values and any facts which may have been created
        if self._task.register:
            variables[self._task.register] = result

        if 'ansible_facts' in result:
            variables.update(result['ansible_facts'])

        # save the notification target in the result, if it was specified, as
        # this task may be running in a loop in which case the notification
        # may be item-specific, ie. "notify: service {{item}}"
        if self._task.notify is not None:
            result['_ansible_notify'] = self._task.notify

        # and return
        debug("attempt loop complete, returning result")
        return result
    def _poll_async_result(self, result, templar):
        '''
        Polls the async job (by its JID) until it finishes, fails, is
        skipped, or the task's async time limit is exceeded.

        :param result: the initial result from launching the async task,
            expected to carry an 'ansible_job_id' key.
        :param templar: the Templar passed to the status-check handler.
        :returns: the final async_status result dict, or a failure dict if
            no job id was returned or the job did not finish in time.
        '''
        async_jid = result.get('ansible_job_id')
        if async_jid is None:
            return dict(failed=True, msg="No job id was returned by the async task")

        # Create a new pseudo-task to run the async_status module, and run
        # that (with a sleep for "poll" seconds between each retry) until the
        # async time limit is exceeded.
        async_task = Task().load(dict(action='async_status jid=%s' % async_jid))

        # Because this is an async task, the action handler is async. However,
        # we need the 'normal' action handler for the status check, so get it
        # now via the action_loader
        normal_handler = self._shared_loader_obj.action_loader.get(
            'normal',
            task=async_task,
            connection=self._connection,
            play_context=self._play_context,
            loader=self._loader,
            templar=templar,
            shared_loader_obj=self._shared_loader_obj,
        )

        # NOTE(review): assumes self._task.async > 0 and self._task.poll > 0
        # (the caller only invokes this when both hold); if the loop never
        # ran, async_result below would be unbound -- confirm callers.
        time_left = self._task.async
        while time_left > 0:
            time.sleep(self._task.poll)

            async_result = normal_handler.run()
            if int(async_result.get('finished', 0)) == 1 or 'failed' in async_result or 'skipped' in async_result:
                break

            time_left -= self._task.poll

        if int(async_result.get('finished', 0)) != 1:
            return dict(failed=True, msg="async task did not complete within the requested time")
        else:
            return async_result
    def _get_connection(self, variables):
        '''
        Reads the connection property for the host, and returns the
        correct connection object from the list of connection plugins.

        :param variables: the task variables; mutated when delegating so that
            interpreter settings come from the delegated-to host.
        :returns: an initialized connection plugin instance.
        :raises AnsibleError: if no connection plugin matches the resolved type.
        '''
        # FIXME: calculation of connection params/auth stuff should be done here
        if not self._play_context.remote_addr:
            self._play_context.remote_addr = self._host.address

        if self._task.delegate_to is not None:
            # since we're delegating, we don't want to use interpreter values
            # which would have been set for the original target host
            # (NOTE: deleting while iterating .keys() relies on Python 2,
            # where keys() returns a list snapshot)
            for i in variables.keys():
                if i.startswith('ansible_') and i.endswith('_interpreter'):
                    del variables[i]

            # now replace the interpreter values with those that may have come
            # from the delegated-to host
            delegated_vars = variables.get('ansible_delegated_vars', dict())
            if isinstance(delegated_vars, dict):
                for i in delegated_vars:
                    if i.startswith("ansible_") and i.endswith("_interpreter"):
                        variables[i] = delegated_vars[i]

        conn_type = self._play_context.connection
        if conn_type == 'smart':
            conn_type = 'ssh'
            if sys.platform.startswith('darwin') and self._play_context.password:
                # due to a current bug in sshpass on OSX, which can trigger
                # a kernel panic even for non-privileged users, we revert to
                # paramiko on that OS when a SSH password is specified
                conn_type = "paramiko"
            else:
                # see if SSH can support ControlPersist if not use paramiko
                # (an ssh too old for the option prints 'Bad configuration
                # option' or its usage text to stderr)
                try:
                    cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    (out, err) = cmd.communicate()
                    if "Bad configuration option" in err or "Usage:" in err:
                        conn_type = "paramiko"
                except OSError:
                    conn_type = "paramiko"

        connection = self._shared_loader_obj.connection_loader.get(conn_type, self._play_context, self._new_stdin)
        if not connection:
            raise AnsibleError("the connection plugin '%s' was not found" % conn_type)

        return connection
def _get_action_handler(self, connection, templar):
'''
Returns the correct action plugin to handle the requestion task action
'''
if self._task.action in self._shared_loader_obj.action_loader:
if self._task.async != 0:
raise AnsibleError("async mode is not supported with the %s module" % self._task.action)
handler_name = self._task.action
elif self._task.async == 0:
handler_name = 'normal'
else:
handler_name = 'async'
handler = self._shared_loader_obj.action_loader.get(
handler_name,
task=self._task,
connection=connection,
play_context=self._play_context,
loader=self._loader,
templar=templar,
shared_loader_obj=self._shared_loader_obj,
)
if not handler:
raise AnsibleError("the handler '%s' was not found" % handler_name)
return handler
|
Welcome to My Cruise Blog from the Cruise Village. Here you will find the latest Cruise News and low cost Cruise Deals from all cruise lines! Call free to book any cruise on 0800 810 8229 or visit www.thecruisevillage.com for our latest cruise deals!
The following information has been provided by Holland America Line.
Earlier this week Holland America Line announced the name for its new Pinnacle Class ship, which is scheduled for delivery in February 2016: ms Koningsdam. While the name honors Holland America Line's rich heritage and deep roots in the Netherlands, it also reflects a new era with a bold name that is an original in Holland America Line's 141-year history.
Since 1883 every Holland America passenger vessel has borne the “dam” suffix, and ms Koningsdam carries on that tradition. The word koning means “king” in Dutch, and the name celebrates the majestic new ship. The name also pays honor to His Majesty King Willem-Alexander, the first king of the Netherlands in over a century.
When it debuts in February 2016, ms Koningsdam will be a new type of ship for Holland America Line. At 99,500 gross tons and carrying 2,650 guests and 1,025 crew members, the vessel is the largest ever built for the company. The increased size provides more opportunities to add new public spaces and venues, and several innovative features will debut on ms Koningsdam. Familiar spaces and amenities currently featured across the Holland America Line fleet will also be found on the new ship.
To bring a fresh vision to ms Koningsdam, Holland America Line tapped Adam D. Tihany, one of the world’s pre-eminent hospitality designers. Tihany joins distinguished maritime designer and architect Bjorn Storbraaten who worked with Holland America Line on ms Nieuw Amsterdam and ms Eurodam. The two firms will create a modern and contemporary ambiance while incorporating Holland America Line’s classic hallmarks.
Visit The Cruise Village today for our latest cruise deals.
The Cruise Village Cruise News Blog. Powered by Blogger.
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import select
from contextlib import contextmanager
from pants.pantsd.pailgun_server import PailgunServer
from pants.pantsd.service.pants_service import PantsService
class PailgunService(PantsService):
  """A service that owns and drives the Pailgun (nailgun-protocol) server."""

  def __init__(self, bind_addr, runner_class, scheduler_service):
    """
    :param tuple bind_addr: The (hostname, port) tuple to bind the Pailgun server to.
    :param class runner_class: The `PantsRunner` class to be used for Pailgun runs.
    :param SchedulerService scheduler_service: The SchedulerService instance for access to the
                                               resident scheduler.
    """
    super(PailgunService, self).__init__()
    self._bind_addr = bind_addr
    self._runner_class = runner_class
    self._scheduler_service = scheduler_service

    self._logger = logging.getLogger(__name__)
    # Constructed lazily on first access via the `pailgun` property.
    self._pailgun = None

  @property
  def pailgun(self):
    """The underlying PailgunServer instance, created on first use."""
    if not self._pailgun:
      self._pailgun = self._setup_pailgun()
    return self._pailgun

  @property
  def pailgun_port(self):
    """The port the Pailgun server is bound to."""
    return self.pailgun.server_port

  def _setup_pailgun(self):
    """Constructs a PailgunServer wired to this service's runner and locks."""
    def runner_factory(sock, arguments, environment):
      # Each accepted connection gets a runner sharing the daemon's fork
      # lock and the resident scheduler service.
      return self._runner_class.create(
        sock,
        arguments,
        environment,
        self.fork_lock,
        self._scheduler_service
      )

    # Plumb the daemon's lifecycle lock to the `PailgunServer` to safeguard teardown.
    @contextmanager
    def lifecycle_lock():
      with self.lifecycle_lock:
        yield

    return PailgunServer(self._bind_addr, runner_factory, lifecycle_lock)

  def run(self):
    """Main service entrypoint. Called via Thread.start() via PantsDaemon.run()."""
    self._logger.info('starting pailgun server on port {}'.format(self.pailgun_port))
    try:
      # Handle one request at a time (vs serve_forever()) so the kill flag
      # is re-checked between requests, keeping the loop interruptible.
      while not self.is_killed:
        self.pailgun.handle_request()
    except select.error:
      # SocketServer can throw `error: (9, 'Bad file descriptor')` on teardown. Ignore it.
      self._logger.warning('pailgun service shutting down')

  def terminate(self):
    """Closes the Pailgun TCPServer, then runs base-class teardown."""
    if self.pailgun:
      self.pailgun.server_close()
    super(PailgunService, self).terminate()
|
CCT Original – In her family’s new home, a young girl is surrounded by boxes containing all of her belongings. She searches for her favorite toy, but instead finds an imaginary friend! This new friend shows her how to turn ordinary into extraordinary. Backed with whimsical music and magical sound effects, this Theatre for the Very Young show follows the story of two friends who transform the piles of boxes into an adventure-filled playground!
|
import flask
import jsonschema
import pantry.common.pantry_error as perror
import pantry.v1.backend as backend
targets_blueprint = flask.Blueprint("targets", __name__)
@targets_blueprint.route('/targets/', methods=['GET'])
def list_targets():
    """Return all targets (optionally filtered by query args) as JSON."""
    targets = backend.get_targets(flask.request.args)
    return flask.jsonify({"targets": targets})
@targets_blueprint.route('/targets/<int:target_id>/', methods=['GET'])
def get_target(target_id):
    """Return the target with the given id as JSON.

    :raises PantryError: with a 404 status when no such target exists.
    """
    result = backend.get_target(target_id, flask.request.args)
    if not result:
        raise perror.PantryError(
            "Could not find target with id {}".format(target_id),
            status_code=404)
    # (removed a leftover debug print of the result that polluted stdout)
    return flask.jsonify(result)
@targets_blueprint.route('/targets/', methods=['POST'])
def create_target():
    """Validate the posted JSON and create a new target.

    Responds 201 with the stored resource and a Location header; raises a
    400 PantryError when the payload fails schema validation.
    """
    json_schema = {
        "$schema": "http://json-schema.org/draft-04/schema#",
        "type": "object",
        "properties": {
            "hostname": {"type": "string"},
            "nickname": {"type": "string"},
            "description": {"type": "string"},
            "maintainer": {"type": "string"},
            "healthPercent": {"type": "number"},
            "active": {"type": "boolean"},
            "tags": {"type": "array"}
        },
        "required": [
            "hostname",
            "description",
            "maintainer"
        ]
    }

    payload = flask.request.get_json(force=True)

    # validate the provided json before touching the backend
    try:
        jsonschema.validate(payload, json_schema)
    except jsonschema.ValidationError as e:
        raise perror.PantryError("invalid data: {}".format(e.message),
                                 status_code=400)

    # persist, then respond with the stored representation and its location
    target_id = backend.create_target(payload)
    response = flask.jsonify(backend.get_target(target_id))
    response.headers['Location'] = "/targets/{}".format(target_id)
    response.status_code = 201
    return response
@targets_blueprint.route('/targets/<int:target_id>/', methods=['DELETE'])
def delete_target(target_id):
    """Delete the target with the given id; 204 on success, 404 if absent."""
    if not backend.delete_target(target_id):
        raise perror.PantryError(
            f"Could not find target with id {target_id}",
            status_code=404)
    return "", 204
|
how do i clean my front load washer how to clean your washing machine how do i clean my kenmore front load washer.
how do i clean my front load washer the filter is located at the bottom right side in the front of the washing how do i clean my frigidaire front load washer.
how do i clean my front load washer affresh washer machine cleaner 6 tablets 84 oz how do i clean my front load washer with bleach.
how do i clean my front load washer how to clean your washing machine cleaning the inside of front or top loading washing machine how do i clean my front load washer with mold on it.
how do i clean my front load washer great tutorial on how to clean your washing machine and get rid of that washing machine how do i clean my front load washer with bleach.
how do i clean my front load washer awesome tutorial on how to clean your washing machine a must read how do i clean my ge front load washer.
how do i clean my front load washer how to naturally clean any washing machine via clean mama how do i clean my he front load washer.
how do i clean my front load washer how much water should a washing machine use front loader how do i clean my maytag front load washer.
how do i clean my front load washer front load washer and clothes dryer in laundry room how do i clean my frigidaire front load washer.
how do i clean my front load washer ultra large smart wi fi enabled front load washer how do i clean my frigidaire front load washer.
how do i clean my front load washer how to clean your front loader washing machine how do i clean my samsung front load washer.
how do i clean my front load washer front load washing machine how do i clean my frigidaire front load washer.
how do i clean my front load washer image titled clean a front load washer step 6 how do i clean my samsung front load washer.
how do i clean my front load washer he washer freshness cleaning window how do i clean my kenmore front load washer.
how do i clean my front load washer ultra large smart wi fi enabled front load washer how do i clean my front load washer.
how do i clean my front load washer zanussi washing machine zwf16581wjpg clean mop heads in the washer lg cu ft cycle frontloading smart wifi washer with lg cu ft cycle frontloading smart wi.
how do i clean my front load washer how to clean your front loader washing machine how do i clean my frigidaire front load washer.
how do i clean my front load washer front loading washer repairs how do i clean my samsung front load washer.
how do i clean my front load washer how to clean front load washer door gasket how do i clean my front load washer with bleach.
how do i clean my front load washer front loading washers have a tendency of pooling water at the bottom of their drums which can be a breeding ground bacteria and mildew how do i clean my lg front load washer.
how do i clean my front load washer how to clean front load washer how do i clean my samsung front load washer.
how do i clean my front load washer how to clean washing machine top loading how do i clean my frigidaire front load washer.
how do i clean my front load washer how to clean your washing machine how do i clean my samsung front load washer.
how do i clean my front load washer how do i clean my lg front load washer.
how do i clean my front load washer how do i clean my front load washer with bleach.
how do i clean my front load washer how to clean your front loader washing machine how do i clean my front load washer door seal.
how do i clean my front load washer how to maintain a clean front load washer how do i clean my maytag front load washer.
how do i clean my front load washer how to wash pillows in a front load washing machine ask anna how do i clean my kenmore front load washer.
how do i clean my front load washer front load washing machine how do i clean my front load washer.
how do i clean my front load washer got a front loader you need to clean it how do i clean my front load washer door seal.
|
# -*- coding: utf-8 -*-
from eventlet.greenthread import sleep
from eventlet.green import urllib2
import simplejson as json
from collections import defaultdict
import urllib
from kral.utils import fetch_json
def stream(queries, queue, settings, kral_start_time):
    """
    Poll the Reddit search API for each query and push new posts onto *queue*.

    Runs forever: every 30 seconds each query is searched, and results not
    present in the per-query recent-id buffer are normalized into kral post
    dicts and queued.

    :param queries: iterable of search terms.
    :param queue: queue-like object whose put() receives post dicts.
    :param settings: kral settings object (user agent, result ordering).
    :param kral_start_time: unused here; part of the common stream interface.
    """
    api_url = "http://www.reddit.com/search.json?"

    # per-query buffer of recently seen item ids, used for de-duplication
    prev_items = defaultdict(list)

    user_agent = settings.get('DEFAULT', 'user_agent', '')

    while True:
        for query in queries:
            p = {
                'q' : query,
                'sort' : settings.get('Reddit', 'orderby', 'relevance'),
            }
            url = api_url + urllib.urlencode(p)
            request = urllib2.Request(url)
            if user_agent:
                request.add_header('User-agent', user_agent)

            response = fetch_json(request)

            if not response:
                # back off briefly on a failed fetch and retry the whole
                # query list on the next outer pass
                sleep(5)
                break

            if 'data' in response and 'children' in response['data']:
                #api returns back 25 items
                for item in response['data']['children']:
                    item_id = item['data']['id']
                    #if we've seen this item in the last 50 items skip it
                    if item_id not in prev_items[query]:
                        post = {
                            'service' : 'reddit',
                            'query' : query,
                            'user' : {
                                'name' : item['data']['author'],
                            },
                            'id' : item_id,
                            'date' : item['data']['created_utc'],
                            'text' : item['data']['title'],
                            'source' : item['data']['url'],
                            'likes': item['data'].get('likes', 0),
                            'dislikes': item['data'].get('downs', 0),
                            'comments': item['data'].get('num_comments', 0),
                            'favorites': item['data'].get('saved', 0),
                        }
                        queue.put(post)
                        prev_items[query].append(item_id)

                # keep the 50 MOST RECENT ids (was [:50], which kept the
                # oldest 50 -- once full, new ids were trimmed off and the
                # same posts were re-emitted as duplicates)
                #TODO: look into using deque with maxlength
                prev_items[query] = prev_items[query][-50:]
        sleep(30)
|
Thrilled to announce I’m setting up shop @spoonfed.la this Friday, April 12th from 10am - until 6pm.
Check out our latest spring / summer fashion accessories, learn more about Accoutrements LA or just say Hi while having delicious food!
Spoonfed LA was just chosen by @eater_la as one of the hottest places to have brunch in Los Angeles.
The food is amazing, they serve all day brunch menu (did I mention peanut butter stuffed French toast!) and you won’t find a better garden setting in #Hollywood than this one!
Can’t wait to see you and if you’re going to #coachella2019 then you must grab one of our many straw bags!
|
from pymongo import MongoClient
from datetime import datetime

# NOTE(review): the second assignment overrides the first on both of these;
# they look like leftover environment toggles (remote vs local host, and
# production vs test database) -- confirm which is intended.
mongodb_url = 'mongodb://192.168.0.30:27017/'
mongodb_url = 'mongodb://127.0.0.1:27017/'

client = MongoClient(mongodb_url)
db = client['web_jobs_server']
db = client['test_web_jobs_server']

print "** DB Collections: ", db.collection_names()

collection = db['jobs_settings']
print collection

# Default settings per scraping job target: the web-access throttle interval,
# how many jobs a client requests per call, and the max connection retries.
job_settings = [
    #######
    {'job_target':'appid_to_asin', 'settings_key':'web_access_interval', 'settings_value':1000},
    {'job_target':'appid_to_asin', 'settings_key':'client_job_request_count', 'settings_value':10},
    {'job_target':'appid_to_asin', 'settings_key':'connection_try_max', 'settings_value':10},
    ########
    {'job_target':'thingiverse', 'settings_key':'web_access_interval', 'settings_value':1000},
    {'job_target':'thingiverse', 'settings_key':'client_job_request_count', 'settings_value':10},
    {'job_target':'thingiverse', 'settings_key':'connection_try_max', 'settings_value':10},
    ########
    {'job_target':'topsy_fortuner_tops', 'settings_key':'web_access_interval', 'settings_value':1000},
    {'job_target':'topsy_fortuner_tops', 'settings_key':'client_job_request_count', 'settings_value':10},
    {'job_target':'topsy_fortuner_tops', 'settings_key':'connection_try_max', 'settings_value':10},
    ########
    {'job_target':'topsy_fortuner_tops_full', 'settings_key':'web_access_interval', 'settings_value':1000},
    {'job_target':'topsy_fortuner_tops_full', 'settings_key':'client_job_request_count', 'settings_value':10},
    {'job_target':'topsy_fortuner_tops_full', 'settings_key':'connection_try_max', 'settings_value':10},
    ########
    {'job_target':'topsy_fortuner_twibes', 'settings_key':'web_access_interval', 'settings_value':1000},
    {'job_target':'topsy_fortuner_twibes', 'settings_key':'client_job_request_count', 'settings_value':10},
    {'job_target':'topsy_fortuner_twibes', 'settings_key':'connection_try_max', 'settings_value':10},
    ########
    {'job_target':'appid_asin_pairs', 'settings_key':'web_access_interval', 'settings_value':1000},
    {'job_target':'appid_asin_pairs', 'settings_key':'client_job_request_count', 'settings_value':10},
    {'job_target':'appid_asin_pairs', 'settings_key':'connection_try_max', 'settings_value':10}
]
def job_settings_init():
    '''Upsert every entry in job_settings into the jobs_settings collection.'''
    for job in job_settings:
        job_target = job['job_target']
        settings_key = job['settings_key']
        settings_value = job['settings_value']
        # both timestamps are set to "now"; on an update the create_date is
        # overwritten as well, since the whole document goes through $set
        create_date = str(datetime.now())
        update_date = str(datetime.now())
        job_setting_upsert(job_target, settings_key, settings_value, create_date, update_date)
        print "setting: ", job_target, settings_key, settings_value
## insert: can only be used for a fresh insert, as an existing _id would
##   cause a duplicate-key error
## save: same as update, but would create the collection if it does not exist
## since ejdb does not support a custom _id, upsert is used here instead
def job_setting_upsert(job_target, settings_key, settings_value, create_date, update_date):
    '''Insert or update the settings document keyed by (job_target, settings_key).'''
    job = {
        "job_target": job_target,
        "settings_key":settings_key,
        "settings_value": settings_value,
        "create_date": create_date,
        "update_date": update_date
    }
    # upsert=True creates the document when the (job_target, settings_key)
    # pair does not exist yet; multi=True updates every matching document
    j = db.jobs_settings.update({'job_target': job_target, 'settings_key': settings_key, }, {'$set':job}, upsert=True, multi=True)
    print j
if __name__ == "__main__":
    print "start"
    # seed (or refresh) all job settings documents in MongoDB
    job_settings_init()
    #db.close()
    print 'done'
|
When you travel somewhere for an extended stay, you generally have several options as to lodging. The two most common options? You can book a room in a hotel (even an extended stay hotel); or you can stay in a furnished apartment through a corporate housing agency. Among these options, not only is corporate housing usually less expensive than the others — it’s also a safer choice. Let’s explore the other options and see why corporate housing may be safer than a hotel.
Entry to your room: Virtually all hotel staff have access to enter your room — and not all of them are honest.
Sanitation: Cleaning staff sometimes cut corners in sanitation practices, and bedding in particular doesn’t always get as clean as it should.
Predators: Even with secured entries, as a public facility, a hotel often attracts predatory people who wait in the wings to steal or to cause harm.
The apartments are private. A corporate housing agency leases and furnishes actual apartments in real residential complexes, often with secure entrances — much like living in your own home or apartment.
Limited access. The only people with access to your unit are you and the management company — the same as if you lived in your own apartment.
Carefully vetted for location and amenities. The agency only works with the best residential communities to provide optimal, safe housing.
Whether your extended stay is personal or business-related, you deserve to stay in housing where you feel comfortable and safe. If your travels bring you to Atlanta or Jacksonville, give TP Corporate Lodging a call at 800.428.9997.
For business and leisure travel of any length, many people are now turning to the Airbnb platform as an alternative to hotel websites and travel platforms like Expedia. The idea of living temporarily in a fully furnished home or apartment versus a stale hotel room appeals to these travelers because they can have something like a home-away-from-home, usually for less money. A growing number of agencies are even starting to use the platform to place listings for high-quality business rentals.
The challenge is that Airbnb hosts all sorts of accommodations, including many privately-owned facilities with little regulation or vetting. How does one separate the professionally managed properties from the informal arrangements with very little regulatory oversight? How do you secure reliable temporary housing while avoiding the more questionable listings? Let’s discuss how you can locate and identify a certified business rental through Airbnb.
When you’re looking at places on Airbnb, don’t just look at the location and photos — they may not be accurate to the unit, anyway. Rather, look at the person or agency listing the property. How many reviews do they have? Are the reviews mostly positive? What negative feedback, if any, has been left? How accessible is the owner or property manager for questions or problems? You can often tell even by the language whether you’re dealing with an individual renting out their extra space, or a professional company that offers corporate-style housing.
In an attempt to answer some of the complaints from people who have had a negative experience with Airbnb listings, the platform has begun a new category called Airbnb Plus — a verified service in which the unit is professionally inspected before rental to ensure it meets top-tier standards. This new category may cost a bit more per night on average, but many travelers find it offers greater peace of mind. As Airbnb opens its platforms to more and more boutique hotels and lodging companies, you can expect to find many of their listings under the Plus Verified category.
If you don’t care who rents the unit as long as it’s top-notch, the next best thing to do with Airbnb is to look for those listings rented by “Superhosts” — those whom the platform has identified as offering superior service and generating stellar ratings over an extended period of time. This category also provides a greater level of confidence among travelers that they are working with someone whom Airbnb recognizes and trusts more than most.
Whether you book through Airbnb or some other platform, if you’re traveling on business and need comfortable lodging for an extended stay in Atlanta or Jacksonville, TP Corporate offers a variety of fully-furnished luxury apartments with plenty of amenities and all the comforts of home — for remarkably affordable rates. To learn more, call us at 800.428.9997.
When you travel on business, especially if you’re away from home for weeks at a time, you probably already know staying healthy can be a challenge. Not only can the disruption in your schedule cause you to cut corners on healthy practices, but at this time of year during a particularly dangerous flu season, you definitely want to keep your immune system at its best. Thankfully, you can boost your health by taking a few proactive steps. Here are five common-sense tips for staying healthy on an extended business trip.
Yes, you may be under pressure to meet deadlines or to make your business trip as productive as possible, but if you get sick, all of that goes out the window. Sleep helps your body reset, restores energy and stamina, and boosts your immune system. Know when to call it a day, and maybe skip the karaoke bar with your colleagues and go to bed early when you’re tired.
Eating healthy can be a challenge during business trips, but it can be done. Try to grab fruits and veggies for snacks instead of reaching for the candy bar. If you must eat on the go, skip the hot dog stand and find a to-go salad bar. Better yet, take 30 minutes for yourself and sit down for a healthy meal. You’ll have more energy and more stamina if you do.
On a busy trip, who has time to work out? You do, if you make time for it. Avail yourself of your hotel’s fitness center, or ask your work place if they offer access to a gym close by. Even an occasional brisk walk to clear your head will help.
When you travel to a new city, you likely have less resistance to the specific viruses that breed there. Since most illnesses are transmitted through touching, make a point of washing hands regularly, especially after touching public surfaces and before you eat.
Many of the points listed above (especially eat, sleep and exercise) can be accomplished more easily if you set up a daily routine for yourself while on your extended trip. It may be challenging, and sometimes work demands may disrupt it; if so, just return to your routine whenever possible. It makes healthy habits easier to maintain.
6. Consider short-term corporate housing over hotel stays.
Another tip that makes healthy habits easier is to stay in a furnished corporate apartment during your extended business trip, rather than a hotel. Not only is it usually a cheaper option, but it provides a home-away-from-home environment that enables you to set up healthy routines more easily. You can prepare healthy food in your fully equipped kitchen rather than heading to the restaurant, and many corporate apartments include gym access for exercise. Best of all, you have a private retreat where you can truly relax and get the rest you need.
If your business travels bring you to Atlanta or Jacksonville, TP Corporate Lodging can put you in a fully furnished apartment with plenty of amenities and conveniences to make your extended stay as comfortable and restful as possible. To learn more, call us at 800.428.9997.
When your company requires you to send employees to out-of-town locations for extended stays, you have numerous options available. You have the standard hotel stay, which can be convenient but quite costly (and not very comfortable for extended periods of time). You also have the option nowadays of utilizing platforms like Airbnb to provide more home-like environments — but due to quality control inconsistencies, these platforms require lots of research and a little luck to find the right place. However, when you work with a corporate housing company, a lot of these problems and risks are eliminated for you. Let’s look at 5 specific benefits your company can receive by using a certified corporate housing provider.
One of the first and most important benefits of working with a corporate housing agency is consistency and quality in the housing itself. It’s the provider’s job to make sure all its units meet certain quality standards — not yours — and when you find a provider you trust, you can leave the details in the agency’s hands knowing your employees will be comfortable.
When you choose corporate housing, your team members stay in fully furnished apartments with full kitchen and all the basics of a home-away-from-home — not to mention possible amenities on-site they might not even get at home (such as fitness facilities, pool, recreation areas, etc.). This homelike environment makes it easier for your employees to get the rest they need, meaning they come to work fresh and more productive.
Booking individual rooms in hotels or Airbnb for employees can quickly escalate into a full-time job, taking away quality productive time from other tasks. Working with a certified agency enables you to work with one central provider who handles the booking details for all your employees, freeing up valuable time.
Corporate housing is consistently much cheaper than putting up your employees in a hotel, especially considering the employee gets to stay in a fully furnished apartment rather than a small room or suite.
In most cases, temporary corporate housing for your employees counts as a business expense, which you can easily track and write off on taxes.
One of the best reasons to work with a certified corporate housing provider is that once you’ve found one you trust, you can leave the details to them! You can rest easy knowing that your employees will have comfortable and convenient accommodations so that you can turn your attention to the work at hand.
If you’re considering whether to choose corporate housing or Airbnb, we recommend giving us a try. To see what we have available in Atlanta or Jacksonville, call TP Corporate Lodging today at 800.428.9997.
When you need to accommodate a unit of military personnel on a temporary basis (e.g., for training exercises), finding affordable yet comfortable short-term housing can be a challenge. Not only is there the issue of finding lodging that meets government rules, regulations and standards, but typical short-term solutions like hotels or Airbnb can be both inconsistent and expensive. In many cases, a better solution is to work with a short-term housing partner, like a corporate housing agency, who can provide fully furnished apartments to suit the needs of your personnel. Here are some tips for choosing such a partner.
When partnering with a housing agency, it’s important to select one that operates in that local area. If you need to bring a group to Jacksonville for training, for example, you want a company that is local to Jacksonville. The reason for this is that finding appropriate housing is not just about the accommodations themselves; it’s also about location. A local agency will have a better working knowledge of neighborhoods convenient to local bases, as well as a thorough understanding of traffic patterns in the city, so personnel spend more time being productive and less time in transit.
Since housing requirements for military personnel and other government employees are subject to a specific set of rules and regulations, you should look for a short-term housing company that is familiar with these requirements and can arrange enough accommodations to meet them. This step alone could save a lot of time and money that could otherwise be wasted.
The more familiar an agency is with the workings of the military, the more efficiently they will work to meet your unit’s short-term housing needs. Arranging housing for a large group of people can be stressful enough as it is, and the last thing you need is to have to instruct the agency about your processes and procedures. A good military partner will already know the ropes so the process of procuring housing can occur seamlessly and stress-free.
If you need to find temporary lodging for military personnel in Jacksonville or Atlanta, TP Corporate Lodging would love to be your short-term housing partner. We have a deep understanding of these local areas, and we are well versed in the ins and outs of military housing protocols. We can provide high-quality, fully furnished apartments that will help your people be more comfortable, and therefore more productive. For more information, call us at 800.428.9997.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import gettext, os
import sys

import gobject
import gtk

import action, pastebin
# Bind gettext translations for this application's text domain.
gettext.textdomain('inforevealer')
_ = gettext.gettext
__version__ = "0.5.1"
# Application icon: try the installed system path first, then an in-tree
# copy. There is no `break`, so when both files exist the in-tree copy
# wins -- presumably intentional for running from a source checkout;
# TODO confirm.
pixbuf = None
for icon_file in ['/usr/share/icons/hicolor/scalable/apps/inforevealer.svg', 'icons/inforevealer.svg']:
    if os.path.isfile(icon_file):
        pixbuf = gtk.gdk.pixbuf_new_from_file(icon_file)
# Menu layout consumed by gtk.UIManager (File->Quit, Help->About).
ui_info = '''<ui>
<menubar name='MenuBar'>
<menu action='FileMenu'>
<menuitem action='Quit'/>
</menu>
<menu action='HelpMenu'>
<menuitem action='About'/>
</menu>
</menubar>
</ui>'''
class Application(gtk.Window):
def __init__(self, configfile, list_category, parent=None):
self.verbosity=False
self.configfile=configfile
self.check_list=list_category
self.category=None
self.dumpfile='/tmp/inforevealer'
try:
#Create an empty file (to be seen by the GUI)
foo = open(self.dumpfile, 'w')
foo.close()
except IOError:
sys.stderr.write("Error: Cannot open %s" %self.output)
# Create the toplevel window
gtk.Window.__init__(self)
self.set_icon(pixbuf)
try:
self.set_screen(parent.get_screen())
except AttributeError:
self.connect('destroy', lambda *w: gtk.main_quit())
self.set_title("Inforevealer") #FIXME
#self.set_default_size(200, 200)
self.set_position(gtk.WIN_POS_CENTER)
self.set_resizable(False)
merge = gtk.UIManager()
#self.set_data("ui-manager", merge)
merge.insert_action_group(self.__create_action_group(), 0)
#should be added to the top level window so that the Action accelerators can be used by your users
self.add_accel_group(merge.get_accel_group())
# Create Menu
try:
mergeid = merge.add_ui_from_string(ui_info)
except gobject.GError, msg:
print("building menus failed: %s" % msg)
bar = merge.get_widget("/MenuBar")
# Create TABLE
mainbox = gtk.VBox(False, 0)
self.add(mainbox)
#Add Menu into TABLE
mainbox.pack_start(bar, False, False, 0)
bar.show()
box1 = gtk.VBox(False, 4)
box1.set_border_width(10)
mainbox.pack_start(box1, False, False, 0)
#Add info
label = gtk.Label();
label.set_markup(_("Select one of the following category:"))
box1.pack_start(label, False, False, 0)
self.__create_radio_buttons(box1)
separator = gtk.HSeparator()
box1.pack_start(separator,False,False,0)
self.__create_option_menu(box1)
#buttons (bottom)
# Create TABLE
box2 = gtk.HBox(True, 0)
box1.pack_start(box2, False, False, 0)
#quit
bouton = gtk.Button(stock=gtk.STOCK_CLOSE)
bouton.connect("clicked", self.quit_prog,self, None)
box2.pack_start(bouton, True, True, 0)
bouton.show()
#apply
bouton = gtk.Button(stock=gtk.STOCK_APPLY)
bouton.connect("clicked", self.generate,self, None)
box2.pack_start(bouton, True, True, 0)
bouton.set_flags(gtk.CAN_DEFAULT)
bouton.grab_default()
bouton.show()
box2.show()
box1.show()
self.show_all()
def __create_option_menu(self,box):
frame = gtk.Expander(_("Options"))
box.pack_start(frame, True, True,0)
box2 = gtk.VBox(False, 0)
frame.add(box2)
box2.show()
#VERBOSE MODE
self.verbose_button = gtk.CheckButton(_("Verbose mode: add commands or files producing long output"))
#not connected, read it before using self.verbosity
box2.pack_start(self.verbose_button,True, True, 0)
self.verbose_button.show()
#FILECHOOSER
hbox = gtk.HBox(False, 0)
box2.pack_start(hbox,True, True, 0)
hbox.show()
#Add info
label = gtk.Label();
label.set_markup(_("Dumpfile: "))
hbox.pack_start(label, False, False, 10)
self.label = gtk.Label();
self.label.set_markup(self.dumpfile)
hbox.pack_start(self.label, False, False, 20)
button = gtk.Button(_('Modify'))
button.connect("clicked", self.opendumpfile)
hbox.pack_end(button, False, False, 20)
button.show()
#ENDFILECHOOSER
frame.show()
def opendumpfile(self,w):
""" Open the dumpfile"""
filechooser = FileDialog()
self.dumpfile=filechooser.get_filename(action='save')
if self.dumpfile==None:
self.dumpfile = "/tmp/inforevealer"
self.label.set_text(self.dumpfile)
def __create_radio_buttons(self,box):
""" Create the category list """
first=True
for item in self.check_list:
tmphbox= gtk.HBox(False, 0)
#Radiobutton
if first:
button = gtk.RadioButton(group=None, label=None)
self.category=item
else:
button = gtk.RadioButton(group=button, label=None)
button.connect("toggled", self.callback_radio_buttons, item)
tmphbox.pack_start(button,False,False,0)
#Label
text_label = "<b>"+str(item)+"</b> "+ str(self.check_list[item])
tmplabel= gtk.Label();
tmplabel.set_markup(text_label)
tmphbox.pack_start(tmplabel,False,False,0)
box.pack_start(tmphbox, True, True, 0)
button.show()
first=False
def callback_radio_buttons(self,widget,data=None):
""" Get the selected radio button """
if widget.get_active():
self.category=data
def __create_action_group(self):
""" Create the top menu entry """
# GtkActionEntry
entries = (
( "FileMenu", None, _("File") ), # name, stock id, label
( "HelpMenu", None, _("Help") ), # name, stock id, label
( "Quit", gtk.STOCK_QUIT, # name, stock id
_("Quit"), "", # label, accelerator
"Quit", # tooltip
self.activate_action ),
( "About", gtk.STOCK_ABOUT, # name, stock id
_("About"), "", # label, accelerator
"About", # tooltip
self.activate_about ),
);
# Create the menubar and toolbar
action_group = gtk.ActionGroup("AppWindowActions")
action_group.add_actions(entries)
return action_group
def activate_about(self, action):
""" About dialog """
license="""
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
"""
dialog = gtk.AboutDialog()
dialog.set_logo(pixbuf)
dialog.set_license(license)
dialog.set_name("Inforevealer") #FIXME
dialog.set_copyright("\302\251 Copyright 2010 Francois Boulogne")
dialog.set_website("http://github.com/sciunto/inforevealer")
## Close dialog on user response
dialog.connect ("response", lambda d, r: d.destroy())
dialog.show()
def activate_action(self, action):
    """Show an informational dialog naming the activated action."""
    message = 'You activated action: "%s" of type "%s"' % (action.get_name(), type(action))
    info_box = gtk.MessageDialog(self, gtk.DIALOG_DESTROY_WITH_PARENT,
                                 gtk.MESSAGE_INFO, gtk.BUTTONS_CLOSE,
                                 message)
    # Destroy the dialog as soon as the user responds, whatever the response.
    info_box.connect("response", lambda d, r: d.destroy())
    info_box.show()
def generate(self, widget, evnmt, data=None):
    """Run the report generation and display the result in a viewer window."""
    # Temporary configuration file used as a substitute for the real one.
    scratch_conf = "/tmp/inforevealer_tmp.conf"
    self.verbosity = self.verbose_button.get_active()
    action.action(self.category, self.dumpfile, self.configfile,
                  scratch_conf, self.verbosity, gui=True)
    # Open a new window showing the generated report.
    TextViewer(self.dumpfile)
def quit_prog(self, widget, evnmt, data=None):
    """Leave the GTK main loop, terminating the application."""
    gtk.main_quit()
class TextViewer:
    """Window displaying a generated report file, with copy-to-clipboard
    and pastebin-upload actions.
    """

    def change_editable(self, case, textview):
        # Check-box handler: toggle whether the text view is editable.
        textview.set_editable(case.get_active())

    def change_curseur_visible(self, case, textview):
        # Check-box handler: toggle the cursor visibility.
        textview.set_cursor_visible(case.get_active())

    def quit_prog(self, widget):
        # Close only this viewer window, not the whole application.
        self.fenetre.destroy()
        #gtk.main_quit()

    def __init__(self, output_file):
        """Build and show the viewer for the report stored in `output_file`."""
        self.output = output_file
        # Defined up-front so copy_clipboard() cannot raise AttributeError
        # when the report file cannot be read in the try block below.
        self.text = ""
        self.fenetre = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.fenetre.set_icon(pixbuf)
        self.fenetre.set_resizable(True)
        self.fenetre.set_position(gtk.WIN_POS_CENTER)
        self.fenetre.set_default_size(600, 400)
        self.fenetre.connect("destroy", self.quit_prog)
        self.fenetre.set_title("Inforevealer") #FIXME
        self.fenetre.set_border_width(0)
        boite1 = gtk.VBox(False, 0)
        self.fenetre.add(boite1)
        boite1.show()
        boite2 = gtk.VBox(False, 10)
        boite2.set_border_width(10)
        boite1.pack_start(boite2, True, True, 0)
        boite2.show()
        # Info label telling the user where the report file lives.
        # (typo fixed: "availlable" -> "available"; msgid change, .po files
        # need regenerating)
        label = gtk.Label()
        output_string = _("The following report is available in %s") % str(self.output)
        label.set_markup(output_string)
        label.show()
        boite2.pack_start(label, False, False, 0)
        # TEXT BOX: scrollable, read-only view of the report content.
        fd = gtk.ScrolledWindow()
        fd.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        textview = gtk.TextView()
        textview.set_editable(False)
        buffertexte = textview.get_buffer()
        fd.add(textview)
        fd.show()
        textview.show()
        textview.set_cursor_visible(False)
        boite2.pack_start(fd)
        # Load the report file into the text buffer; on failure the view
        # stays empty and an error is logged to stderr.
        try:
            fichier = open(self.output, "r")
            self.text = fichier.read()
            fichier.close()
            buffertexte.set_text(self.text)
        except IOError:
            sys.stderr.write("Error: Cannot open %s\n" % self.output)
        # END TEXTBOX
        # PASTEBIN row: pick a site from the combo box and send the report.
        boiteH = gtk.HBox(True, 0)
        boite2.pack_start(boiteH, False, False, 0)
        boiteH.show()
        label = gtk.Label()
        label.set_markup(_("Send the report on pastebin "))
        label.show()
        boiteH.pack_start(label, True, False, 0)
        self.pastebin_list = pastebin.preloadPastebins()
        self.combobox = gtk.combo_box_new_text()
        self.website = list()
        boiteH.pack_start(self.combobox, True, False, 0)
        for k in self.pastebin_list:
            self.combobox.append_text(k)
            self.website.append(k)
        self.combobox.set_active(0)
        self.combobox.show()
        bouton = gtk.Button(_("Send"))
        bouton.connect("clicked", self.send_pastebin)
        bouton.show()
        boiteH.pack_start(bouton, True, False, 0)
        # END PASTEBIN
        # BUTTONS row: clipboard copy and close.
        boiteH = gtk.HBox(True, 0)
        boite2.pack_start(boiteH, False, False, 0)
        boiteH.show()
        bouton = gtk.Button(_("Copy to clipboard"))
        bouton.connect("clicked", self.copy_clipboard)
        boiteH.pack_start(bouton, False, False, 0)
        bouton.show()
        bouton = gtk.Button(stock=gtk.STOCK_CLOSE)
        bouton.connect("clicked", self.quit_prog)
        boiteH.pack_start(bouton, False, False, 0)
        bouton.set_flags(gtk.CAN_DEFAULT)
        #bouton.grab_default()
        bouton.show()
        self.fenetre.show()

    def copy_clipboard(self, widget):
        """Copy the report text (empty if loading failed) to the clipboard."""
        clipb = gtk.Clipboard()
        clipb.set_text(self.text, len=-1)

    def send_pastebin(self, widget): #IMPROVEME : Design + clipboard ?
        """Upload the report to the selected pastebin and show the link."""
        link = "http://" + self.website[self.combobox.get_active()] + "/"
        link = pastebin.sendFileContent(self.output, title=None, website=link, version=None)
        message = _("File sent on\n%s") % link
        md = gtk.MessageDialog(None,
                               gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_INFO,
                               gtk.BUTTONS_CLOSE, message)
        md.set_title(_("Pastebin link"))
        md.run()
        md.destroy()
def yesNoDialog(title=" ", question="?"):
    '''Modal yes/no dialog.

    Returns True if the user clicks Yes, False if the user clicks No
    or closes the window.
    #inspired from http://www.daa.com.au/pipermail/pygtk/2002-June/002962.html
    '''
    # create window + Vbox + question
    window = gtk.Window()
    window.set_position(gtk.WIN_POS_CENTER)
    window.set_icon(pixbuf)
    window.set_title(title)
    vbox = gtk.VBox(True, 0)
    window.add(vbox)
    label = gtk.Label()
    label.set_markup(question)
    vbox.pack_start(label, False, False, 0)
    hbox = gtk.HButtonBox()
    vbox.pack_start(hbox, False, False, 0)

    def delete_event(widget, event, window):
        # Closing the window counts as a "no" answer.  (Previously this
        # stored -1, which is truthy and so read as "yes" by callers.)
        window.callback_return = False
        return False
    window.connect("delete_event", delete_event, window)

    def callback(widget, data):
        # data is (window, answer); hide the window and record the answer.
        window = data[0]
        data = data[1]
        window.hide()
        window.callback_return = data

    yes = gtk.Button(stock=gtk.STOCK_YES)
    yes.set_flags(gtk.CAN_DEFAULT)
    window.set_default(yes)
    yes.connect("clicked", callback, (window, True))
    hbox.pack_start(yes)
    no = gtk.Button(stock=gtk.STOCK_NO)
    no.connect("clicked", callback, (window, False))
    hbox.pack_start(no)
    window.set_modal(True)
    window.show_all()
    window.callback_return = None
    while window.callback_return is None:
        gtk.main_iteration(True)  # block until event occurs
    return window.callback_return
def askPassword(title=" ", question="?"):
    """Modal dialog asking for a password.

    Return the entered password, or False if the dialog is closed
    without validating.
    """
    # create window + Vbox + question
    window = gtk.Window()
    window.set_position(gtk.WIN_POS_CENTER)
    window.set_icon(pixbuf)
    window.set_title(title)
    vbox = gtk.VBox(True, 0)
    window.add(vbox)
    label = gtk.Label()
    label.set_markup(question)
    vbox.pack_start(label, False, False, 0)

    def delete_event(widget, event, window):
        # Closing the window cancels: report False to the caller.
        window.callback_return = False
        return False
    window.connect("delete_event", delete_event, window)

    def callback(widget, data):
        # OK clicked: hide the window and record the typed password.
        window = data[0]
        window.hide()
        window.callback_return = pword.get_text()

    # Password entry (masked); Enter activates the default OK button.
    pword = gtk.Entry()
    pword.set_visibility(False)
    pword.set_activates_default(True)
    vbox.pack_start(pword, False, False, 0)
    hbox = gtk.HButtonBox()
    vbox.pack_start(hbox, False, False, 0)
    # OK button
    but = gtk.Button(stock=gtk.STOCK_OK)
    but.set_flags(gtk.CAN_DEFAULT)
    window.set_default(but)
    hbox.add(but)
    but.connect("clicked", callback, (window, True))
    window.set_modal(True)
    window.show_all()
    window.callback_return = None
    while window.callback_return is None:
        gtk.main_iteration(True)  # block until event occurs
    return window.callback_return
class FileDialog(object):
    """Handle a pair of file dialogs (open and save).

    Useful to keep the selected filename sync'ed between both
    dialogs.  Eliminates redundant code too.
    """

    def __init__(self):
        # Last filename chosen in either dialog; pre-selected next time.
        self.filename = None

    def get_filename(self, action='open'):
        """Run a dialog and return a filename or None.

        Valid actions are 'open' and 'save'; anything else raises
        ValueError.
        """
        # I used to create the dialogs only once (on object
        # initialization), and hide and show them, but I can not
        # manage to pre-select a filename after a dialog have been
        # used once. I guess file chooser dialogs are just throwaway
        # objects. Thus, recreate them every time.
        if action == 'open':
            chooser = gtk.FileChooserDialog(
                action=gtk.FILE_CHOOSER_ACTION_OPEN,
                buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                         gtk.STOCK_OPEN, gtk.RESPONSE_OK))
            chooser.set_title(_('Open file:'))
        elif action == 'save':
            chooser = gtk.FileChooserDialog(
                action=gtk.FILE_CHOOSER_ACTION_SAVE,
                buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                         gtk.STOCK_SAVE, gtk.RESPONSE_OK))
            chooser.set_title(_('Save as:'))
        else:
            # ValueError is the idiomatic exception for a bad argument
            # value (still caught by any existing `except Exception`).
            raise ValueError("action must be 'open' or 'save' (got '%s')"
                             % action)
        if self.filename:
            chooser.select_filename(self.filename)
        response = chooser.run()
        filename = chooser.get_filename()
        chooser.destroy()
        # By default, the GTK loop would wait until the process is
        # idle to process events. Now, it is very probable that file
        # I/O will be performed right after this method call and that
        # would delay hiding the dialog until I/O are done. So,
        # process pending events to hide the dialog right now.
        while gtk.events_pending():
            gtk.main_iteration(False)
        if response == gtk.RESPONSE_OK:
            self.filename = filename
            return filename
        else:
            return None
def main(configfile,list):
    # Build the GUI and enter the GTK main loop (blocks until quit).
    # NOTE(review): the second parameter shadows the builtin `list`;
    # renaming it would be cleaner but could break keyword callers.
    Application(configfile,list)
    gtk.main()
|
Did you have a Tamagotchi or play Neopets as a kid? Or perhaps you were one of the millions who were obsessed with Pokémon GO? Now there’s a new virtual pet game on the block, but there’s a lot more at stake, and the target audience is definitely not young children.
CryptoKitties is one of the world’s first blockchain games, and the biggest. Created by Canadian startup Axiom Zen, CryptoKitties operates on the Ethereum blockchain network and turns the buying and selling of blockchain-based digital collectibles into a game. Players can dictate the price of their kitties when they list them on the marketplace. The first (and most expensive) cat sold on the game had a value equivalent to $117,712 USD, but as of today you can buy a kitty for 0.0019 Ethereum - currently equal to $1.09. The most expensive kitty on the marketplace is listed at 100,000 ETH (over $57 million).
The concept might sound like a novelty, but the tech industry clearly sees a bright future for blockchain gaming. After spinning off from Axiom Zen to become its own company, CryptoKitties raised $12 million in a March 2018 funding round.
|
"""
# Name: meas/settings.py
# Description:
# Created by: Phuc Le-Sanh
# Date Created: Oct 10 2016
# Last Modified: Nov 23 2016
# Modified by: Phuc Le-Sanh
"""
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'g(yej*3clhw8mh1lge2jd*f7h0uam9exedd$ya50n-^n1#p2(9'
INTERNAL_IPS = ['127.0.0.1']
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = (
    # Django contrib applications.
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party applications.
    'fontawesome',
    'autofixture',
    'ckeditor',
    'rest_framework',
    'rest_framework.authtoken',
    'djoser',
    # Web application
    'webapp.apps.WebAppConfig',
    # Models
    'meas_models.apps.MeasModelsConfig',
    # CMS
    'cms.apps.CmsConfig',
    # Common
    'meas_common.apps.MeasCommonConfig',
    # API
    'api.apps.ApiConfig',
    'apiv2.apps.Apiv2Config',
    # Search
    'search.apps.SearchConfig',
    # Import Export
    'import_export',
    # Haystack
    # 'drf-haystack',
    'haystack',
    # 'whoosh_index',
    # Django-Extensions
    'django_extensions',
    'corsheaders',
    'debug_toolbar',
)
# Order matters: the debug toolbar middleware is listed first, and the
# CORS middleware is placed before CommonMiddleware so CORS headers are
# added to every response.
MIDDLEWARE_CLASSES = (
    'debug_toolbar.middleware.DebugToolbarMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
def show_toolbar(request):
    """Debug-toolbar predicate: display the toolbar for every request."""
    return True


# Registered so the debug toolbar uses the predicate above instead of
# its default INTERNAL_IPS check.
SHOW_TOOLBAR_CALLBACK = show_toolbar
ROOT_URLCONF = 'meas.urls'

# SECURITY WARNING: allows cross-origin requests from any origin, with
# credentials -- development-only; restrict origins before deploying.
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
# HTTP methods permitted on cross-origin requests.
CORS_ALLOW_METHODS = (
    'DELETE',
    'GET',
    'OPTIONS',
    'PATCH',
    'POST',
    'PUT',
)
# Request headers permitted on cross-origin requests.
CORS_ALLOW_HEADERS = (
    'accept',
    'accept-encoding',
    'authorization',
    'content-type',
    'dnt',
    'origin',
    'user-agent',
    'x-csrftoken',
    'x-requested-with',
    'if-modified-since'
)
# Origins trusted for CSRF-protected requests (local frontend dev server).
CSRF_TRUSTED_ORIGINS = (
    'localhost:8080',
)
# Template engine: project-level templates/ directory plus per-app
# template directories (APP_DIRS).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates/')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

# Whoosh
# File-based search index directory used by the Haystack/Whoosh backend.
WHOOSH_INDEX = os.path.join(BASE_DIR, 'whoosh_index')
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
        'PATH': WHOOSH_INDEX,
    },
}

# Debug toolbar panels to display (version info and request timing only).
DEBUG_TOOLBAR_PANELS = (
    'debug_toolbar.panels.version.VersionDebugPanel',
    'debug_toolbar.panels.timer.TimerDebugPanel',
)
WSGI_APPLICATION = 'meas.wsgi.application'

# Database
# SECURITY WARNING: credentials are hard-coded -- development-only;
# load from the environment before deploying.  No HOST/PORT is given,
# so Django falls back to its MySQL defaults (local connection).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'meas_development',
        'USER': 'root',
        'PASSWORD': '123456'
    },
}

# django-extensions graph_models options (model-diagram generation).
GRAPH_MODELS = {
    'all_applications': True,
    'group_models': True,
}

# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Singapore'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
LOGOUT_REDIRECT_URL = '/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static"),
]
# CKEditor rich-text editor configuration.  The custom toolbar layout is
# defined under 'toolbar_YourCustomToolbarConfig' and selected by the
# 'toolbar' key below.
CKEDITOR_CONFIGS = {
    'default': {
        'skin': 'moono',
        # 'skin': 'office2013',
        'toolbar_Basic': [
            ['Source', '-', 'Bold', 'Italic']
        ],
        'toolbar_YourCustomToolbarConfig': [
            {'name': 'document', 'items': [
                'Source', '-', 'Save', 'NewPage', 'Preview', 'Print', '-',
                'Templates']},
            {'name': 'clipboard', 'items': [
                'Cut', 'Copy', 'Paste', 'PasteText', 'PasteFromWord', '-',
                'Undo', 'Redo']},
            {'name': 'editing', 'items': [
                'Find', 'Replace', '-', 'SelectAll']},
            {'name': 'forms',
             'items': ['Form', 'Checkbox', 'Radio', 'TextField', 'Textarea',
                       'Select', 'Button', 'ImageButton',
                       'HiddenField']},
            '/',
            # Fixed: 'ReplacemoveFormat' is not a CKEditor button name;
            # the intended button is 'RemoveFormat'.
            {'name': 'basicstyles',
             'items': ['Bold', 'Italic', 'Underline', 'Strike', 'Subscript',
                       'Superscript', '-', 'RemoveFormat']},
            {'name': 'paragraph',
             'items': ['NumberedList', 'BulletedList', '-', 'Outdent',
                       'Indent', '-', 'Blockquote', 'CreateDiv', '-',
                       'JustifyLeft', 'JustifyCenter', 'JustifyRight',
                       'JustifyBlock', '-', 'BidiLtr', 'BidiRtl',
                       'Language']},
            {'name': 'links', 'items': ['Link', 'Unlink', 'Anchor']},
            {'name': 'insert',
             'items': ['Image', 'Flash', 'Table', 'HorizontalRule', 'Smiley',
                       'SpecialChar', 'PageBreak', 'Iframe']},
            '/',
            {'name': 'styles', 'items': [
                'Styles', 'Format', 'Font', 'FontSize']},
            {'name': 'colors', 'items': ['TextColor', 'BGColor']},
            {'name': 'tools', 'items': ['Maximize', 'ShowBlocks']},
            {'name': 'about', 'items': ['About']},
            '/',
            {'name': 'yourcustomtools', 'items': [
                'Preview',
                'Mathjax',
                'Maximize',
            ]},
        ],
        'toolbar': 'YourCustomToolbarConfig',
        'toolbarGroups': [{'name': 'document', 'groups': ['mode', 'document',
                                                          'doctools']}],
        'height': 291,
        'width': '100%',
        'filebrowserWindowHeight': 725,
        'filebrowserWindowWidth': 940,
        'toolbarCanCollapse': True,
        # NOTE(review): cdn.mathjax.org was retired; consider migrating to
        # cdn.jsdelivr.net -- left unchanged to preserve behavior.
        'mathJaxLib': '//cdn.mathjax.org/mathjax/latest/' +
                      'MathJax.js?config=TeX-MML-AM_CHTML',
        'tabSpaces': 4,
        # Extra CKEditor plugins loaded alongside the defaults.
        'extraPlugins': ','.join(
            [
                'div',
                'autolink',
                'autoembed',
                'embedsemantic',
                'autogrow',
                'devtools',
                'widget',
                'lineutils',
                'clipboard',
                'dialog',
                'dialogui',
                'elementspath',
                'mathjax'
            ]),
    }
}
# Django REST Framework: all endpoints require authentication by default;
# basic, session and token authentication are all accepted.
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.BasicAuthentication',
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.TokenAuthentication',
    ),
    'DEFAULT_FILTER_BACKENDS': (
        'django_filters.rest_framework.DjangoFilterBackend',),
    'EXCEPTION_HANDLER': 'rest_framework.views.exception_handler',
}
|
I am a Global Business Developer and Recruiter for Forever Living, as well as an Independent Distributor for a wide range of products including health, wellness, beauty, fitness, cosmetics and animal health care. I am available for parties and events. I am currently expanding my successful business as a Full Time mum to 2 children and looking for enthusiastic and motivated people to join my growing team.
Be the first to review Aloe Beautiful - Health, Wealth & Beauty Solutions by Sarah.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.