text
stringlengths 29
850k
|
|---|
# Copyright (C) 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import itertools
from collections import namedtuple
from typing import(Any, Dict, Iterable, Optional)
from google.protobuf import descriptor_pb2
from gapic.schema import wrappers
# Injected dummy test types
@dataclasses.dataclass(frozen=True)
class DummyMethod:
    """Immutable stand-in for a wrappers.Method used in unit tests.

    Every flag defaults to False so a test only sets the attributes it
    cares about (LRO, streaming, paging, flattening, ...).
    """
    input: bool = False
    output: bool = False
    lro: bool = False
    void: bool = False
    paged_result_field: bool = False
    client_streaming: bool = False
    server_streaming: bool = False
    flattened_fields: Dict[str, Any] = dataclasses.field(default_factory=dict)
# Lightweight namedtuple doubles for the gapic wrapper types.  Where defaults
# are declared, every field defaults to False so tests can pass only the
# attributes they care about.
DummyIdent = namedtuple("DummyIdent", ["name"])

DummyMessage = namedtuple(
    "DummyMessage",
    ["fields", "type", "options", "ident"],
    defaults=(False,) * 4,
)

DummyField = namedtuple(
    "DummyField",
    ["message", "enum", "name", "repeated",
     "field_pb", "meta", "is_primitive", "type"],
    defaults=(False,) * 8,
)

DummyService = namedtuple("DummyService", ["methods", "client_name"])

DummyApiSchema = namedtuple(
    "DummyApiSchema",
    ["services", "naming", "messages"],
    defaults=(False,) * 3,
)

DummyNaming = namedtuple(
    "DummyNaming",
    ["warehouse_package_name", "name", "version",
     "versioned_module_name", "module_namespace"],
    defaults=(False,) * 5,
)
def message_factory(exp: str,
                    repeated_iter=itertools.repeat(False),
                    enum: Optional[wrappers.EnumType] = None,
                    ) -> DummyMessage:
    """Build a chain of DummyMessages from a dotted attribute expression.

    Mirrors the structure of MessageType in the wrappers module: a
    MessageType maps field names to Fields, and a Field optionally points
    at another MessageType.  E.g. "mollusc.cephalopod.coleoid" yields a
    three-level hierarchy; the returned value is the root message.
    """
    names = exp.split(".")
    chain = [DummyMessage({}, name.upper() + "_TYPE") for name in names]
    if enum:
        # An enum, if given, replaces the leaf of the chain.
        chain[-1] = enum
    # Wire each message to its child through a DummyField; a non-message
    # child (the enum leaf) lands on the field's `enum` slot instead.
    for parent, child, attr, is_repeated in zip(
        chain, chain[1:], names[1:], repeated_iter
    ):
        if isinstance(child, DummyMessage):
            parent.fields[attr] = DummyField(message=child, repeated=is_repeated)
        else:
            parent.fields[attr] = DummyField(enum=child)
    return chain[0]
def enum_factory(name: str, variants: Iterable[str]) -> wrappers.EnumType:
    """Construct a wrappers.EnumType whose variants are numbered in order."""
    value_pbs = [
        descriptor_pb2.EnumValueDescriptorProto(name=variant, number=number)
        for number, variant in enumerate(variants)
    ]
    enum_pb = descriptor_pb2.EnumDescriptorProto(name=name, value=tuple(value_pbs))
    return wrappers.EnumType(
        enum_pb=enum_pb,
        values=[wrappers.EnumValueType(enum_value_pb=v) for v in enum_pb.value],
    )
|
We help you save money while shopping online at Ross-Simons.com. We offer the best coupons, deals, discount sales and promotional codes for thousands of online stores, including Ross-Simons and more. Ross-Simons coupon codes are a great way to save money and to get the discount price for the products you want to buy at Ross-Simons online store. After you are finished shopping, enter the Ross-Simons promotional code into the coupon or deal box during the checkout process at Ross-Simons website.
Search: Ross-Simons, Ross-Simons.com online shopping, discount, online store and leading outlet store chain, offering valuable products for women, men and gifts. The www.Ross-Simons.com online store offers a variety of high-quality products and goods with a Ross-Simons.com online coupon code.
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reusable utility functions.
"""
import collections
import multiprocessing
import os
import tensorflow as tf
from tensorflow.python.lib.io import file_io
# GCS URL of a pre-trained Inception v3 checkpoint (default starting point
# for transfer learning when the caller supplies no checkpoint of their own).
_DEFAULT_CHECKPOINT_GSURL = 'gs://cloud-ml-data/img/flower_photos/inception_v3_2016_08_28.ckpt'
def is_in_IPython():
    """Return True when IPython is importable, False otherwise."""
    try:
        import IPython  # noqa
    except ImportError:
        return False
    return True
def default_project():
    """Return the project id of the default Datalab context."""
    # Imported inside the function so importing this module does not itself
    # require google.datalab to be installed/configured.
    from google.datalab import Context
    return Context.default().project_id
def _get_latest_data_dir(input_dir):
    """Resolve the data dir named by the 'latest' marker file under input_dir."""
    marker = os.path.join(input_dir, 'latest')
    if not file_io.file_exists(marker):
        raise Exception(('Cannot find "latest" file in "%s". ' +
                         'Please use a preprocessing output dir.') % input_dir)
    with file_io.FileIO(marker, 'r') as f:
        latest_name = f.read().rstrip()
    return os.path.join(input_dir, latest_name)
def get_train_eval_files(input_dir):
    """Get preprocessed training and eval files."""
    data_dir = _get_latest_data_dir(input_dir)
    train_files, eval_files = (
        file_io.get_matching_files(os.path.join(data_dir, prefix + '*.tfrecord.gz'))
        for prefix in ('train', 'eval')
    )
    return train_files, eval_files
def get_labels(input_dir):
    """Get a list of labels from preprocessed output dir."""
    labels_path = os.path.join(_get_latest_data_dir(input_dir), 'labels')
    with file_io.FileIO(labels_path, 'r') as f:
        return f.read().rstrip().split('\n')
def read_examples(input_files, batch_size, shuffle, num_epochs=None):
    """Creates readers and queues for reading example protos."""
    # Expand comma-separated entries and glob patterns into concrete paths.
    files = []
    for entry in input_files:
        for pattern in entry.split(','):
            files.extend(file_io.get_matching_files(pattern))
    thread_count = multiprocessing.cpu_count()

    # Minimum number of queued instances from which examples are drawn at
    # random; more randomness costs more memory.
    min_after_dequeue = 1000

    # Queue capacity exceeds batch_size by (num_threads + small safety
    # margin); a single thread is used for reading, so this can stay small.
    queue_size_multiplier = thread_count + 3

    # num_epochs == 0 means "unbounded" (None) to string_input_producer.
    num_epochs = num_epochs or None

    filename_queue = tf.train.string_input_producer(files, num_epochs, shuffle)
    options = tf.python_io.TFRecordOptions(
        compression_type=tf.python_io.TFRecordCompressionType.GZIP)
    example_id, encoded_example = tf.TFRecordReader(options=options).read_up_to(
        filename_queue, batch_size)

    if not shuffle:
        return tf.train.batch(
            [example_id, encoded_example],
            batch_size,
            capacity=queue_size_multiplier * batch_size,
            enqueue_many=True,
            num_threads=thread_count)
    return tf.train.shuffle_batch(
        [example_id, encoded_example],
        batch_size,
        min_after_dequeue + queue_size_multiplier * batch_size,
        min_after_dequeue,
        enqueue_many=True,
        num_threads=thread_count)
def override_if_not_in_args(flag, argument, args):
    """Append flag and its argument to args unless flag is already present."""
    if flag in args:
        return
    args.extend([flag, argument])
def loss(loss_value):
    """Calculates aggregated mean loss.

    Returns ([update_ops], mean_loss_op): run the update ops per step, then
    mean_loss_op yields total accumulated loss divided by the step count.
    """
    running_total = tf.Variable(0.0, False)
    running_count = tf.Variable(0, False)
    updates = [
        tf.assign_add(running_total, loss_value),
        tf.assign_add(running_count, 1),
    ]
    mean_loss_op = running_total / tf.cast(running_count, tf.float32)
    return updates, mean_loss_op
def accuracy(logits, labels):
    """Calculates aggregated accuracy.

    Returns ([update_ops], accuracy_op): run the update ops per batch, then
    accuracy_op yields correct / (correct + incorrect) over all batches seen.
    """
    is_correct = tf.nn.in_top_k(logits, labels, 1)
    correct = tf.reduce_sum(tf.cast(is_correct, tf.int32))
    incorrect = tf.reduce_sum(tf.cast(tf.logical_not(is_correct), tf.int32))
    correct_total = tf.Variable(0, False)
    incorrect_total = tf.Variable(0, False)
    updates = [
        tf.assign_add(correct_total, correct),
        tf.assign_add(incorrect_total, incorrect),
    ]
    accuracy_op = tf.cast(correct_total, tf.float32) / tf.cast(
        correct_total + incorrect_total, tf.float32)
    return updates, accuracy_op
def check_dataset(dataset, mode):
    """Validate we have a good dataset.

    Training requires exactly the "image_url" and "label" STRING columns;
    other modes accept "image_url" alone or "image_url,label", all STRING.
    Raises ValueError on any violation.
    """
    names = set(x['name'] for x in dataset.schema)
    all_string = all(x['type'] == 'STRING' for x in dataset.schema)
    if mode == 'train':
        if names != set(['image_url', 'label']) or not all_string:
            raise ValueError('Invalid dataset. Expect only "image_url,label" STRING columns.')
    else:
        if names not in (set(['image_url']), set(['image_url', 'label'])) or not all_string:
            raise ValueError('Invalid dataset. Expect only "image_url" or "image_url,label" ' +
                             'STRING columns.')
def get_sources_from_dataset(p, dataset, mode):
    """Return a PCollection of {'image_url', 'label'} dicts read from dataset.

    Args:
      p: the beam Pipeline to attach sources to.
      dataset: a google.datalab.ml CsvDataSet or BigQueryDataSet.
      mode: 'train' or another mode string; used for validation and labels.

    Raises:
      ValueError: if the dataset type or schema is invalid.
    """
    import apache_beam as beam
    import csv
    from google.datalab.ml import CsvDataSet, BigQueryDataSet

    check_dataset(dataset, mode)
    if type(dataset) is CsvDataSet:
        source_list = []
        for ii, input_path in enumerate(dataset.files):
            source_list.append(p | 'Read from Csv %d (%s)' % (ii, mode) >>
                               beam.io.ReadFromText(input_path, strip_trailing_newlines=True))
        # BUG FIX: the original called csv.DictReader(...).next(), which only
        # exists on Python 2; the next() builtin works on both 2.6+ and 3.
        return (source_list |
                'Flatten Sources (%s)' % mode >>
                beam.Flatten() |
                'Create Dict from Csv (%s)' % mode >>
                beam.Map(lambda line: next(csv.DictReader(
                    [line], fieldnames=['image_url', 'label']))))
    elif type(dataset) is BigQueryDataSet:
        # Exactly one of table/query is expected to be set on the dataset.
        bq_source = (beam.io.BigQuerySource(table=dataset.table)
                     if dataset.table is not None
                     else beam.io.BigQuerySource(query=dataset.query))
        return p | 'Read source from BigQuery (%s)' % mode >> beam.io.Read(bq_source)
    else:
        raise ValueError('Invalid DataSet. Expect CsvDataSet or BigQueryDataSet')
def decode_and_resize(image_str_tensor):
    """Decodes jpeg string, resizes it and returns a uint8 tensor."""
    # Inception v3 expects 299x299x3 inputs.
    target_height, target_width, channels = 299, 299, 3
    decoded = tf.image.decode_jpeg(image_str_tensor, channels=channels)
    # resize_bilinear works on batches, while tf.map_fn strips the batch
    # index — so add a leading dim, resize, then strip it again.  The resize
    # result is float32 in [0, 255].
    batched = tf.expand_dims(decoded, 0)
    resized = tf.image.resize_bilinear(batched, [target_height, target_width],
                                       align_corners=False)
    unbatched = tf.squeeze(resized, squeeze_dims=[0])
    return tf.cast(unbatched, dtype=tf.uint8)
def resize_image(image_str_tensor):
    """Decodes jpeg string, resizes it and re-encode it to jpeg."""
    resized = decode_and_resize(image_str_tensor)
    return tf.image.encode_jpeg(resized, quality=100)
def load_images(image_files, resize=True):
    """Load images from files and optionally resize it."""
    images = []
    for path in image_files:
        with file_io.FileIO(path, 'r') as f:
            images.append(f.read())
    if resize is False:
        return images

    # Resize through a TF session so we reuse decode_and_resize() from the
    # prediction graph: no quality loss at prediction time, while shrinking
    # the payload sent to the model over the network.
    image_str_tensor = tf.placeholder(tf.string, shape=[None])
    resized = tf.map_fn(resize_image, image_str_tensor, back_prop=False)
    feed_dict = collections.defaultdict(list)
    feed_dict[image_str_tensor.name] = images
    with tf.Session() as sess:
        return sess.run(resized, feed_dict=feed_dict)
def process_prediction_results(results, show_image):
    """Create DataFrames out of prediction results, and display images in IPython if requested."""
    import pandas as pd

    if (is_in_IPython() and show_image is True):
        import IPython
        for _, image, label_and_score in results:
            IPython.display.display_html(
                '<p style="font-size:28px">%s(%.5f)</p>' % label_and_score,
                raw=True)
            IPython.display.display(IPython.display.Image(data=image))
    rows = [{'image_url': url, 'label': pair[0], 'score': pair[1]}
            for url, _, pair in results]
    return pd.DataFrame(rows)
def repackage_to_staging(output_path):
    """Repackage it from local installed location and copy it to GCS."""
    import google.datalab.ml as ml

    here = os.path.dirname(__file__)
    # __file__ lives under [package_root]/mltoolbox/image/classification.
    package_root = os.path.join(here, '../../../')
    # setup.py is deployed alongside this module for repackaging purposes.
    setup_py = os.path.join(here, 'setup.py')
    staging_package_url = os.path.join(output_path, 'staging',
                                       'image_classification.tar.gz')
    ml.package_and_copy(package_root, setup_py, staging_package_url)
    return staging_package_url
|
However, business owners are not as helpless as they may feel in controlling the cost of workers compensation insurance. Our knowledgeable agents at Sesco Group Inc. in Bristol, Virginia are available to assist with loss control in several different ways. The following strategies can effectively help you keep your workers compensation costs to a minimum.
Agent commissions on workers compensation policies can range from 5% to 15% of the premiums. Commensurate with the amount of commission, insurance agents can provide a variety of services in addition to placing the policy, including safety and loss control services, claims reviews, Experience Mod Audits, and statistical analysis of claims. When you work with an experienced agent at Sesco Group Inc. in Bristol, Virginia, you will never have to pay a third party to provide what is covered by your premium commission.
August 7th, 2018 by Sesco Group Inc.
|
# InsertHandlers.py
# Francois Plamondon
# Summer 2003
import Tkinter
import tkFileDialog
import tkMessageBox
import Graphics
import AbstractHandler as EventHandler
#the insert() method is implemented by the RectangleInsertHandler and OvalInsertHandler classes.
class GeneralBoxInsertHandler(EventHandler.EventHandler):
    """Abstract mouse handler that rubber-bands a box-shaped object.

    Subclasses implement insert(), creating the concrete graphic from
    self.xy = [x0, y0, x1, y1] (model coordinates).
    """
    def __init__(self, editor, eventHandler):
        # Python 2 raise syntax; this class must not be instantiated directly.
        raise NotImplementedError, "GeneralBoxInsertHandler is an abstract class"

    def start(self):
        """start the handler"""
        self.current = None

    def stop(self):
        """stop the handler"""
        return self.current

    # canvas events
    def onCanvasButton(self, event):
        """on button 1: insert a new box object (rectangle or oval)
        on button 3: cancel current insertion"""
        if event.num == 1:
            # Convert screen coordinates to model coordinates via the zoom.
            self.zoom = self.editor.getZoom()
            x = event.x/self.zoom
            y = event.y/self.zoom
            self.xy = []
            self.xy.append(x)
            self.xy.append(y)
            # Start as a 1x1 box so the new object is immediately visible.
            self.xy.append(x+1)
            self.xy.append(y+1)
            self.current = self.insert()
        elif event.num == 3:
            if self.current != None:
                self.editor.delete([self.current])
                self.current = None

    def onCanvasButtonMotion(self, event):
        """set xy[2], xy[3] to the new position of the mouse"""
        if self.current != None:
            newXY2 = event.x/self.zoom
            newXY3 = event.y/self.zoom
            if abs(newXY2 - self.xy[0]) >= 1 and abs(newXY3 - self.xy[1]) >= 1:  # avoid zero width or height
                self.xy[2] = newXY2
                self.xy[3] = newXY3
                self.current.setCoords(self.xy)

    def onCanvasShiftButtonMotion(self, event):
        """set xy[2], xy[3] to make a square box"""
        if self.current != None:
            x = event.x/self.zoom
            y = event.y/self.zoom
            # Square side is the larger of the two drag distances; the sign
            # of each branch keeps the box on the side the cursor is on.
            side = max(abs(x - self.xy[0]), abs(y - self.xy[1]))
            if x > self.xy[0]:
                self.xy[2] = self.xy[0] + side
            else:
                self.xy[2] = self.xy[0] - side
            if y > self.xy[1]:
                self.xy[3] = self.xy[1] + side
            else:
                self.xy[3] = self.xy[1] - side
            self.current.setCoords(self.xy)

    def onCanvasButtonRelease(self, event):
        """stop on button 1 release if insertion was not canceled"""
        if event.num == 1 and self.current != None:
            current = self.stop()
            self.eventHandler.onInsertHandlerStopped(current)
# Rectangle Insertion handler
class RectangleInsertHandler(GeneralBoxInsertHandler):
    """Concrete box-insertion handler that produces rectangles."""

    def __init__(self, editor, eventHandler):
        # Deliberately does not call the (abstract, raising) base __init__.
        self.editor = editor
        self.eventHandler = eventHandler
        self.canvas = editor.getCanvas()

    def insert(self):
        """Create a rectangle from the current coordinate list."""
        return self.editor.createRectangle(self.xy)
# Oval Insertion handler
class OvalInsertHandler(GeneralBoxInsertHandler):
    """Concrete box-insertion handler that produces ovals."""

    def __init__(self, editor, eventHandler):
        # Deliberately does not call the (abstract, raising) base __init__.
        self.editor = editor
        self.eventHandler = eventHandler
        self.canvas = editor.getCanvas()

    def insert(self):
        """Create an oval from the current coordinate list."""
        return self.editor.createOval(self.xy)
# Line Insertion handler
class LineInsertHandler(EventHandler.EventHandler):
    """Mouse handler that rubber-bands a straight line on the canvas."""

    def __init__(self, editor, eventHandler):
        self.editor = editor
        self.eventHandler = eventHandler
        self.canvas = editor.getCanvas()

    def start(self):
        """Begin an insertion session with no active line."""
        self.current = None

    def stop(self):
        """End the session and return the inserted line (or None)."""
        return self.current

    # --- canvas events ---

    def onCanvasButton(self, event):
        """Button 1 starts a new line; button 3 cancels the one in progress."""
        if event.num == 3:
            if self.current != None:
                self.editor.delete([self.current])
                self.current = None
        elif event.num == 1:
            self.zoom = self.editor.getZoom()
            px = event.x/self.zoom
            py = event.y/self.zoom
            # Degenerate line: both endpoints start at the click position.
            self.xy = [px, py, px, py]
            self.current = self.editor.createLine(self.xy)

    def onCanvasButtonMotion(self, event):
        """Drag the free endpoint to follow the cursor."""
        if self.current != None:
            self.xy[2] = event.x/self.zoom
            self.xy[3] = event.y/self.zoom
            self.current.setCoords(self.xy)

    def onCanvasShiftButtonMotion(self, event):
        """Constrain the line to horizontal or vertical, whichever is closer."""
        if self.current != None:
            px = event.x/self.zoom
            py = event.y/self.zoom
            if abs(px - self.xy[0]) > abs(py - self.xy[1]):
                self.xy[2], self.xy[3] = px, self.xy[1]
            else:
                self.xy[2], self.xy[3] = self.xy[0], py
            self.current.setCoords(self.xy)

    def onCanvasButtonRelease(self, event):
        """Finish the insertion on button 1 release (unless canceled)."""
        if event.num == 1 and self.current != None:
            finished = self.stop()
            self.eventHandler.onInsertHandlerStopped(finished)
# Base class for Polyline and Polygon Insertion
class PolyInsertHandler(EventHandler.EventHandler):
    """Abstract base for inserting multi-point shapes (polyline/polygon).

    Subclasses define create() plus minimumCoords — the smallest coordinate
    count (two per point) that constitutes a valid shape.  Points are added
    one click at a time; the last coordinate pair tracks the cursor while
    dragging.
    """
    def __init__(self, editor, eventHandler):
        # Python 2 raise syntax; this class must not be instantiated directly.
        raise NotImplementedError, "PolyInsertHandler is an abstract class"

    def start(self, smooth=0):
        """start the handler"""
        self.current = None
        self.smooth = smooth  # smooth option
        self.inserting = 0

    def stop(self):
        """stop the handler. if there are less than 2 points, cancel insertion"""
        if self.current != None:
            if len(self.xy) < self.minimumCoords:
                self.editor.delete([self.current])
                self.current = None
        return self.current

    def create(self):
        # Overridden by subclasses to build the concrete graphic object.
        pass

    # canvas events
    def onCanvasButton(self, event):
        # Button 1 starts the shape (first click) or commits a new point
        # (later clicks).  Button 3 cancels the whole shape while button 1
        # is still down, otherwise it ends the insertion.
        if event.num == 1:
            self.inserting = 1
            self.zoom = self.editor.getZoom()
            x = event.x/self.zoom
            y = event.y/self.zoom
            if self.current == None:
                # First click: seed with two identical points; the second one
                # follows the cursor during motion events.
                self.xy = []
                self.xy.append(x)
                self.xy.append(y)
                self.xy.append(x)
                self.xy.append(y)
                self.current = self.create()
            else:
                self.xy.append(x)
                self.xy.append(y)
                self.current.setCoords(self.xy)
        elif event.num == 3:
            if self.inserting:  # if button 1 also being pressed, cancel insertion
                self.editor.delete([self.current])
                self.current = None
                self.inserting = 0
            else:
                self.stop()
                self.eventHandler.onInsertHandlerStopped(self.current)

    def onCanvasDoubleButton(self, event):
        # A double click behaves exactly like a single click.
        self.onCanvasButton(event)

    def onCanvasButtonMotion(self, event):
        # Drag the pending (last) point to follow the cursor.
        if self.current != None and self.inserting:
            x = event.x/self.zoom
            y = event.y/self.zoom
            self.xy[len(self.xy) - 2] = x
            self.xy[len(self.xy) - 1] = y
            self.current.setCoords(self.xy)

    def onCanvasShiftButtonMotion(self, event):
        # Constrain the pending segment to horizontal or vertical, whichever
        # axis the cursor has moved farther along.  Indices -4/-3 address the
        # previous point, -2/-1 the pending one.
        if self.current != None and self.inserting:
            x = event.x/self.zoom
            y = event.y/self.zoom
            if abs(x - self.xy[len(self.xy) - 4]) > abs(y - self.xy[len(self.xy) - 3]):
                self.xy[len(self.xy) - 2] = x
                self.xy[len(self.xy) - 1] = self.xy[len(self.xy) - 3]
            else:
                self.xy[len(self.xy) - 2] = self.xy[len(self.xy) - 4]
                self.xy[len(self.xy) - 1] = y
            self.current.setCoords(self.xy)

    def onCanvasButtonRelease(self, event):
        # Releasing button 1 commits the pending point.
        if event.num == 1:
            self.inserting = 0

    # fill color event
    def onFillColor(self, color):
        if self.current != None:
            self.current.setFillColor(color)

    # line width event
    def onLineWidth(self, lineWidth):
        if self.current != None:
            self.current.setWidth(lineWidth)
class PolylineInsertHandler(PolyInsertHandler):
    """Poly handler that produces open polylines."""

    def __init__(self, editor, eventHandler):
        self.editor = editor
        self.eventHandler = eventHandler
        self.canvas = editor.getCanvas()
        # Two points (four coordinates) are the minimum for a polyline.
        self.minimumCoords = 4

    def create(self):
        """Create the underlying line object for the current coordinates."""
        return self.editor.createLine(self.xy, smooth=self.smooth)
class PolygonInsertHandler(PolyInsertHandler):
    """Poly handler that produces closed polygons."""

    def __init__(self, editor, eventHandler):
        self.editor = editor
        self.eventHandler = eventHandler
        self.canvas = editor.getCanvas()
        # Three points (six coordinates) are the minimum for a polygon.
        self.minimumCoords = 6

    def create(self):
        """Create the underlying polygon object for the current coordinates."""
        return self.editor.createPolygon(self.xy, smooth=self.smooth)

    def onOutlineColor(self, color):
        """Forward an outline-color change to the polygon being inserted."""
        if self.current != None:
            self.current.setOutlineColor(color)

    def onOutlineFillOption(self, option):
        """Apply the (outline, fill) option pair to the polygon being inserted."""
        if self.current != None:
            self.current.setOutlineOption(option[0])
            self.current.setFillOption(option[1])
# Connector Insertion handler
class ConnectorInsertHandler(EventHandler.EventHandler):
    """Single-click handler that drops a connector at the click position."""

    def __init__(self, editor, eventHandler):
        self.editor = editor
        self.eventHandler = eventHandler
        self.canvas = editor.getCanvas()

    def start(self):
        """Begin a session with no connector placed yet."""
        self.current = None

    def stop(self):
        """Return the connector placed during this session (or None)."""
        return self.current

    # --- canvas events ---

    def onCanvasButton(self, event):
        """Place a connector on button 1 and immediately finish."""
        if event.num != 1:
            return
        self.zoom = self.editor.getZoom()
        point = [event.x/self.zoom, event.y/self.zoom]
        self.current = self.editor.createConnector(point)
        self.eventHandler.onInsertHandlerStopped(self.current)
#Image Insertion Handler
class ImageInsertHandler(EventHandler.EventHandler):
    """Handler that inserts a GIF image at the clicked canvas position."""
    def __init__(self, editor, eventHandler):
        self.editor = editor
        self.canvas = editor.getCanvas()
        self.eventHandler = eventHandler

    # starts the handler.
    def start(self):
        self.current = None

    # stops the handler.
    def stop(self):
        return self.current

    # canvas events
    def onCanvasButton(self, event):
        if event.num == 1:
            self.zoom = self.editor.getZoom()
            x = event.x/self.zoom
            y = event.y/self.zoom
            filename = tkFileDialog.askopenfilename(title="Open Image File",
                                                    filetypes=[("GIF files", "*.gif"),("All files", "*")] )
            # The file dialog steals focus; hand it back to the editor window.
            self.editor.root.focus_force()
            # Only accept files with a .GIF extension (case-insensitive).
            if( filename != "" and filename[-4:].upper() == '.GIF' ):
                # NOTE(review): the try/except around createImage is disabled —
                # the literal `if 1` always takes the success branch, so a
                # failed image load propagates instead of showing the error
                # dialog below.  Confirm whether this is intentional.
                if( 1):#try:
                    self.current = self.editor.createImage([x,y], filename)
                else:#except:
                    tkMessageBox.showerror("Open Image File","Cannot open file:\nFormat not recognized")
                    self.eventHandler.onInsertHandlerStopped(None)
                    return
                self.eventHandler.onInsertHandlerStopped(self.current)
                return
            else:
                self.eventHandler.onInsertHandlerStopped(None)
                return
# Text Insertion Handler
# The encapsulation of TextGF is broken here. Tkinter provides text editing
# capabilities that are used directly on the text item of the TextGF object.
class TextInsertHandler(EventHandler.EventHandler):
    """Handler that inserts an empty text object at the clicked position.

    Note: the encapsulation of TextGF is broken here — Tkinter's built-in
    text-editing capabilities are used directly on the text item of the
    TextGF object.
    """

    def __init__(self, editor, eventHandler):
        self.editor = editor
        self.eventHandler = eventHandler
        self.canvas = editor.getCanvas()

    def start(self):
        """Begin a session with no text object placed yet."""
        self.current = None

    def stop(self):
        """Return the text object placed during this session (or None)."""
        return self.current

    # --- canvas events ---

    def onCanvasButton(self, event):
        """Place an empty text object on button 1 and immediately finish."""
        if event.num != 1:
            return
        self.zoom = self.editor.getZoom()
        position = [event.x/self.zoom, event.y/self.zoom]
        self.current = self.editor.createText(position, "")
        self.eventHandler.onInsertHandlerStopped(self.current)
# Named Connector Insertion handler
class NamedConnectorInsertHandler(EventHandler.EventHandler):
    """Single-click handler that drops a named connector at the click position."""

    def __init__(self, editor, eventHandler):
        self.editor = editor
        self.eventHandler = eventHandler
        self.canvas = editor.getCanvas()

    def start(self):
        """Begin a session with no named connector placed yet."""
        self.current = None

    def stop(self):
        """Return the named connector placed during this session (or None)."""
        return self.current

    # --- canvas events ---

    def onCanvasButton(self, event):
        """Place a named connector on button 1 and immediately finish."""
        if event.num != 1:
            return
        self.zoom = self.editor.getZoom()
        point = [event.x/self.zoom, event.y/self.zoom]
        self.current = self.editor.createNamedConnector(point)
        self.eventHandler.onInsertHandlerStopped(self.current)
# Attribute Insertion handler
class AttributeInsertHandler(EventHandler.EventHandler):
    """Single-click handler that drops an attribute object at the click position."""

    def __init__(self, editor, eventHandler):
        self.editor = editor
        self.eventHandler = eventHandler
        self.canvas = editor.getCanvas()

    def start(self):
        """Begin a session with no attribute placed yet."""
        self.current = None

    def stop(self):
        """Return the attribute placed during this session (or None)."""
        return self.current

    # --- canvas events ---

    def onCanvasButton(self, event):
        """Place an attribute (default text "attribute") on button 1 and finish."""
        if event.num != 1:
            return
        self.zoom = self.editor.getZoom()
        point = [event.x/self.zoom, event.y/self.zoom]
        self.current = self.editor.createAttribute(point, "attribute")
        self.eventHandler.onInsertHandlerStopped(self.current)
|
You acknowledge that the content included on this site, including but not limited to text, graphics, logos, button icons, images, audio clips, software, and the selection and arrangements thereof, is and shall remain the sole and exclusive property of CWI or its content suppliers and protected by Canadian and international copyright laws. Absent the consent of CWI, the content on this site may be used for personal, non-commercial use only. Any other use, including the reproduction, modification, distribution, republication or display, of the content on this site is strictly prohibited and an infringement of copyright or proprietary rights in the information.
The CWI name, trade names, trademarks and logo, and all related product and service names, design marks and slogans are the trademarks, service marks or registered trademarks of CWI or its affiliates and may not be used in any commercial manner without the prior written consent of CWI. All other products and service marks contained on the site are the trademarks of their respective owners. Reference to any products, services, processes or other information by trade name, trademark, manufacturer, supplier, or otherwise does not necessarily constitute or imply the endorsement, sponsorship or recommendation by CWI.
THE INFORMATION, CONTENT, PRODUCTS, SERVICES, AND MATERIALS CONTAINED IN OR PROVIDED OR AVAILABLE THROUGH THIS SITE ARE PROVIDED ON AN “AS IS” BASIS WITH NO WARRANTY. TO THE MAXIMUM EXTENT PERMITTED BY LAW, CWI DISCLAIMS ALL REPRESENTATIONS AND WARRANTIES, EXPRESS OR IMPLIED, WITH RESPECT TO SUCH INFORMATION, CONTENT, SERVICES, PRODUCTS, AND MATERIALS, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE, NONINFRINGEMENT, FREEDOM FROM ERROR, INTERRUPTION, COMPUTER VIRUS, OR OTHER HARMFUL COMPONENTS AND IMPLIED WARRANTIES ARISING FROM COURSE OF DEALING OR COURSE OF PERFORMANCE. IN ADDITION, CWI DOES NOT REPRESENT OR WARRANT THAT THE INFORMATION ACCESSIBLE VIA THIS SITE IS ACCURATE, COMPLETE OR CURRENT. CWI HAS THE RIGHT TO MAKE CHANGES AND UPDATES TO ANY INFORMATION AVAILABLE THROUGH THE SITE WITHOUT PRIOR NOTICE.
This site may provide links or references to other web sites. However, CWI has no control over or responsibility for content on third party sites and transactions that occur therein. CWI has provided links to other sites merely as a convenience to users, and shall not be liable for any damages or injury arising from content on such third party web sites or transactions occurring therein. The terms and conditions and privacy policies governing this site may differ significantly from the policies of third party web sites. There are risks in using any information, software, or products found on the Internet or in otherwise entering transactions through the Internet. Accordingly, CWI cautions you to make certain you understand these risks and any applicable policies of third party web sites before retrieving, using, relying upon, or purchasing anything, or before otherwise transacting, via the Internet.
IN NO EVENT SHALL CWI OR ITS AFFILIATES BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, PUNITIVE, INCIDENTAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER, EVEN IF CWI OR ITS AFFILIATES HAVE BEEN PREVIOUSLY ADVISED OF THE POSSIBILITY OF SUCH DAMAGES, WHETHER IN AN ACTION UNDER CONTRACT, NEGLIGENCE, TORT OR ANY OTHER THEORY, ARISING OUT OF OR IN CONNECTION WITH THE USE, INABILITY TO USE, OR PERFORMANCE OF THE INFORMATION, SERVICES, CONTENT, PRODUCTS, AND MATERIALS AVAILABLE FROM THIS SITE OR THE INTERNET GENERALLY.
Because some jurisdictions do not allow the limitation or exclusion of liability for incidental or consequential damages, some of the above limitations may not apply to you. In such jurisdictions, CWI’s liability is limited to the greatest extent permitted by law.
You agree to indemnify and hold CWI’s, its parents, subsidiaries, affiliates, officers and employees, harmless from any claim or demand, including reasonable attorneys’ fees, made by any third party due to or arising out of your use of the site, your violation of these terms and conditions or your infringement of any intellectual property or other right of any person or entity.
These terms and conditions, and the respective rights and obligations of the parties hereunder, shall be governed by, and construed in accordance with, the laws of the Province of Manitoba. Any dispute arising between you and CWI will be submitted to arbitration in the Province of Manitoba in accordance with the rules of the standing judiciary committee then in effect. Nothing shall deprive you of the benefits of your province’s consumer protection laws.
|
"""
This file contains the unit tests for the :mod:`communication` app.
This app contains both model and view tests:
* :class:`~communication.tests.CommunicationModelTests`
* :class:`~communication.tests.CommunicationViewTests`
"""
from lab_website.tests import BasicTests
from communication.models import LabAddress,LabLocation,Post
from personnel.models import Address, Person
from papers.models import Publication
from projects.models import Project
class CommunicationModelTests(BasicTests):
    '''This class tests the views associated with models in the :mod:`communication` app.'''

    fixtures = ['test_address', ]

    def test_create_new_lab_address(self):
        '''This test creates a :class:`~communication.models.LabAddress` with the required information.'''
        lab_address = LabAddress(type='Primary',
                                 address=Address.objects.get(pk=1))
        lab_address.save()
        # Presumes no LabAddress objects are loaded in fixture data.
        self.assertEqual(lab_address.pk, 1)

    def test_lab_address_unicode(self):
        '''This tests the unicode representation of a :class:`~communication.models.LabAddress`.'''
        lab_address = LabAddress(type='Primary',
                                 address=Address.objects.get(pk=1))
        lab_address.save()
        self.assertEqual(lab_address.pk, 1)
        self.assertEqual(lab_address.__unicode__(),
                         Address.objects.get(pk=1).__unicode__())

    def test_create_new_lab_location(self):
        '''This test creates a :class:`~communication.models.LabLocation` with the required information only.'''
        location = LabLocation(name='Memphis', type='City', priority=1)
        location.save()
        # Presumes no LabLocation objects are loaded in fixture data.
        self.assertEqual(location.pk, 1)

    def test_create_new_lab_location_all(self):
        '''This test creates a :class:`~communication.models.LabLocation` with all fields included.'''
        location = LabLocation(name='Memphis',
                               type='City',
                               priority=1,
                               address=Address.objects.get(pk=1),
                               url='www.cityofmemphis.org',
                               description='some description about the place',
                               lattitude=35.149534,
                               longitude=-90.04898)
        location.save()
        self.assertEqual(location.pk, 1)

    def test_lab_location_unicode(self):
        '''This tests the unicode representation of a :class:`~communication.models.LabLocation`.'''
        location = LabLocation(name='Memphis', type='City', priority=1)
        location.save()
        self.assertEqual(location.pk, 1)
        self.assertEqual(location.__unicode__(), 'Memphis')
class CommunicationViewTests(BasicTests):
    '''This class tests the views associated with the :mod:`communication` app.'''

    def test_feed_details_view(self):
        """This tests the feed-details view, ensuring that templates are loaded correctly.

        This view uses a user with superuser permissions so does not test the permission levels for this view."""
        test_response = self.client.get('/feeds')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'feed_details.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTrue('google_calendar_id' in test_response.context)

    def test_lab_rules_view(self):
        '''This tests the lab-rules view.

        The tests ensure that the correct template is used.
        It also tests whether the correct context is passed (if included).
        This view uses a user with superuser permissions so does not test the permission levels for this view.'''
        test_response = self.client.get('/lab-rules')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'lab_rules.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTrue('lab_rules' in test_response.context)
        self.assertTrue('lab_rules_source' in test_response.context)

    def test_data_sharing_view(self):
        '''This tests the data-resource-sharing view.

        The tests ensure that the correct template is used.
        It also tests whether the correct context is passed (if included).
        This view uses a user with superuser permissions so does not test the permission levels for this view.'''
        # BUG FIX: this method was previously also named test_lab_rules_view,
        # which silently shadowed the lab-rules test above so only one of the
        # two ever ran.  Renamed so both tests are discovered.
        test_response = self.client.get('/data-resource-sharing')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'data_sharing_policy.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTrue('data_sharing_policy' in test_response.context)
        self.assertTrue('data_sharing_policy_source' in test_response.context)

    def test_twitter_view(self):
        '''This tests the twitter view.

        Currently it just ensures that the template is loading correctly.
        '''
        test_response = self.client.get('/twitter')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'twitter_timeline.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTrue('timeline' in test_response.context)

    def test_calendar_view(self):
        '''This tests the google-calendar view.

        Currently it just ensures that the template is loading correctly.
        '''
        test_response = self.client.get('/calendar')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'calendar.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTrue('google_calendar_id' in test_response.context)

    # NOTE(review): disabled test kept for reference; re-enable once the
    # /wikipedia view is restored.
    # def test_wikipedia_view(self):
    #     '''This tests the wikipedia-edits view.
    #
    #     Currently it just ensures that the template is loading correctly.
    #     '''
    #     test_response = self.client.get('/wikipedia')
    #     self.assertEqual(test_response.status_code, 200)
    #     self.assertTemplateUsed(test_response, 'wikipedia_edits.html')
    #     self.assertTemplateUsed(test_response, 'base.html')
    #     self.assertTemplateUsed(test_response, 'jquery_script.html')
    #     self.assertTrue('pages' in test_response.context)

    def test_news_view(self):
        '''This tests the lab-news view.

        Currently it just ensures that the template is loading correctly.
        '''
        test_response = self.client.get('/news')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'lab_news.html')
        self.assertTemplateUsed(test_response, 'base.html')
        #self.assertTrue('statuses' in test_response.context)
        self.assertTrue('links' in test_response.context)
        #self.assertTrue('milestones' in test_response.context)

    def test_contact_page(self):
        '''This tests the contact-page view.

        Currently it just ensures that the template is loading correctly.
        '''
        test_response = self.client.get('/contact/')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'contact.html')
        self.assertTemplateUsed(test_response, 'base.html')

    def test_location_page(self):
        '''This tests the location view.

        Currently it ensures that the template is loading, and that that the location_list context is passed.
        '''
        test_response = self.client.get('/location')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'location.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTrue('lablocation_list' in test_response.context)
class PostModelTests(BasicTests):
    '''This class tests various aspects of the :class:`~papers.models.Post` model.'''

    fixtures = ['test_publication', 'test_publication_personnel', 'test_project', 'test_personnel']

    def test_create_new_post_minimum(self):
        '''This test creates a :class:`~papers.models.Post` with the required information only.'''
        minimal_post = Post(
            post_title="Test Post",
            author=Person.objects.get(pk=1),
            markdown_url='https://raw.githubusercontent.com/BridgesLab/Lab-Website/master/LICENSE.md')
        minimal_post.save()
        # Presumes the fixtures contain no Post objects.
        self.assertEqual(minimal_post.pk, 1)

    def test_create_new_post_all(self):
        '''This test creates a :class:`~papers.models.Post` with all fields entered.'''
        complete_post = Post(
            post_title="Test Post",
            author=Person.objects.get(pk=1),
            markdown_url='https://raw.githubusercontent.com/BridgesLab/Lab-Website/master/LICENSE.md',
            paper=Publication.objects.get(pk=1),
            project=Project.objects.get(pk=1))
        complete_post.save()
        self.assertEqual(complete_post.pk, 1)

    def test_post_unicode(self):
        '''This test creates a :class:`~papers.models.Post` and then verifies the unicode representation is correct.'''
        sample_post = Post(
            post_title="Test Post",
            author=Person.objects.get(pk=1),
            markdown_url='https://raw.githubusercontent.com/BridgesLab/Lab-Website/master/LICENSE.md')
        sample_post.save()
        # The unicode representation is the post title.
        self.assertEqual(sample_post.__unicode__(), "Test Post")

    def test_post_slugify(self):
        '''This test creates a :class:`~papers.models.Post` and then verifies the slug is generated from the title.'''
        sample_post = Post(
            post_title="Test Post",
            author=Person.objects.get(pk=1),
            markdown_url='https://raw.githubusercontent.com/BridgesLab/Lab-Website/master/LICENSE.md')
        sample_post.save()
        # Saving slugifies the title into post_slug.
        self.assertEqual(sample_post.post_slug, "test-post")
class PostViewTests(BasicTests):
    '''These test the views associated with post objects.'''

    fixtures = ['test_post', 'test_publication', 'test_publication_personnel', 'test_project', 'test_personnel']

    def test_post_details_view(self):
        """This tests the post-details view, ensuring that templates are loaded correctly.

        This view uses a user with superuser permissions so does not test the permission levels for this view."""
        response = self.client.get('/posts/fixture-post')
        self.assertEqual(response.status_code, 200)
        for template in ('post_detail.html', 'base.html',
                         'disqus_snippet.html', 'analytics_tracking.html'):
            self.assertTemplateUsed(response, template)
        self.assertTrue('post' in response.context)
        # An unknown slug must yield a 404.
        missing_response = self.client.get('/posts/not-a-fixture-post')
        self.assertEqual(missing_response.status_code, 404)

    def test_post_list(self):
        """This tests the post-list view, ensuring that templates are loaded correctly.

        This view uses a user with superuser permissions so does not test the permission levels for this view."""
        response = self.client.get('/posts/')
        self.assertEqual(response.status_code, 200)
        for template in ('post_list.html', 'base.html', 'analytics_tracking.html'):
            self.assertTemplateUsed(response, template)
        self.assertTrue('post_list' in response.context)

    def test_post_new(self):
        """This tests the post-new view, ensuring that templates are loaded correctly.

        This view uses a user with superuser permissions so does not test the permission levels for this view."""
        response = self.client.get('/posts/new')
        self.assertEqual(response.status_code, 200)
        for template in ('post_form.html', 'base.html', 'analytics_tracking.html'):
            self.assertTemplateUsed(response, template)

    def test_post_edit(self):
        """This tests the post-edit view, ensuring that templates are loaded correctly.

        This view uses a user with superuser permissions so does not test the permission levels for this view."""
        response = self.client.get('/posts/fixture-post/edit')
        self.assertEqual(response.status_code, 200)
        for template in ('post_form.html', 'base.html', 'analytics_tracking.html'):
            self.assertTemplateUsed(response, template)
        # Editing an unknown post must yield a 404.
        missing_response = self.client.get('/posts/not-a-fixture-post/edit')
        self.assertEqual(missing_response.status_code, 404)

    def test_post_delete(self):
        """This tests the post-delete view, ensuring that templates are loaded correctly.

        This view uses a user with superuser permissions so does not test the permission levels for this view."""
        response = self.client.get('/posts/fixture-post/delete')
        self.assertEqual(response.status_code, 200)
        for template in ('confirm_delete.html', 'base.html', 'analytics_tracking.html'):
            self.assertTemplateUsed(response, template)
        # Deleting an unknown post must yield a 404.
        missing_response = self.client.get('/posts/not-a-fixture-post/delete')
        self.assertEqual(missing_response.status_code, 404)
|
Labour’s grassroots campaign group Momentum has been fined over £16,000 for multiple breaches of electoral law during the 2017 general election.
Chris Williamson has been suspended from Labour after accusing the party of being “too apologetic” over allegations of anti-semitism.
The event, taking place in Liverpool, is set to run alongside the Labour Party conference.
The Scottish Labour Leader spoke to Julia Hartley-Brewer about the reported Unite-Momentum pact.
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class SaleOder(orm.Model):
    """Extend sale.order with pre/post note templates and free-text notes.

    NOTE(review): the class name is misspelled ("SaleOder") but is kept as-is
    because the Python name may be referenced elsewhere; the ORM model is
    identified by ``_inherit`` anyway.
    """
    _inherit = 'sale.order'

    def onchange_note(self, cr, uid, ids, item_id, field, context=None):
        '''Copy the selected note template's text into *field* on change.'''
        res = {'value': {}}
        if not item_id:
            return res
        template = self.pool.get('res.note.template').browse(
            cr, uid, item_id, context=context)
        res['value'][field] = template['text']
        return res

    _columns = {
        # Template selectors; picking one triggers onchange_note.
        'text_note_pre_id': fields.many2one('res.note.template',
            'Set pre'),
        'text_note_post_id': fields.many2one('res.note.template',
            'Set post'),
        # Free-text notes shown before/after the order body.
        'text_note_pre': fields.text('Pre text'),
        'text_note_post': fields.text('Post text'),
        'text_delivery_note': fields.text('Delivery note'),
    }
class SaleOderLine(orm.Model):
    """Extend sale.order.line with pre/post note templates and free-text notes."""
    _inherit = 'sale.order.line'

    def onchange_note(self, cr, uid, ids, item_id, field, context=None):
        '''Copy the selected note template's text into *field* on change.'''
        res = {'value': {}}
        if not item_id:
            return res
        template = self.pool.get('res.note.template').browse(
            cr, uid, item_id, context=context)
        res['value'][field] = template['text']
        return res

    _columns = {
        # Template selectors; picking one triggers onchange_note.
        'text_note_pre_id': fields.many2one('res.note.template',
            'Set pre'),
        'text_note_post_id': fields.many2one('res.note.template',
            'Set post'),
        # Free-text notes shown before/after the line.
        'text_note_pre': fields.text('Pre text'),
        'text_note_post': fields.text('Post text'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Luxury boxed matches available in 3 stunning designs from Chase and Wonder. Each box has been letter pressed using a vintage windmill press. These exquisite designs are edged in gold foil, and each box contains 125 longer-length safety matches. A great gift, stocking filler or a more attractive option for keeping your matches by the fireplace. Each box is designed and produced in the UK.
3 designs to choose from - please select in drop down menu.
|
"""
Visibility Transformer implementation.
"""
from openedx.core.lib.block_structure.transformer import BlockStructureTransformer, FilteringTransformerMixin
from .utils import collect_merged_boolean_field
class VisibilityTransformer(FilteringTransformerMixin, BlockStructureTransformer):
    """
    Enforce the visible_to_staff_only field on blocks.

    Blocks for which the user lacks access are removed from the block
    structure.  The visible_to_staff_only value is percolated down to a
    block's descendants so that every block honours the visibility
    settings of its ancestors.

    A block with multiple parents is denied only when *all* of its
    parents deny visibility.

    Staff users are exempted from visibility rules.
    """
    VERSION = 1
    # Name of the collected, ancestry-merged boolean field.
    MERGED_VISIBLE_TO_STAFF_ONLY = 'merged_visible_to_staff_only'

    @classmethod
    def name(cls):
        """
        Unique identifier for the transformer's class;
        same identifier used in setup.py.
        """
        return "visibility"

    @classmethod
    def _get_visible_to_staff_only(cls, block_structure, block_key):
        """
        Return the merged staff-only visibility computed for *block_key*
        during the collect phase (defaults to False when absent).
        """
        return block_structure.get_transformer_block_field(
            block_key, cls, cls.MERGED_VISIBLE_TO_STAFF_ONLY, False
        )

    @classmethod
    def collect(cls, block_structure):
        """
        Collect the ancestry-merged visible_to_staff_only value needed
        by transform_block_filters at transform time.
        """
        collect_merged_boolean_field(
            block_structure,
            transformer=cls,
            xblock_field_name='visible_to_staff_only',
            merged_field_name=cls.MERGED_VISIBLE_TO_STAFF_ONLY,
        )

    def transform_block_filters(self, usage_info, block_structure):
        """Return the filters removing staff-only blocks for non-staff users."""
        if usage_info.has_staff_access:
            # Staff bypass the visibility check entirely.
            return [block_structure.create_universal_filter()]
        staff_only_filter = block_structure.create_removal_filter(
            lambda block_key: self._get_visible_to_staff_only(
                block_structure, block_key),
        )
        return [staff_only_filter]
|
Talented workers often leave jobs at high-profile tech companies to set off on their own projects. From Instagram to Nest and beyond, many successful companies trace their origins to employees who once worked at Google, Apple and so on.
Nikhil Bhogal, the co-founder and chief technology officer of June, is one such entrepreneur. Bhogal worked on software for Apple’s iPhone and iPad before departing for Path. Shortly thereafter, he went on to create a new product of his own: A smart oven that June claims can help foodies cook the perfect steak or salmon.
|
#!/usr/bin/env python3
import sys
import os.path
from math import atan, pi
import argparse
# Fail fast with a clear message when numpy is missing.
try:
    import numpy as np
except ImportError:
    # BUG FIX: was a bare "except:", which also swallowed SystemExit and
    # KeyboardInterrupt; only an ImportError means numpy is absent.
    sys.exit("Please install numpy")
# Fail fast with a clear message when OpenCV is missing.
try:
    import cv2
except ImportError:
    # BUG FIX: was a bare "except:", which also swallowed SystemExit and
    # KeyboardInterrupt; only an ImportError means OpenCV is absent.
    sys.exit("Please install OpenCV")
# Command-line interface.
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--imagefolder", type=str,
                    dest="imagefolder", help="Path of images", required=True)
parser.add_argument("-s", "--facescale", type=str, dest="facescale",
                    help="scale of the face (default is 1/3)")
parser.add_argument("-f", "--fps", type=str, dest="fps",
                    help="fps of the resulting file (default is 24)")
parser.add_argument("-n", "--nameoftargetfile", type=str, dest="outputfile",
                    help="name of the output file")
parser.add_argument("-w", "--write", action="store_true", dest="write",
                    default=False, help="to write every single image to file")
parser.add_argument("-r", "--reverse", action="store_true", dest="reverse",
                    default=False, help="iterate the files reversed")
parser.add_argument("-q", "--quiet", action="store_false", dest="quiet",
                    default=True, help="the output should be hidden")
parser.add_argument("-m", "--multiplerender", action="store_true",
                    dest="multiplerender", default=False,
                    help="render the images multiple times")

# Parse and normalise the input.
args = parser.parse_args()

# BUG FIX: the None-check must run before the "/" concatenation; previously
# "args.imagefolder + '/'" was computed first, so a missing value would have
# raised TypeError before the check could ever trigger.  With required=True
# this is only a defensive guard.
if args.imagefolder is None:
    sys.exit("No images given")
imagefolder = args.imagefolder + "/"

# Face size relative to min(height, width); defaults to one third.
facescale = float(1.0 / 3) if args.facescale is None else float(args.facescale)

# Frames per second of the resulting movie (default 24).
fps = 24 if args.fps is None else float(args.fps)

# Basename of the generated movie/gif.
outputfile = "animation" if args.outputfile is None else args.outputfile

write = bool(args.write)
reverse = bool(args.reverse)
# NOTE(review): despite its name, quiet=True *enables* progress output
# (the -q flag stores False); kept as-is for backward compatibility.
quiet = bool(args.quiet)
multiplerender = bool(args.multiplerender)

# OpenCV Haar cascade files must live next to the script.
if (os.path.isfile("haarcascade_frontalface_default.xml")):
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
else:
    sys.exit("haarcascade_frontalface_default.xml not found")
if (os.path.isfile("haarcascade_eye.xml")):
    eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
else:
    sys.exit("haarcascade_eye.xml not found")
def dectectFace(gray):
    """Detect faces in the grayscale image *gray*.

    Returns the detections only when exactly one face is acceptable;
    in multi-render mode, returns None as soon as several faces appear
    or no scale factor yields a single face.
    (NOTE(review): function name misspelling kept — it is called elsewhere.)
    """
    if not multiplerender:
        # Single pass with a fixed scale factor.
        return face_cascade.detectMultiScale(
            gray, scaleFactor=1.3, minNeighbors=5, minSize=(60, 60))
    # Sweep the scale factor from coarse to fine until exactly one face shows up.
    for factor in np.arange(1.05, 1.65, 0.02)[::-1]:
        candidates = face_cascade.detectMultiScale(
            gray, scaleFactor=factor, minNeighbors=5, minSize=(60, 60))
        if len(candidates) == 1:
            return candidates
        if len(candidates) > 1:
            # Ambiguous frame: more than one face means we cannot align it.
            return None
    return None
def detectEye(roi_gray):
    """Detect eyes inside the face region *roi_gray*.

    Returns the detections only when exactly two eyes are acceptable;
    in multi-render mode, returns None as soon as more than two eyes
    appear or no scale factor yields exactly two.
    """
    if not multiplerender:
        # Single pass with a fixed scale factor.
        return eye_cascade.detectMultiScale(
            roi_gray, scaleFactor=1.05, minNeighbors=5, minSize=(25, 25))
    # Sweep the scale factor from coarse to fine until exactly two eyes show up.
    for factor in np.arange(1.01, 1.10, 0.01)[::-1]:
        candidates = eye_cascade.detectMultiScale(
            roi_gray, scaleFactor=factor, minNeighbors=5, minSize=(25, 25))
        if len(candidates) == 2:
            return candidates
        if len(candidates) > 2:
            # Ambiguous region: more than two detections cannot be used.
            return None
    return None
def drawFaces(faces, img):
    """Outline each detected face with a blue rectangle (debug helper)."""
    for (left, top, width, height) in faces:
        cv2.rectangle(img, (left, top),
                      (left + width, top + height), (255, 0, 0), 1)
def drawEyes(eyes, img):
    """Outline each detected eye with a green rectangle (debug helper)."""
    for (left, top, width, height) in eyes:
        cv2.rectangle(img, (left, top),
                      (left + width, top + height), (0, 255, 0), 1)
def detect(img, gray):
    """Return (faces, eyes) for *img*, or (None, None) when the frame is unusable.

    A frame is usable only when exactly one face and exactly two eyes
    are detected.
    """
    faces = dectectFace(gray)
    if faces is None or len(faces) != 1:
        # Zero or several faces: skip the frame.
        return None, None
    # Exactly one face is guaranteed here, so unpack it directly.
    x, y, w, h = faces[0]
    roi_gray = gray[y:y + h, x:x + w]
    eyes = detectEye(roi_gray)
    if eyes is None or len(eyes) != 2:
        # Eye detection failed inside the face region: skip the frame.
        return None, None
    return faces, eyes
def matrixPicture(face, eyes, height, width):
    """Compute the translation and rotation/scale matrices aligning the face.

    The translation moves the face centre to the image centre; the
    rotation matrix levels the line between the two eye centres and
    scales the face to *facescale* of the smaller image dimension.
    """
    face_cx = face[0] + (face[2] / 2)
    face_cy = face[1] + (face[3] / 2)
    center = (face_cx, face_cy)
    moveMatrix = np.float32([[1, 0, (width / 2) - face_cx],
                             [0, 1, (height / 2) - face_cy]])
    scale = float(min(height, width)) / float(face[2]) * facescale
    # Centres of the two eye boxes.
    first_eye = (eyes[0][0] + (eyes[0][2] / 2),
                 eyes[0][1] + (eyes[0][3] / 2))
    second_eye = (eyes[1][0] + (eyes[1][2] / 2),
                  eyes[1][1] + (eyes[1][3] / 2))
    dx = float(second_eye[0]) - float(first_eye[0])
    dy = float(second_eye[1]) - float(first_eye[1])
    # Vertical eye line (dx == 0) gets no rotation to avoid division by zero.
    angle = 0 if dx == 0 else atan(dy / dx) * 180 / pi
    rotMatrix = cv2.getRotationMatrix2D(center, angle, scale)
    return moveMatrix, rotMatrix
def calculatePicture(file):
    """Load *file*, centre and straighten its single detected face.

    Returns the warped image, or None when no usable face/eye pair
    was found.
    """
    img = cv2.imread(file)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces, eyes = detect(img, gray)
    height, width, channels = img.shape
    if faces is None or eyes is None:
        return None
    moveMatrix, rotMatrix = matrixPicture(faces[0], [eyes[0], eyes[1]],
                                          height, width)
    # Translate first, then rotate/scale around the (now centred) face.
    centered = cv2.warpAffine(img, moveMatrix, (width, height))
    return cv2.warpAffine(centered, rotMatrix, (width, height))
def checkInput():
    """Return the sorted list of image paths in *imagefolder*.

    Hidden files are skipped; exits the program when nothing is found.
    Sort order is reversed when the --reverse flag was given.
    """
    files = []
    if imagefolder:
        for entry in os.listdir(imagefolder):
            full_path = os.path.join(imagefolder, entry)
            if os.path.isfile(full_path) and not entry.startswith("."):
                files.append(imagefolder + entry)
    if not files:
        sys.exit("No files found")
    files.sort(reverse=reverse)
    return files
def toMovie():
    """Iterate the input files and append each aligned frame to a movie file."""
    files = checkInput()
    codecs = cv2.VideoWriter_fourcc(*'FMP4')
    # All frames are assumed to share the first image's dimensions.
    height, width, channel = cv2.imread(files[0]).shape
    video = cv2.VideoWriter(outputfile + ".mkv", codecs, fps,
                            (width, height), True)
    if not video.isOpened():
        sys.exit("Error when writing video file")
    processed = 0
    added = 0
    for file in files:
        frame = calculatePicture(file)
        processed = processed + 1
        if quiet:
            # In-place progress line (quiet=True means verbose, see CLI note).
            sys.stdout.flush()
            sys.stdout.write("\rimages: " + str(processed) + "/" +
                             str(len(files)) + " and " + str(added) +
                             " added to movie")
        if frame is not None and video.isOpened():
            added = added + 1
            video.write(frame)
    video.release()
    if quiet:
        print()
        print("saved to " + outputfile + ".mkv")
def toFile():
    """Save each aligned frame to ./tmp/ and assemble them into a gif.

    Uses ImageMagick's ``convert`` via subprocess; frames that yielded
    no usable face are silently skipped.
    """
    destdir = os.path.join(os.path.abspath(".") + r"/tmp/")
    import subprocess
    files = checkInput()
    if not os.path.exists(destdir):
        os.makedirs(destdir)
    for file in files:
        dst = calculatePicture(file)
        if dst is not None:
            cv2.imwrite(destdir + os.path.basename(file), dst)
    # BUG FIX: subprocess.call requires string arguments; fps is numeric
    # (int 24 by default, or a float from --fps) and previously raised
    # TypeError when passed directly.
    delay = str(fps)
    if quiet:
        print("all files are saved in: " + str(destdir))
        print("now generating gif ...")
        print(subprocess.call(["convert", "-delay", delay,
                               "-loop", "0", "tmp/*.jpeg",
                               outputfile + ".gif"]))
    else:
        subprocess.call(["convert", "-delay", delay,
                         "-loop", "0", "tmp/*.jpeg", outputfile + ".gif"])
# Entry point: with -w write individual frames (and build a gif),
# otherwise render the frames directly into a movie file.
if __name__ == '__main__':
    if write:
        toFile()
    else:
        toMovie()
|
REVISED: 01/22/2019 – The project period was reduced to one year, instead of two.
IMPORTANT! If you have questions about the RFA, please submit them to the DRDC here. We will consult with the CDC and provide answers directly to applicants, usually within 2 days, and post all questions and answers within 24 hours of the resolution on the DRDC website here. Please remember to check this page often.
Eligible Applicants. Proposals are invited from universities, research or health institutions, and non-profit organizations, which have or can gain access to the EHDI data that is collected/maintained at the Department of Health or Department of Education or another designated state agency. This opportunity is open to applicants from U.S. institutions only.
Center/Division goal(s) and thematic area priorities aligned with this research project. Ensuring that all newborns are screened and assessed for hearing loss and receive appropriate intervention.
Purpose. To study the overall impact of Early Hearing Detection and Intervention (EHDI) on the academic setting or placement, and cost of educating Deaf or Hard of Hearing (D/HH) children.
Since the passage of the EHDI Act in 2006 all states and territories and the District of Columbia have established EHDI programs that provide hearing screening and follow-up for all newborns. Many D/HH children, who have been identified by and benefited from the EHDI program, are now enrolled in school, but many questions remain, such as: Has EHDI impacted the academic placement of D/HH children? Has EHDI affected both the type of school in which D/HH children attend and the type of educational services they receive? What is the cost of educating D/HH children before and after EHDI programs became a widespread practice?
Research Goals and Objectives. This funding is intended to explore the academic placements, educational services and the cost of educating D/HH children before and after the widespread adoption of EHDI in the United States. The collection and analysis of data regarding the usage of varying types of educational plans and approaches (e.g., Individualized Educational Plans (IEP), 504 Plans, mainstream approach to placement, special individualized services provided to D/HH students) will be helpful in understanding the educational landscape. Similarly, the estimated costs associated with the various approaches, as well as best estimates of the overall costs of educating D/HH children before and after EHDI programs were established, should be compared and adjusted for inflation.
Before an EHDI program was established in the state.
D/HH children with additional co-morbidity can either be excluded or stratified as a sub-group.
Data should include variables that allow for control or stratification of D/HH children according to the type of hearing technology (hearing aids and cochlear implant) used.
Should have a plan for estimating inflation-adjusted costs of education before and after the widespread implementation of EHDI programs in the catchment area.
An analysis plan that details how data will be stratified and analyzed.
Detailed budget, including identification of any sub-contractors.
Description of plans and IRB requirements and timeline regarding the protection of human subjects.
Type of school or school placement can be defined as regular school where D/HH children are mainstreamed and educated with non D/HH children.
A school for the deaf, either a private or a state school, is an educational setting where the majority of the students are D/HH and are educated by deaf teachers or by teachers trained in deafness.
Inclusion or mainstream placement where the student is placed in a regular classroom with same age peers.
Resource room placement is when a student leaves a regular classroom for a designated time period to receive specialized instruction in language, reading, or math.
Self-contained classroom placement where the student is placed in a small controlled class with a special-education teacher.
Type of educational resources provided can include speech/language therapy or services, any form of aural/oral habilitation, or any educational remedial assistance, intervention or services.
The anticipated impact from a successfully completed project will elucidate whether there have been educational cost savings associated with this public health effort and the effect of EHDI on the education of D/HH children.
|
# -*- coding: utf-8 -*-
import unittest
from mock import patch, PropertyMock, Mock
import requests
from hover.client import HoverClient
class TestHover(unittest.TestCase):
    """Tests for HoverClient with all HTTP traffic fully mocked.

    No network calls are made: ``requests.post`` (login) and
    ``requests.request`` (API calls) are patched in every test.
    """

    def setUp(self):
        """Create a HoverClient against a fake login and domain listing.

        The patched POST fakes a successful login returning a ``hoverauth``
        cookie; the patched request fakes the domain listing from which the
        client resolves its ``dns_id`` for ``worldofchris.com``.
        """
        self.DNS_ID = 12345
        with patch('requests.post') as patched_post, patch('requests.request') as patched_request:
            # Login response: ok=True plus the auth cookie the client stores.
            type(patched_post.return_value).ok = PropertyMock(return_value=True)
            type(patched_post.return_value).cookies = PropertyMock(
                return_value={"hoverauth": "foo",
                              "domains": []})
            # Domain listing response used to look up the dns_id.
            type(patched_request.return_value).ok = PropertyMock(return_value=True)
            type(patched_request.return_value).json = Mock(
                return_value={"succeeded": True,
                              "domains": [{"domain_name": "worldofchris.com",
                                           "id": self.DNS_ID}]})
            username = 'mrfoo'
            password = 'keyboardcat'
            domain_name = 'worldofchris.com'
            self.client = HoverClient(username=username,
                                      password=password,
                                      domain_name=domain_name)

    def testInitClient(self):
        """
        Initalise the client
        """
        # The client must have resolved the dns_id from the mocked listing.
        self.assertEqual(self.client.dns_id, self.DNS_ID)

    def testAddCname(self):
        """
        Add a CNAME
        """
        with patch('requests.request') as patched_request:
            type(patched_request.return_value).json = Mock(
                return_value={"succeeded": True})
            expected = {"succeeded": True}
            actual = self.client.add_record(type="CNAME",
                                            name="megatron",
                                            content="crazyland.aws.com")
            self.assertEqual(actual, expected)

    def testGetRecord(self):
        """
        Get a record so we can check if it exists and has the
        expected value
        """
        with patch('requests.request') as patched_request:
            # One mocked response: the domain entry listing to search through.
            type(patched_request.return_value).json = Mock(
                side_effect=[{"succeeded": True,
                              "domains": [{"entries": [{"type": "CNAME",
                                                        "name": "megatron",
                                                        "content": "crazyland.aws.com",
                                                        "id": "dns1234"}]}
                                          ]}])
            expected = {"name": "megatron",
                        "type": "CNAME",
                        "content": "crazyland.aws.com",
                        "id": "dns1234"}
            actual = self.client.get_record(type="CNAME",
                                            name="megatron")
            self.assertEqual(actual, expected)

    def testUpdateCname(self):
        """
        Update content for an existing record
        """
        with patch('requests.request') as patched_request:
            # Two mocked responses in order: the entry lookup, then the update.
            type(patched_request.return_value).json = Mock(
                side_effect=[{"succeeded": True,
                              "domains": [{"entries": [{"type": "CNAME",
                                                        "name": "megatron",
                                                        "content": "blah",
                                                        "id": "dns1234"}]}
                                          ]},
                             {"succeeded": True}])
            expected = {"succeeded": True}
            actual = self.client.update_record(type="CNAME",
                                               name="megatron",
                                               content="foo.aws.com")
            self.assertEqual(actual, expected)

    def testRemoveCname(self):
        """
        Remove a CNAME
        """
        with patch('requests.request') as patched_request:
            # Two mocked responses in order: the entry lookup, then the delete.
            type(patched_request.return_value).json = Mock(
                side_effect=[{"succeeded": True,
                              "domains": [{"entries": [{"type": "CNAME",
                                                        "name": "megatron",
                                                        "content": "blah",
                                                        "id": "dns1234"}]}
                                          ]},
                             {"succeeded": True}])
            expected = {"succeeded": True}
            actual = self.client.remove_record(type="CNAME",
                                               name="megatron")
            self.assertEqual(actual, expected)
|
Bloomingdale’s. The Greatest Showman, a film chronicling the life and career of circus impresario P.T. Barnum, will debut December 20. To complement its release, Bloomingdale’s has outfitted its windows with breathtaking vignettes of delightful and death-defying circus acts, many bedecked in scintillating Swarovski crystals.
Saks Fifth Avenue. Hi ho, hi ho, it’s off to window shop we go. Disney’s Snow White and the Seven Dwarfs turns eighty this year, and to honor the occasion, all fourteen of Saks’s 5th Avenue windows now display beloved scenes from this classic tale that became the world’s first feature-length animated film.
Tiffany & Co. It’s true, you can now breakfast at Tiffany’s. You can also enjoy the five window displays paying tribute to the renowned Tiffany Blue Box. Each display features a whimsical scene and at least one miniature replica of the iconic Tiffany’s chandelier.
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Search engine API."""
import hashlib
from functools import partial
from elasticsearch import VERSION as ES_VERSION
from elasticsearch_dsl import FacetedSearch, Search
from elasticsearch_dsl.faceted_search import FacetedResponse
from elasticsearch_dsl.query import Bool, Ids
from flask import current_app, request
from .proxies import current_search_client
class DefaultFilter(object):
    """Descriptor producing a (lazily evaluated, parsed) default filter query."""

    def __init__(self, query=None, query_parser=None):
        """Store the raw query and an optional parser (identity by default)."""
        self._query = query
        self.query_parser = query_parser or (lambda x: x)

    @property
    def query(self):
        """Evaluate the stored query on demand when it is callable."""
        if callable(self._query):
            return self._query()
        return self._query

    def __get__(self, obj, objtype):
        """Return the parsed query on attribute access."""
        return self.query_parser(self.query)
class MinShouldMatch(str):
    """Work-around for Elasticsearch DSL problem.

    The Elasticsearch DSL Bool query inspects the ``minimum_should_match``
    parameter with ordering comparisons, but only understands integers —
    not expressions like ``"0<1"``.  This str subclass defeats that
    inspection by answering False to every ordering comparison, while
    still serialising as the original string.
    """

    # Every ordering comparison is answered with False so the DSL's
    # integer-based validation never triggers on expression values.
    def __lt__(self, other):
        """Circumvent problematic Elasticsearch DSL clause."""
        return False

    def __le__(self, other):
        """Circumvent problematic Elasticsearch DSL clause."""
        return False

    def __gt__(self, other):
        """Circumvent problematic Elasticsearch DSL clause."""
        return False

    def __ge__(self, other):
        """Circumvent problematic Elasticsearch DSL clause."""
        return False
class RecordsSearch(Search):
    """Example subclass for searching records using Elastic DSL.

    Defaults (index, document types, fields, facets and the default
    filter) are read from the nested ``Meta`` class, so subclasses
    normally only override ``Meta`` attributes.
    """

    class Meta:
        """Configuration for ``Search`` and ``FacetedSearch`` classes."""

        index = '_all'
        doc_types = None
        fields = ('*', )
        facets = {}
        default_filter = None
        """Default filter added to search body.

        Example: ``default_filter = DefaultFilter('_access.owner:"1"')``.
        """

    def __init__(self, **kwargs):
        """Use Meta to set kwargs defaults.

        :param kwargs: forwarded to ``elasticsearch_dsl.Search`` once the
            ``Meta`` defaults (index, doc type, client) have been applied.
        """
        kwargs.setdefault('index', getattr(self.Meta, 'index', None))
        kwargs.setdefault('doc_type', getattr(self.Meta, 'doc_types', None))
        kwargs.setdefault('using', current_search_client)
        kwargs.setdefault('extra', {})
        # Optionally drop low-scoring hits globally via app configuration.
        min_score = current_app.config.get('SEARCH_RESULTS_MIN_SCORE')
        if min_score:
            kwargs['extra'].update(min_score=min_score)
        super(RecordsSearch, self).__init__(**kwargs)
        default_filter = getattr(self.Meta, 'default_filter', None)
        if default_filter:
            # "0<1" is not understood by the DSL's integer check, hence the
            # MinShouldMatch work-around.
            # NOTE: https://github.com/elastic/elasticsearch/issues/21844
            self.query = Bool(minimum_should_match=MinShouldMatch("0<1"),
                              filter=default_filter)

    def get_record(self, id_):
        """Return a record by its identifier.

        :param id_: The record identifier.
        :returns: The record.
        """
        return self.query(Ids(values=[str(id_)]))

    def get_records(self, ids):
        """Return records by their identifiers.

        :param ids: A list of record identifier.
        :returns: A list of records.
        """
        return self.query(Ids(values=[str(id_) for id_ in ids]))

    @classmethod
    def faceted_search(cls, query=None, filters=None, search=None):
        """Return faceted search instance with defaults set.

        :param query: Elastic DSL query object (``Q``).
        :param filters: Dictionary with selected facet values.
        :param search: An instance of ``Search`` class. (default: ``cls()``).
        """
        search_ = search or cls()

        class RecordsFacetedSearch(FacetedSearch):
            """Pass defaults from ``cls.Meta`` object."""

            # Facet configuration mirrors the outer search's ``Meta``.
            index = search_._index[0]
            doc_types = getattr(search_.Meta, 'doc_types', ['_all'])
            fields = getattr(search_.Meta, 'fields', ('*', ))
            facets = getattr(search_.Meta, 'facets', {})

            def search(self):
                """Use ``search`` or ``cls()`` instead of default Search."""
                # Later versions of `elasticsearch-dsl` (>=5.1.0) changed the
                # Elasticsearch FacetedResponse class constructor signature.
                if ES_VERSION[0] > 2:
                    return search_.response_class(FacetedResponse)
                return search_.response_class(partial(FacetedResponse, self))

        return RecordsFacetedSearch(query=query, filters=filters or {})

    def with_preference_param(self):
        """Add the preference param to the ES request and return a new Search.

        The preference param avoids the bouncing effect with multiple
        replicas, documented on ES documentation.
        See: https://www.elastic.co/guide/en/elasticsearch/guide/current
        /_search_options.html#_preference for more information.
        """
        user_hash = self._get_user_hash()
        if user_hash:
            return self.params(preference=user_hash)
        return self

    def _get_user_agent(self):
        """Retrieve the request's User-Agent, if available.

        Taken from Flask Login utils.py.
        """
        user_agent = request.headers.get('User-Agent')
        if user_agent:
            user_agent = user_agent.encode('utf-8')
        return user_agent or ''

    def _get_user_hash(self):
        """Calculate a digest based on request's User-Agent and IP address."""
        if request:
            user_hash = '{ip}-{ua}'.format(ip=request.remote_addr,
                                           ua=self._get_user_agent())
            # MD5 is acceptable here: the digest is only a stable routing
            # key for the ES ``preference`` parameter, not a security
            # mechanism.
            alg = hashlib.md5()
            alg.update(user_hash.encode('utf8'))
            return alg.hexdigest()
        return None
|
kids bunk bed with slide toddler loft bed with slide google search little girls bunk beds with stairs bunk beds and bed home interiors and gifts pictures.
wet tile saw wet tile saw for sale adelaide.
living room desk ideas living room desk desk for living room best of desk for living room living room built in desk ideas office desk in living room ideas.
quartz kitchen countertops upgrade your kitchen with these new quartz colors buy quartz countertop price.
modern adirondack chairs burn burn landscape design wwwburncom modern adirondack chair plans free.
corner bed frame corner bed furniture corner bed headboard impressive corner bed headboard kids rooms and furniture at mattress corner bed corner bed frame queen.
bamboo flooring in bathroom bamboo flooring in bathroom everything you have to know about bamboo flooring bathroom modern chair on bamboo flooring in bathroom bamboo tile flooring bathroom.
|
#!/usr/bin/env python
"""Docstring."""
from collections import Counter
from functools import reduce
from common import (
get_input,
)
class RoomAnalyzer:
    """Analyze "room" strings and sum the sector IDs of real rooms.

    A room string looks like ``"aaaaa-bbb-z-y-x-123[abxyz]"``: a
    dash-separated name, a three-digit sector ID and a five-letter
    checksum in brackets.  A room is real when its checksum equals the
    five most common name letters (ties broken alphabetically).
    """

    def __init__(self, input_list=None):
        """Store the list of room strings to analyze.

        :param input_list: iterable of room strings (defaults to empty).
        """
        # ``None`` default avoids the mutable-default-argument pitfall:
        # the original shared one ``[]`` across all instances.
        self.input_list = input_list if input_list is not None else []

    def process_room_string(self, room_string):
        """Split a room string into ``(name, sector_id, checksum)``.

        The name has its dashes removed; relies on the fixed trailing
        layout ``-NNN[ccccc]`` (11 characters).
        """
        name = room_string[:-11].replace("-", "")
        sector_id = int(room_string[-10:-7])
        checksum = room_string[-6:-1]
        return name, sector_id, checksum

    def room_is_real(self, room_name, room_checksum):
        """Return True when the checksum matches the letter frequencies."""
        counter = Counter(room_name)
        # Sort by descending count, then alphabetically for ties.
        ranked = sorted(counter.most_common(), key=lambda x: (-x[1], x[0]))
        checked = ''.join(letter for letter, _ in ranked)[:5]
        return checked == room_checksum

    def analyze_room(self, room_string):
        """Return the sector ID of a real room, or 0 for a decoy."""
        name, sector_id, checksum = self.process_room_string(room_string)
        return sector_id if self.room_is_real(name, checksum) else 0

    def analyze_input(self):
        """Sum the sector IDs of all real rooms in the input."""
        # ``sum`` replaces the original ``reduce(lambda x, y: x + y, ...)``.
        return sum(map(self.analyze_room, self.input_list))
if __name__ == "__main__":
    # Read the puzzle input once and reuse it: the original called
    # get_input() twice and discarded the first result.
    puzzle_input = get_input()
    analyzer = RoomAnalyzer(puzzle_input)
    print(analyzer.analyze_input())
|
For thousands of years, Albanians and Greeks have shared a common geography, history, culture, traditions, values and innumerable interests. Albanians and Greeks are the oldest foundations and have continually been the most consolidated nations in the Balkans.
Moreover, these two nations have co-existed in peace and cooperated throughout centuries. With a complete certainty it can be said that the co-existence between Albanians and Greeks has survived many great and fairly long historical challenges, while never generating destructive conflicts upon their people and assets.
The first problems among Albanians and Greeks took place from the Megali-Idea Doctrine of Greek Nationalism at the beginning of the XIX Century. The Megali-Idea Doctrine aimed at expanding the Greek nation-state in the East-European geopolitical territories that were inhabited by people that professed Eastern Orthodoxy religion. With these ambitions the architects of Greek Nationalistic Doctrine aspired to establish their state in the largest part of the Former Byzantine Empire.
Megali-Idea was not solely a doctrine; it also became a revolutionary movement. The first revolutionary campaign started in the outskirts of Romania’s regions, but it failed thanks to the counter attacks of Romanian resistance fighters. The second revolutionary campaign took place nearby Istanbul; this attempt was also badly crushed due to a major counter offensive led by the Ottoman Empire Army.
While they completely failed in territories relatively far from Greece, leaders of Greek Nationalism unraveled their third revolutionary stage in the Peloponnese Peninsula. In 1820-1829 the Greek revolutionary movement was supported by the Arvanits, indigenous Albanians in Greece that raised the fighting spirit and stamina within the Greek Army, who also gave their lives and invested almost 90 percent of Greek war heroes as well as invested important statesmen to the foundation of the Modern Greek nation.
The Doctrine of Megali-Idea started a new fight against the Slavs in the Balkans. A new conflict surged between Bulgarians and Serbs, because they belonged to the Eastern Orthodox religion, but they had a Slavic language, culture and identity. Megali – Idea had encountered a greater problem with the Russians, who were also part of the Orthodox Church, but they had a Slavic identity, language, culture and were a very large nation; Russians had amassed great benefits and been equipped with significant geopolitical interests. The Megali-Idea had completely failed within the territories of Southern Slavs and Russian Slavs.
After the failure of Megali-Idea, Greek nationalists were strengthening their nationalist doctrine on regions that were ever more close to their neighborhood. Within these territories Megali-Idea would become a Micro-Idea, while being directed specifically towards Albanian population in northern Greece.
This is why the geopolitical orientation of Greek national doctrine was named after northern Epirus. Megali – Idea of Northern Epirus was expanded during the Balkan Wars (1911-1913) and was developed during the First World War (1914-1918). On this phase begins a massive ethnic cleansing against Albanian population of Chameria, who were an indigenous ethnic group over a large swath of territory with over 13 thousand square kilometers.
The genocide and ethnic cleansing against Albanians of Chameria reached its apex during the Second World War (1943-1945) when the Greek Fascist Monarchy, under the consent of a major power at the time, massacred about five thousand Albanians of Chameria and violently displaced thousands of Albanians towards the mainland.
In consideration to its geostrategic aspects, the Greek nationalist doctrine has not been changed towards the Albanian nation. In 1949 Greek Armed Forces attacked Albanian sovereign territories in its south eastern regions. From 1945 until 1955 Greece blocked the Albanian membership to the United Nations. Such a situation had left both countries, Albania and Greece without any diplomatic relations until 1971, a time when Athens was under the Regime of the Colonels.
Crypto-Northern Epirus even continued after the integration of Greece to the European Union. Behind a curtain of European terminology and contemporary soft diplomatic language the doctrine of Northern Epirus has remained unchanged towards Albania and Albanians.
In our days there are three major sets of problems: there are problems inherited from history, at their center are the issues of Chameria; there exist problems of democratic transition in the Balkans, where immigration, corruption and illegal trafficking are the main concern; there are currently some problems that are very serious in terms of defining a border line between Albania and Greece that would cross the waters of the Ionian and Aegean Seas.
These sets of problems have been accumulated on the diplomatic files of both nations, meanwhile there are no real efforts made to solve them.
We see: high level meetings, public press releases, ambassadors giving interviews, but all of these optimistic actions are not followed-up by the respective negotiating diplomatic teams that could move step by step and bring lasting solutions that are necessary for both governments, nations and throughout the Balkans.
Only recently the Greek Ambassador to Tirana gave an interview. In her public appearance the Greek diplomat stated clearly that her nation’s foreign policy does not recognize the truth of Albanian genocide and ethnic cleansing against Albanian people of Chameria and she estimates that the bilateral border line over the Ionian-Aegean Seas is solved.
In regards to Chameria, this issue is still open. Albanian and Greek Diplomatic channels must consider it seriously and within a permanent negotiating attitude and not in a fragmentary and in a statement-like behavior. Chameria requires long and serious negotiations.
According to the border line on the coast line, the draft agreement signed by the two Ministers of Foreign Affairs in 2009 was considered anti-constitutional by the Constitutional Court of the Republic of Albania. In respect to the legal consequences the agreement of 2009 is null.
A totally new process of negotiations is needed between Albania and Greece in order to draft a brand new bilateral agreement.
|
'''
Please note that this file is an example, not an official Lotame-supported
tool. The Support team at Lotame does not provide support for this script,
as it's only meant to serve as a guide to help you use the Services API.
Filename: update_behavior_aliases.py
Author: Brett Coker
Python Version: 3.6.3
Updated: 12/19/17
Adds new aliases to behaviors. Takes an .xlsx as an argument.
The spreadsheet should be formatted as follows:
- Header row required
- First column is behavior IDs
- Second column is aliases.
'''
import sys
import openpyxl
import better_lotameapi
def main():
    """Replace or append aliases on behaviors listed in an .xlsx file.

    Usage: ``python update_behavior_aliases.py aliases.xlsx``.  The first
    sheet must have a header row, behavior IDs in column A and the new
    alias in column B.
    """
    if len(sys.argv) == 1:
        print(f'Usage: python {sys.argv[0]} aliases.xlsx')
        return

    lotame = better_lotameapi.Lotame()

    option = 0
    while option not in ['1', '2']:
        print('Select option:')
        print('1. Replace variants')
        print('2. Append variants')
        option = input('Option: ')

    filename = sys.argv[1]
    workbook = openpyxl.load_workbook(filename)
    # ``sheetnames``/subscription replace the deprecated (and since
    # openpyxl 3.0 removed) ``get_sheet_names()``/``get_sheet_by_name()``.
    sheet = workbook[workbook.sheetnames[0]]

    # Row 1 is the header, so data starts at row 2.
    for row in range(2, sheet.max_row + 1):
        behavior_id = str(sheet[f'A{row}'].value)
        new_alias = str(sheet[f'B{row}'].value)

        endpoint = f'behaviors/{behavior_id}/aliases'
        info = lotame.get(endpoint).json()

        if option == '1':  # Replace
            info['alias'] = [new_alias]
        else:  # Append
            info['alias'].append(new_alias)

        status = lotame.put(endpoint, info).status_code
        print(f'Behavior {behavior_id} | HTTP {status}')


if __name__ == '__main__':
    main()
|
elf on the shelf skirt the elf on the shelf couture collection collectors edition winter sparkle skirt elf on the shelf skirt canada.
elf on the shelf skirt elf on shelf skirt target.
elf on the shelf skirt the elf on the shelf doll clothes suit for scarf skates clothing set skirts elf on the shelf skirt canada.
elf on the shelf skirt new girl elf on the shelf edition with elf storybook and skirt elf on the shelf girl skirt australia.
elf on the shelf skirt elf on the shelf clothes elf on the shelf skirt ireland.
elf on the shelf skirt on the shelves awesome elf on the shelf couture collection dazzling dress of elf on shelf skirt target.
elf on the shelf skirt share elf on the shelf skirt pattern free.
elf on the shelf skirt elf shelf skirt.
elf on the shelf skirt elf on the shelf clothes tartan plaid skirt and boots w sweater elf shelf skirt.
elf on the shelf skirt loved the skirts and made a little elf in the shelf closet in the tree for the storing and changing of twinkles outfits elf shelf skirt.
elf on the shelf skirt perfect party set boots red black skirt elf on the shelf clothes girl elf on the shelf snowflake skirt pattern.
elf on the shelf skirt making a skirt for the elf on the shelf with gel printed fabric and glue by girl elf on the shelf skirt pattern.
elf on the shelf skirt please leave us the number of each style thanks 6 styles elf shelf elf on the shelf skirt pattern free.
elf on the shelf skirt pictures elf on the shelf snowflake skirt pattern.
elf on the shelf skirt elf circle skirt elf on the shelf skirt canada.
elf on the shelf skirt elf circle skirt b elf on the shelf skirt pattern.
elf on the shelf skirt elf on the shelf clothes elf ideas inspired by the couture elf on shelf clothes amazon elf on the shelf elf on shelf skirt target.
elf on the shelf skirt elf on the shelf reindeer skirt elf on the shelf snowflake skirt pattern.
elf on the shelf skirt the elf on the shelf clothing hello kitty skirt kitty ears boots elf on shelf skirt pattern.
elf on the shelf skirt elf on the shelf skirt tutorial elf on the shelf skirt australia.
elf on the shelf skirt making a skirt for the elf on the shelf with gel printed fabric and glue by elf on shelf skirt target.
|
from json import load
from re import match, search
from typing import Set
alphabet = 'abcdefghijklmnopqrstuvwxyz'

# Build one flat, lowercased set of every word in every category.
# The ``with`` block closes the JSON file: the original ``load(open(...))``
# leaked the file handle.
with open('hangman.json', 'r') as word_file:
    hangman_data = load(word_file)
all_words = [[entry['Word'] for entry in category] for category in hangman_data.values()]
all_words = {word.lower() for category in all_words for word in category}  # type: Set[str]
def solve_all(pattern: str, ignore_set: str = '') -> Set[str]:
    """Return (and print) every known word matching a hangman pattern.

    ``pattern`` uses ``_`` for unknown positions; letters already present
    in the pattern are excluded from the wildcard class.  Words containing
    any letter from ``ignore_set`` are skipped.
    """
    guessed = set(pattern)
    wildcard = '[' + ''.join(c for c in alphabet if c not in guessed) + ']'
    regex = pattern.replace('_', wildcard)

    candidates = {
        word
        for word in all_words
        if len(word) == len(pattern)
        and not (ignore_set and search('[' + ignore_set + ']', word))
        and match(regex, word)
    }
    print(candidates)
    return candidates
def recommend_letter(word_set: Set[str], ignore_set: str = '') -> str:
    """Given a set of possible answers, return a guess to whittle down as many answers as possible.

    The best guess is the letter whose document frequency is closest to
    half the candidate set, i.e. the most "bisecting" guess.
    """
    half = len(word_set) // 2
    scores = {}
    for letter in alphabet:
        if letter in ignore_set:
            continue
        frequency = sum(letter in word for word in word_set)
        scores[letter] = abs(half - frequency)
    # Ties resolve alphabetically, matching the dict's insertion order.
    return min(scores, key=scores.get)
while True:
    status = input('>>> ')
    try:
        # An optional ";letters" suffix lists letters known to be absent.
        ignore = ''
        if ';' in status:
            status, ignore = status.split(';')
        solution_set = solve_all(status, ignore)
        if len(solution_set) == 1:
            print(next(iter(solution_set)))
        else:
            print(recommend_letter(solution_set, ignore))
    except Exception as e:
        # Broad on purpose: keep the interactive loop alive on bad input.
        print(e)
|
Volume XXI has been edited at Curtin Law School, Perth Australia. The Editor-in-Chief is Professor Gabriël A Moens, Professor of Law, Curtin Law School and Emeritus Professor of Law, The University of Queensland. The Review is supervised by an international Board of Editors consisting of leading international trade law practitioners and academics from the European Union, the United States, Asia and Australia.
|
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from collections import defaultdict
from datetime import datetime
from itertools import chain
import dateutil.parser
from indico.modules.rb import rb_settings
from indico.modules.rb.models.reservations import RepeatFrequency
from indico.modules.rb.tasks import roombooking_end_notifications, roombooking_occurrences
pytest_plugins = 'indico.modules.rb.testing.fixtures'
# Global room-booking settings exercised by the test below: days of
# advance notice per repetition frequency, and "booking ending soon"
# lead times in days.
settings = {
    'notification_before_days': 2,
    'notification_before_days_weekly': 5,
    'notification_before_days_monthly': 7,
    'end_notification_daily': 1,
    'end_notification_weekly': 3,
    'end_notification_monthly': 7
}

# The two users that bookings are made for.
users = {
    'x': {'first_name': 'Mister', 'last_name': 'Evil'},
    'y': {'first_name': 'Doctor', 'last_name': 'No'}
}

# Room 'a' inherits every global setting (all None); room 'b' overrides
# them all with its own values.
rooms = {
    'a': {
        'notification_before_days': None,
        'notification_before_days_weekly': None,
        'notification_before_days_monthly': None,
        'end_notification_daily': None,
        'end_notification_weekly': None,
        'end_notification_monthly': None
    },
    'b': {
        'notification_before_days': 10,
        'notification_before_days_weekly': 11,
        'notification_before_days_monthly': 12,
        'end_notification_daily': 2,
        'end_notification_weekly': 4,
        'end_notification_monthly': 8
    }
}

# Upcoming-occurrence fixtures: ``notification`` is the date of the
# occurrence expected to be notified when the task runs on 2017-04-01,
# or None when no notification should be sent.
reservations = [
    {
        'start_dt': '2017-03-31 15:00',
        'end_dt': '2017-04-10 16:00',
        'repeat_frequency': RepeatFrequency.DAY,
        'room': 'a',
        'user': 'x',
        'notification': '2017-04-03',
    },
    {
        'start_dt': '2017-04-03 12:00',
        'end_dt': '2017-04-03 14:00',
        'repeat_frequency': RepeatFrequency.NEVER,
        'room': 'a',
        'user': 'x',
        'notification': '2017-04-03',
    },
    {
        'start_dt': '2017-03-30 12:00',
        'end_dt': '2017-05-04 14:00',
        'repeat_frequency': RepeatFrequency.WEEK,
        'room': 'a',
        'user': 'x',
        'notification': '2017-04-06',
    },
    {
        'start_dt': '2017-04-08 12:00',
        'end_dt': '2017-05-13 14:00',
        'repeat_frequency': RepeatFrequency.MONTH,
        'room': 'a',
        'user': 'y',
        'notification': '2017-04-08',
    },
    {
        'start_dt': '2017-04-11 12:00',
        'end_dt': '2017-04-11 14:00',
        'repeat_frequency': RepeatFrequency.NEVER,
        'room': 'b',
        'user': 'x',
        'notification': '2017-04-11',  # today + 10
    },
    {
        'start_dt': '2017-04-03 12:00',
        'end_dt': '2017-04-03 14:00',
        'repeat_frequency': RepeatFrequency.NEVER,
        'room': 'b',
        'user': 'x',
        'notification': None,  # room has today+10 not today+1
    },
]

# End-of-booking fixtures: ``end_notification`` states whether a
# "booking finishing" notification is expected when the end-notification
# task runs on 2019-07-08.
finishing_reservations = [
    {
        'start_dt': '2019-07-08 12:00',
        'end_dt': '2019-07-08 14:00',
        'repeat_frequency': RepeatFrequency.NEVER,
        'room': 'b',
        'user': 'x',
        'end_notification': False
    },
    {
        'start_dt': '2019-07-07 14:00',
        'end_dt': '2019-07-07 14:30',
        'repeat_frequency': RepeatFrequency.NEVER,
        'room': 'a',
        'user': 'x',
        'end_notification': False
    },
    {
        'start_dt': '2019-07-07 14:30',
        'end_dt': '2019-07-09 15:00',
        'repeat_frequency': RepeatFrequency.DAY,
        'room': 'a',
        'user': 'x',
        'end_notification': True
    },
    {
        'start_dt': '2019-07-07 15:00',
        'end_dt': '2019-07-10 15:10',
        'repeat_frequency': RepeatFrequency.DAY,
        'room': 'a',
        'user': 'x',
        'end_notification': False
    },
    {
        'start_dt': '2019-07-07 15:10',
        'end_dt': '2019-07-10 15:20',
        'repeat_frequency': RepeatFrequency.DAY,
        'room': 'b',
        'user': 'y',
        'end_notification': True
    },
    {
        'start_dt': '2019-07-07 15:20',
        'end_dt': '2019-07-11 15:30',
        'repeat_frequency': RepeatFrequency.DAY,
        'room': 'b',
        'user': 'y',
        'end_notification': False
    },
    {
        'start_dt': '2019-07-05 15:30',
        'end_dt': '2019-07-12 15:40',
        'repeat_frequency': RepeatFrequency.WEEK,
        'room': 'b',
        'user': 'y',
        'end_notification': True
    },
    {
        'start_dt': '2019-07-05 15:40',
        'end_dt': '2019-07-15 15:50',
        'repeat_frequency': RepeatFrequency.WEEK,
        'room': 'b',
        'user': 'y',
        'end_notification': True
    },
    {
        'start_dt': '2019-07-05 15:50',
        'end_dt': '2019-07-19 16:00',
        'repeat_frequency': RepeatFrequency.WEEK,
        'room': 'b',
        'user': 'y',
        'end_notification': False
    },
    {
        'start_dt': '2019-07-04 16:00',
        'end_dt': '2019-07-11 16:10',
        'repeat_frequency': RepeatFrequency.WEEK,
        'room': 'a',
        'user': 'x',
        'end_notification': True
    }
]
def test_roombooking_notifications(mocker, create_user, create_room, create_reservation, freeze_time):
    """End-to-end check of the room-booking notification tasks.

    Builds users/rooms/reservations from the module-level fixtures, then:

    * freezes time at 2017-04-01 and runs ``roombooking_occurrences``,
      asserting exactly the expected occurrences are notified;
    * freezes time at 2019-07-08 and runs ``roombooking_end_notifications``,
      asserting end notifications match the ``end_notification`` flags.
    """
    rb_settings.set_multi(settings)
    # ``items()`` instead of the Python-2-only ``iteritems()`` so the test
    # also runs on Python 3 (identical behaviour on Python 2).
    user_map = {key: create_user(id_, **data) for id_, (key, data) in enumerate(users.items(), 1)}
    room_map = {key: create_room(**data) for key, data in rooms.items()}
    notification_map = defaultdict(dict)
    end_notification_map = defaultdict(dict)
    for data in chain(reservations, finishing_reservations):
        data['start_dt'] = dateutil.parser.parse(data['start_dt'])
        data['end_dt'] = dateutil.parser.parse(data['end_dt'])
        data['booked_for_user'] = user = user_map[data.pop('user')]
        data['room'] = room_map[data['room']]
        notification = data.pop('notification', None)
        end_notification = data.pop('end_notification', None)
        reservation = create_reservation(**data)
        if notification:
            notification_map[user][reservation] = dateutil.parser.parse(notification).date()
        if end_notification is not None:
            end_notification_map[user][reservation] = end_notification
    # Patch the notifiers so we can inspect what the tasks would send.
    notify_upcoming_occurrences = mocker.patch('indico.modules.rb.tasks.notify_upcoming_occurrences')
    notify_about_finishing_bookings = mocker.patch('indico.modules.rb.tasks.notify_about_finishing_bookings')
    freeze_time(datetime(2017, 4, 1, 8, 0, 0))
    roombooking_occurrences()
    for (user, occurrences), __ in notify_upcoming_occurrences.call_args_list:
        notifications = notification_map.pop(user)
        for occ in occurrences:
            date = notifications.pop(occ.reservation)
            assert occ.start_dt.date() == date
            assert occ.notification_sent
            # Only the notified occurrence (and, for daily bookings, its
            # future siblings) may be flagged as sent.
            past_occs = [x for x in occ.reservation.occurrences if x.start_dt.date() < date.today()]
            future_occs = [x for x in occ.reservation.occurrences if x.start_dt.date() > date.today() and x != occ]
            assert not any(x.notification_sent for x in past_occs)
            if occ.reservation.repeat_frequency == RepeatFrequency.DAY:
                assert all(x.notification_sent for x in future_occs)
            else:
                assert not any(x.notification_sent for x in future_occs)
        assert not notifications  # no extra notifications
    assert not notification_map  # no extra users
    freeze_time(datetime(2019, 7, 8, 8, 0, 0))
    roombooking_end_notifications()
    for (user, user_finishing_reservations), __ in notify_about_finishing_bookings.call_args_list:
        end_notifications = end_notification_map.pop(user)
        for reservation in user_finishing_reservations:
            should_be_sent = end_notifications.pop(reservation)
            assert reservation.end_notification_sent == should_be_sent
        # Reservations not reported by the task must not be flagged.
        assert all(not r.end_notification_sent for r in end_notifications)
    assert not end_notification_map
|
Let's start. Here is the first overview before we dive into the details.
In Python, there is the aphorism from the Zen of Python (Tim Peters): "Explicit is better than implicit". This is a kind of a meta-rule in Python for writing good code. This meta-rule holds, in particular, true for the next two rules in the C++ core guidelines.
The variable cannot be changed by accident.
const or constexpr variables are by definition thread-safe.
const: You have to guarantee that the variable is initialised in a thread-safe way.
constexpr: The C++ runtime guarantees, that the variable is initialised in a thread-safe way.
Do you like such kind of code?
I hope not. Put the declaration of i into the for loop and you are fine. i will be bound to the lifetime of the for loop.
With C++17, you can declare your i just in an if or switch statement: C++17 - What's new in the language?
10 years ago, I thought that creating a variable length array on the stack is ISO C++.
In the first case, you should use an std::array and in the second case you can use a gsl::stack_array from the Guideline support library (GSL).
Why should you use std::array instead of a C-array, or gsl::stack_array instead of a C-array?
Variable length arrays such as int a2[m] are a security risk, because you may execute arbitrary code or get stack exhaustion.
I sometimes hear the question in my seminars: Why should I invoke a lambda function just in place? This rule gives an answer. You can put complex initialisation in it. This in-place invocation is very valuable if your variable should become const.
If you don't want to modify your variable after the initialisation, you should make it const according to the previous rule R.25. Fine. But sometimes the initialisation of the variable consist of more steps; therefore, you can make it not const.
Have a look here. The widget x in the following example should be const after its initialisation. It cannot be const because it will be changed a few times during its initialisation.
Now, a lambda function comes to our rescue. Put the initialisation stuff into a lambda function, capture the environment by reference, and initialise your const variable with the in-place invoked lambda function.
Admittedly, it looks a little bit strange to invoke a lambda function just in place, but from the conceptional view, I like it. You put the whole initialisation stuff just in a function body.
I will only paraphrase the next four rules on macros. Don't use macros for program text manipulation or for constants and functions. If you have to use them, use unique names with ALL_CAPS.
Right! Don't define a (C-style) variadic function. Since C++11 we have variadic templates and since C++17 we have fold expressions. That is all we need.
You probably quite often used the (C-style) variadic function: printf. printf accepts a format string and arbitrary numbers of arguments and displays its arguments respectively. A call of printf has undefined behaviour if you don't use the correct format specifiers or the number of your arguments isn't correct.
By using variadic templates you can implement a type-safe printf function. Here is the simplified version of printf based on cppreference.com.
myPrintf can accept an arbitrary number of arguments. If arbitrary means 0, the first overload (1) is used. If arbitrary means more than 0, the second overload (2) is used. The function template (2) is quite interesting. It can accept an arbitrary number of arguments, but the number must be greater than 0. The first argument will be bound to value and written to std::cout (3). The rest of the arguments will be used in (4) to make a recursive call. This recursive call will create another function template myPrintf accepting one argument less. This recursion will go down to zero arguments. In this case, the function myPrintf (1) kicks in as the boundary condition.
myPrintf is type-safe because all output will be handled by std::cout. This simplified implementation cannot deal with format strings such as %d, %f or 5.5f.
There is a lot to write about expression. The C++ core guidelines has about 25 rules for them; therefore, my next post will deal with expression.
Thanks a lot to my Patreon Supporters: Eric Pederson, Paul Baxter, Carlos Gomes Martinho, and SAI RAGHAVENDRA PRASAD POOSA.
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration (auto-generated): create the ``Talk`` model
    and its ``speakers`` many-to-many join table."""

    def forwards(self, orm):
        # Adding model 'Talk'
        db.create_table(u'core_talk', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('description', self.gf('django.db.models.fields.TextField')()),
            ('start_time', self.gf('django.db.models.fields.TimeField')(blank=True)),
        ))
        db.send_create_signal(u'core', ['Talk'])

        # Adding M2M table for field speakers on 'Talk'
        m2m_table_name = db.shorten_name(u'core_talk_speakers')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('talk', models.ForeignKey(orm[u'core.talk'], null=False)),
            ('speaker', models.ForeignKey(orm[u'core.speaker'], null=False))
        ))
        # Each speaker may be linked to a given talk at most once.
        db.create_unique(m2m_table_name, ['talk_id', 'speaker_id'])

    def backwards(self, orm):
        # Deleting model 'Talk'
        db.delete_table(u'core_talk')

        # Removing M2M table for field speakers on 'Talk'
        db.delete_table(db.shorten_name(u'core_talk_speakers'))

    # Frozen ORM state South uses to build the ``orm`` object above.
    models = {
        u'core.contact': {
            'Meta': {'object_name': 'Contact'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kind': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
            'speaker': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Speaker']"}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'core.speaker': {
            'Meta': {'object_name': 'Speaker'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
        },
        u'core.talk': {
            'Meta': {'object_name': 'Talk'},
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'speakers': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.Speaker']", 'symmetrical': 'False'}),
            'start_time': ('django.db.models.fields.TimeField', [], {'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        }
    }

    complete_apps = ['core']
|
Recently, we have proposed a new image device called gigavision camera whose most important characteristic is that pixels have binary response. The response function of a gigavision sensor is non-linear and similar to a logarithmic function, which makes the camera suitable for high dynamic range imaging. One important parameter in the gigavision camera is the threshold for generating binary pixels. Threshold T relates to the number of photo-electrons necessary for the pixel output to switch from "0" to "1". In this paper, a theoretical analysis of the threshold influence in the gigavision camera is given. If the threshold in the gigavision sensor is large, there will be a "dead zone" in the response function of a gigavision sensor. A method of adding artificial light is proposed to solve the "dead zone" problem. Through theoretical analysis and experimental results based on synthesized images, we show that for high light intensity, the gigavision camera with a large threshold and added light works better than one with unity threshold. Experimental results with a prototype camera based on a single photon avalanche diodes (SPAD) camera are also presented.
|
#!/usr/bin/env python
import boto.ec2
import boto.vpc
from local_settings import AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY
import pprint
def region_connect(region_name):
    """Open and return a VPC connection for *region_name*.

    Uses the AWS credentials imported from local_settings.

    :param region_name: AWS region, e.g. 'ap-southeast-2'
    :return: a boto VPC connection object
    """
    # BUG FIX: the original also opened an EC2 connection (ec2_conn) that was
    # never used or returned — a wasted connection; it has been removed.
    vpc_conn = boto.vpc.connect_to_region(region_name,
        aws_access_key_id=AWS_ACCESS_KEY_ID,
        aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
    return vpc_conn
def get_all_routetables(vpc_conn, filters={}):
raw_route_tables = vpc_conn.get_all_route_tables(filters=filters)
for rt in raw_route_tables:
#pprint.pprint(rt.__dict__)
for a in rt.associations:
if not a.subnet_id:
continue
pprint.pprint(a.__dict__)
for r in rt.routes:
gateway = r.gateway_id
if r.instance_id:
gateway = r.instance_id
print "%-20s -> %s" % (r.destination_cidr_block, gateway)
print "=="
def get_all_subnets(vpc_conn, filters=None):
    """Print route tables for the VPC of every subnet in the region.

    :param vpc_conn: an open boto VPC connection
    :param filters: accepted for interface compatibility.
        NOTE(review): it is never forwarded to get_all_subnets() — confirm
        whether it should be before wiring it through.
    """
    # BUG FIX: mutable default argument ({}) replaced with None.
    raw_subnet_list = vpc_conn.get_all_subnets()
    for s in raw_subnet_list:
        get_all_routetables(vpc_conn, filters={'vpc_id': s.vpc_id})
def get_all_internet_gateways(vpc_conn, filters={}):
raw_igw_list = vpc_conn.get_all_internet_gateways(filters=filters)
for igw in raw_igw_list:
print igw
def main():
    """Entry point: dump subnet route tables for the Sydney region."""
    connection = region_connect('ap-southeast-2')
    get_all_subnets(connection)
if __name__ == '__main__':
    main()
|
NTELS has developed a big data analytics service that can predict traffic accident risk by collaborating with Korea Road Traffic Authority and Korea University.
Many large cities face the problem of traffic accidents and overall congestion, and there is an urgent need to institute new measures to combat it. NTELS expects that this new service — built on cutting-edge technologies such as artificial intelligence (AI) and big data analytics — will help cities make roads safer and give drivers more timely information about surrounding road conditions, for example a warning that a traffic accident may be about to happen. It will be expanded nationwide as a total traffic safety service.
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
#import cherrypy
import cherrypy.lib.auth_basic
import os.path
import sickbeard
from sickbeard import logger
from sickbeard.webserve import WebInterface
from sickbeard.helpers import create_https_certificates
def initWebServer(options=None):
    """Configure and start the CherryPy web server that serves Sick Beard.

    :param options: dict of server settings. The caller must supply
        'data_root', 'enable_https', 'https_cert' and 'https_key'; the keys
        defaulted below ('port', 'host', 'log_dir', 'username', 'password',
        'web_root') are optional.
    """
    # BUG FIX: the original used a mutable default dict and then mutated it
    # with setdefault(), so defaults leaked between calls. Work on a copy so
    # neither the default nor the caller's dict is mutated.
    options = dict(options or {})
    options.setdefault('port', 8081)
    options.setdefault('host', '0.0.0.0')
    options.setdefault('log_dir', None)
    options.setdefault('username', '')
    options.setdefault('password', '')
    options.setdefault('web_root', '/')
    assert isinstance(options['port'], int)
    assert 'data_root' in options

    def http_error_401_handler(status, message, traceback, version):
        """ Custom handler for 401 error """
        # Anything other than a plain auth failure is unexpected: log it.
        if status != "401 Unauthorized":
            logger.log(u"CherryPy caught an error: %s %s" % (status, message), logger.ERROR)
            logger.log(traceback, logger.DEBUG)
        return r'''
<html>
    <head>
        <title>%s</title>
    </head>
    <body>
        <br/>
        <font color="#0000FF">Error %s: You need to provide a valid username and password.</font>
    </body>
</html>
''' % ('Access denied', status)

    def http_error_404_handler(status, message, traceback, version):
        """ Custom handler for 404 error, redirect back to main page """
        return r'''
<html>
    <head>
        <title>404</title>
        <script type="text/javascript" charset="utf-8">
          <!--
          location.href = "%s"
          //-->
        </script>
    </head>
    <body>
        <br/>
    </body>
</html>
''' % '/'

    # cherrypy setup
    enable_https = options['enable_https']
    https_cert = options['https_cert']
    https_key = options['https_key']
    if enable_https:
        # If either the HTTPS certificate or key do not exist, make some
        # self-signed ones.
        if not (https_cert and os.path.exists(https_cert)) or not (https_key and os.path.exists(https_key)):
            if not create_https_certificates(https_cert, https_key):
                logger.log(u"Unable to create cert/key files, disabling HTTPS")
                sickbeard.ENABLE_HTTPS = False
                enable_https = False
        # Even after attempting generation, fall back to HTTP if the files
        # still are not there.
        if not (os.path.exists(https_cert) and os.path.exists(https_key)):
            logger.log(u"Disabled HTTPS because of missing CERT and KEY files", logger.WARNING)
            sickbeard.ENABLE_HTTPS = False
            enable_https = False

    options_dict = {
        'server.socket_port': options['port'],
        'server.socket_host': options['host'],
        'log.screen': False,
        'error_page.401': http_error_401_handler,
        'error_page.404': http_error_404_handler,
    }
    if enable_https:
        options_dict['server.ssl_certificate'] = https_cert
        options_dict['server.ssl_private_key'] = https_key
        protocol = "https"
    else:
        protocol = "http"

    logger.log(u"Starting Sick Beard on " + protocol + "://" + str(options['host']) + ":" + str(options['port']) + "/")
    cherrypy.config.update(options_dict)

    # setup cherrypy logging
    if options['log_dir'] and os.path.isdir(options['log_dir']):
        cherrypy.config.update({'log.access_file': os.path.join(options['log_dir'], "cherrypy.log")})
        logger.log('Using %s for cherrypy log' % cherrypy.config['log.access_file'])

    # Static assets are served relative to data_root; responses are UTF-8.
    conf = {
        '/': {
            'tools.staticdir.root': options['data_root'],
            'tools.encode.on': True,
            'tools.encode.encoding': 'utf-8',
        },
        '/images': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': 'images'
        },
        '/js': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': 'js'
        },
        '/css': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': 'css'
        },
    }
    app = cherrypy.tree.mount(WebInterface(), options['web_root'], conf)

    # auth: only enabled when both a username and a password are configured.
    if options['username'] != "" and options['password'] != "":
        checkpassword = cherrypy.lib.auth_basic.checkpassword_dict({options['username']: options['password']})
        app.merge({
            '/': {
                'tools.auth_basic.on': True,
                'tools.auth_basic.realm': 'SickBeard',
                'tools.auth_basic.checkpassword': checkpassword
            },
            '/api': {
                # NOTE(review): basic auth is deliberately disabled for /api
                # (except /api/builder below) — presumably the API has its own
                # auth scheme; confirm before changing.
                'tools.auth_basic.on': False
            },
            '/api/builder': {
                'tools.auth_basic.on': True,
                'tools.auth_basic.realm': 'SickBeard',
                'tools.auth_basic.checkpassword': checkpassword
            }
        })

    cherrypy.server.start()
    cherrypy.server.wait()
|
Garfield: Assassination by Granger - 5.625" x 8.000"
Garfield: Assassination art print by Granger. Our prints are produced on acid-free papers using archival inks to guarantee that they last a lifetime without fading or loss of color. All art prints include a 1" white border around the image to allow for future framing and matting, if desired.
There are no comments for Garfield: Assassination. Click here to post the first comment.
GARFIELD: ASSASSINATION. Posting the bulletins about the assassination of President James A. Garfield outside the offices of the New York 'Herald' on 2 July 1881. Wood engraving from an English newspaper of 1881.
|
# -*- encoding: utf-8 -*-
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An :ref:`Audit <audit_definition>` may be launched several times with the same
settings (:ref:`Goal <goal_definition>`, thresholds, ...). Therefore it makes
sense to save those settings in some sort of Audit preset object, which is
known as an :ref:`Audit Template <audit_template_definition>`.
An :ref:`Audit Template <audit_template_definition>` contains at least the
:ref:`Goal <goal_definition>` of the :ref:`Audit <audit_definition>`.
It may also contain some error handling settings indicating whether:
- :ref:`Watcher Applier <watcher_applier_definition>` stops the
entire operation
- :ref:`Watcher Applier <watcher_applier_definition>` performs a rollback
and how many retries should be attempted before failure occurs (also the latter
can be complex: for example the scenario in which there are many first-time
failures on ultimately successful :ref:`Actions <action_definition>`).
Moreover, an :ref:`Audit Template <audit_template_definition>` may contain some
settings related to the level of automation for the
:ref:`Action Plan <action_plan_definition>` that will be generated by the
:ref:`Audit <audit_definition>`.
A flag will indicate whether the :ref:`Action Plan <action_plan_definition>`
will be launched automatically or will need a manual confirmation from the
:ref:`Administrator <administrator_definition>`.
"""
import datetime
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from watcher._i18n import _
from watcher.api.controllers import base
from watcher.api.controllers import link
from watcher.api.controllers.v1 import collection
from watcher.api.controllers.v1 import types
from watcher.api.controllers.v1 import utils as api_utils
from watcher.common import context as context_utils
from watcher.common import exception
from watcher.common import policy
from watcher.common import utils as common_utils
from watcher.decision_engine.loading import default as default_loading
from watcher import objects
def hide_fields_in_newer_versions(obj):
    """This method hides fields that were added in newer API versions.
    Certain node fields were introduced at certain API versions.
    These fields are only made available when the request's API version
    matches or exceeds the versions when these fields were introduced.
    """
    # Currently a no-op: no version-gated fields exist yet for this resource.
    pass
class AuditTemplatePostType(wtypes.Base):
    """Validated representation of the POST body for creating audit templates."""

    _ctx = context_utils.make_context()
    name = wtypes.wsattr(wtypes.text, mandatory=True)
    """Name of this audit template"""
    description = wtypes.wsattr(wtypes.text, mandatory=False)
    """Short description of this audit template"""
    goal = wtypes.wsattr(wtypes.text, mandatory=True)
    """Goal UUID or name of the audit template"""
    strategy = wtypes.wsattr(wtypes.text, mandatory=False)
    """Strategy UUID or name of the audit template"""
    scope = wtypes.wsattr(types.jsontype, mandatory=False, default=[])
    """Audit Scope"""
    def as_audit_template(self):
        """Build an AuditTemplate API object from this POST payload."""
        return AuditTemplate(
            name=self.name,
            description=self.description,
            goal_id=self.goal,  # Dirty trick ...
            goal=self.goal,
            strategy_id=self.strategy,  # Dirty trick ...
            strategy_uuid=self.strategy,
            scope=self.scope,
        )
    @staticmethod
    def _build_schema():
        """Return the JSON schema used to validate the audit scope."""
        SCHEMA = {
            "$schema": "http://json-schema.org/draft-04/schema#",
            "type": "array",
            "items": {
                "type": "object",
                "properties": AuditTemplatePostType._get_schemas(),
                "additionalProperties": False
            }
        }
        return SCHEMA
    @staticmethod
    def _get_schemas():
        """Collect per-collector scope schemas from the loaded data models."""
        collectors = default_loading.ClusterDataModelCollectorLoader(
        ).list_available()
        schemas = {k: c.SCHEMA for k, c
                   in collectors.items() if hasattr(c, "SCHEMA")}
        return schemas
    @staticmethod
    def validate(audit_template):
        """Validate the goal, strategy and scope of a posted audit template.

        Resolves goal/strategy names to UUIDs, checks that the strategy
        belongs to the selected goal, and validates the scope against the
        collectors' JSON schemas.

        :raises: InvalidGoal, InvalidStrategy, Invalid
        """
        available_goals = objects.Goal.list(AuditTemplatePostType._ctx)
        available_goal_uuids_map = {g.uuid: g for g in available_goals}
        available_goal_names_map = {g.name: g for g in available_goals}
        if audit_template.goal in available_goal_uuids_map:
            goal = available_goal_uuids_map[audit_template.goal]
        elif audit_template.goal in available_goal_names_map:
            goal = available_goal_names_map[audit_template.goal]
        else:
            raise exception.InvalidGoal(goal=audit_template.goal)
        if audit_template.scope:
            keys = [list(s)[0] for s in audit_template.scope]
            if keys[0] not in ('compute', 'storage'):
                # Wrap a bare rule list under 'compute' so it matches the
                # schema built above.
                audit_template.scope = [dict(compute=audit_template.scope)]
            common_utils.Draft4Validator(
                AuditTemplatePostType._build_schema()
            ).validate(audit_template.scope)
            # host_aggregates may appear in an include rule or inside an
            # 'exclude' rule, but not both at once.
            include_host_aggregates = False
            exclude_host_aggregates = False
            for rule in audit_template.scope[0]['compute']:
                if 'host_aggregates' in rule:
                    include_host_aggregates = True
                elif 'exclude' in rule:
                    for resource in rule['exclude']:
                        if 'host_aggregates' in resource:
                            exclude_host_aggregates = True
            if include_host_aggregates and exclude_host_aggregates:
                raise exception.Invalid(
                    message=_(
                        "host_aggregates can't be "
                        "included and excluded together"))
        if audit_template.strategy:
            try:
                if (common_utils.is_uuid_like(audit_template.strategy) or
                        common_utils.is_int_like(audit_template.strategy)):
                    strategy = objects.Strategy.get(
                        AuditTemplatePostType._ctx, audit_template.strategy)
                else:
                    strategy = objects.Strategy.get_by_name(
                        AuditTemplatePostType._ctx, audit_template.strategy)
            except Exception:
                raise exception.InvalidStrategy(
                    strategy=audit_template.strategy)
            # Check that the strategy we indicate is actually related to the
            # specified goal
            if strategy.goal_id != goal.id:
                available_strategies = objects.Strategy.list(
                    AuditTemplatePostType._ctx)
                choices = ["'%s' (%s)" % (s.uuid, s.name)
                           for s in available_strategies]
                # BUG FIX: the message previously read "does relate", which
                # inverted its meaning.
                raise exception.InvalidStrategy(
                    message=_(
                        "'%(strategy)s' strategy does not relate to the "
                        "'%(goal)s' goal. Possible choices: %(choices)s")
                    % dict(strategy=strategy.name, goal=goal.name,
                           choices=", ".join(choices)))
            audit_template.strategy = strategy.uuid
        # We force the UUID so that we do not need to query the DB with the
        # name afterwards
        audit_template.goal = goal.uuid
        return audit_template
class AuditTemplatePatchType(types.JsonPatchType):
    """JSON PATCH type for audit templates.

    Rewrites user-facing goal/strategy references (UUID or name) into the
    internal numeric id paths before the patch is applied.
    """
    _ctx = context_utils.make_context()
    @staticmethod
    def mandatory_attrs():
        return []
    @staticmethod
    def validate(patch):
        if patch.path == "/goal":
            if patch.op == "remove":
                # The goal is mandatory, so it can never be removed.
                raise exception.OperationNotPermitted(
                    _("Cannot remove 'goal' attribute "
                      "from an audit template"))
            AuditTemplatePatchType._validate_goal(patch)
        if patch.path == "/strategy":
            AuditTemplatePatchType._validate_strategy(patch)
        return types.JsonPatchType.validate(patch)
    @staticmethod
    def _validate_goal(patch):
        """Rewrite a /goal patch to target /goal_id with the numeric id."""
        patch.path = "/goal_id"
        goal_ref = patch.value
        if not goal_ref:
            return
        goals = objects.Goal.list(AuditTemplatePatchType._ctx)
        by_uuid = {g.uuid: g for g in goals}
        by_name = {g.name: g for g in goals}
        # UUID matches take precedence over name matches.
        resolved = by_uuid.get(goal_ref, by_name.get(goal_ref))
        if resolved is None:
            raise exception.InvalidGoal(goal=goal_ref)
        patch.value = resolved.id
    @staticmethod
    def _validate_strategy(patch):
        """Rewrite a /strategy patch to target /strategy_id with the id."""
        patch.path = "/strategy_id"
        strategy_ref = patch.value
        if not strategy_ref:
            return
        strategies = objects.Strategy.list(AuditTemplatePatchType._ctx)
        by_uuid = {s.uuid: s for s in strategies}
        by_name = {s.name: s for s in strategies}
        # UUID matches take precedence over name matches.
        resolved = by_uuid.get(strategy_ref, by_name.get(strategy_ref))
        if resolved is None:
            raise exception.InvalidStrategy(strategy=strategy_ref)
        patch.value = resolved.id
class AuditTemplate(base.APIBase):
    """API representation of an audit template.
    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of an
    audit template.
    """
    # Backing caches for the goal/strategy wsproperty accessors below.
    _goal_uuid = None
    _goal_name = None
    _strategy_uuid = None
    _strategy_name = None
    def _get_goal(self, value):
        # Resolve a goal reference (UUID, numeric id or name) to a Goal
        # object. Side effect: stores the numeric id on self.goal_id when
        # found. Returns None when value is unset or no goal matches.
        if value == wtypes.Unset:
            return None
        goal = None
        try:
            if (common_utils.is_uuid_like(value) or
                    common_utils.is_int_like(value)):
                goal = objects.Goal.get(
                    pecan.request.context, value)
            else:
                goal = objects.Goal.get_by_name(
                    pecan.request.context, value)
        except exception.GoalNotFound:
            # Unknown goals are tolerated here; the setters simply leave the
            # cached value as None.
            pass
        if goal:
            self.goal_id = goal.id
        return goal
    def _get_strategy(self, value):
        # Same resolution logic as _get_goal, but for strategies; stores
        # strategy_id as a side effect.
        if value == wtypes.Unset:
            return None
        strategy = None
        try:
            if (common_utils.is_uuid_like(value) or
                    common_utils.is_int_like(value)):
                strategy = objects.Strategy.get(
                    pecan.request.context, value)
            else:
                strategy = objects.Strategy.get_by_name(
                    pecan.request.context, value)
        except exception.StrategyNotFound:
            pass
        if strategy:
            self.strategy_id = strategy.id
        return strategy
    def _get_goal_uuid(self):
        return self._goal_uuid
    def _set_goal_uuid(self, value):
        # Only hit the DB when the value actually changes.
        if value and self._goal_uuid != value:
            self._goal_uuid = None
            goal = self._get_goal(value)
            if goal:
                self._goal_uuid = goal.uuid
    def _get_strategy_uuid(self):
        return self._strategy_uuid
    def _set_strategy_uuid(self, value):
        if value and self._strategy_uuid != value:
            self._strategy_uuid = None
            strategy = self._get_strategy(value)
            if strategy:
                self._strategy_uuid = strategy.uuid
    def _get_goal_name(self):
        return self._goal_name
    def _set_goal_name(self, value):
        if value and self._goal_name != value:
            self._goal_name = None
            goal = self._get_goal(value)
            if goal:
                self._goal_name = goal.name
    def _get_strategy_name(self):
        return self._strategy_name
    def _set_strategy_name(self, value):
        if value and self._strategy_name != value:
            self._strategy_name = None
            strategy = self._get_strategy(value)
            if strategy:
                self._strategy_name = strategy.name
    uuid = wtypes.wsattr(types.uuid, readonly=True)
    """Unique UUID for this audit template"""
    name = wtypes.text
    """Name of this audit template"""
    description = wtypes.wsattr(wtypes.text, mandatory=False)
    """Short description of this audit template"""
    goal_uuid = wtypes.wsproperty(
        wtypes.text, _get_goal_uuid, _set_goal_uuid, mandatory=True)
    """Goal UUID the audit template refers to"""
    goal_name = wtypes.wsproperty(
        wtypes.text, _get_goal_name, _set_goal_name, mandatory=False)
    """The name of the goal this audit template refers to"""
    strategy_uuid = wtypes.wsproperty(
        wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False)
    """Strategy UUID the audit template refers to"""
    strategy_name = wtypes.wsproperty(
        wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False)
    """The name of the strategy this audit template refers to"""
    audits = wtypes.wsattr([link.Link], readonly=True)
    """Links to the collection of audits contained in this audit template"""
    links = wtypes.wsattr([link.Link], readonly=True)
    """A list containing a self link and associated audit template links"""
    scope = wtypes.wsattr(types.jsontype, mandatory=False)
    """Audit Scope"""
    def __init__(self, **kwargs):
        super(AuditTemplate, self).__init__()
        self.fields = []
        fields = list(objects.AuditTemplate.fields)
        for k in fields:
            # Skip fields we do not expose.
            if not hasattr(self, k):
                continue
            self.fields.append(k)
            setattr(self, k, kwargs.get(k, wtypes.Unset))
        self.fields.append('goal_id')
        self.fields.append('strategy_id')
        setattr(self, 'strategy_id', kwargs.get('strategy_id', wtypes.Unset))
        # goal_uuid & strategy_uuid are not part of
        # objects.AuditTemplate.fields because they're API-only attributes.
        self.fields.append('goal_uuid')
        self.fields.append('goal_name')
        self.fields.append('strategy_uuid')
        self.fields.append('strategy_name')
        # Seeding these from the *_id kwargs triggers the wsproperty setters
        # above, which resolve the id into a UUID/name via the DB.
        setattr(self, 'goal_uuid', kwargs.get('goal_id', wtypes.Unset))
        setattr(self, 'goal_name', kwargs.get('goal_id', wtypes.Unset))
        setattr(self, 'strategy_uuid',
                kwargs.get('strategy_id', wtypes.Unset))
        setattr(self, 'strategy_name',
                kwargs.get('strategy_id', wtypes.Unset))
    @staticmethod
    def _convert_with_links(audit_template, url, expand=True):
        # When not expanded, keep only the summary fields.
        if not expand:
            audit_template.unset_fields_except(
                ['uuid', 'name', 'goal_uuid', 'goal_name',
                 'scope', 'strategy_uuid', 'strategy_name'])
        # The numeric ID should not be exposed to
        # the user, it's internal only.
        audit_template.goal_id = wtypes.Unset
        audit_template.strategy_id = wtypes.Unset
        audit_template.links = [link.Link.make_link('self', url,
                                                    'audit_templates',
                                                    audit_template.uuid),
                                link.Link.make_link('bookmark', url,
                                                    'audit_templates',
                                                    audit_template.uuid,
                                                    bookmark=True)]
        return audit_template
    @classmethod
    def convert_with_links(cls, rpc_audit_template, expand=True):
        """Build an API object (with self/bookmark links) from an RPC one."""
        audit_template = AuditTemplate(**rpc_audit_template.as_dict())
        hide_fields_in_newer_versions(audit_template)
        return cls._convert_with_links(audit_template, pecan.request.host_url,
                                       expand)
    @classmethod
    def sample(cls, expand=True):
        """Return a sample audit template (used for API documentation)."""
        sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c',
                     name='My Audit Template',
                     description='Description of my audit template',
                     goal_uuid='83e44733-b640-40e2-8d8a-7dd3be7134e6',
                     strategy_uuid='367d826e-b6a4-4b70-bc44-c3f6fe1c9986',
                     created_at=datetime.datetime.utcnow(),
                     deleted_at=None,
                     updated_at=datetime.datetime.utcnow(),
                     scope=[],)
        return cls._convert_with_links(sample, 'http://localhost:9322', expand)
class AuditTemplateCollection(collection.Collection):
    """API representation of a collection of audit templates."""
    audit_templates = [AuditTemplate]
    """A list containing audit templates objects"""
    def __init__(self, **kwargs):
        super(AuditTemplateCollection, self).__init__()
        self._type = 'audit_templates'
    @staticmethod
    def convert_with_links(rpc_audit_templates, limit, url=None, expand=False,
                           **kwargs):
        """Wrap a list of RPC audit templates into a linked collection."""
        result = AuditTemplateCollection()
        result.audit_templates = [
            AuditTemplate.convert_with_links(template, expand)
            for template in rpc_audit_templates]
        # Pagination link to the next page, if any.
        result.next = result.get_next(limit, url=url, **kwargs)
        return result
    @classmethod
    def sample(cls):
        """Return a sample collection (used for API documentation)."""
        sample_collection = cls()
        sample_collection.audit_templates = [
            AuditTemplate.sample(expand=False)]
        return sample_collection
class AuditTemplatesController(rest.RestController):
    """REST controller for AuditTemplates."""
    def __init__(self):
        super(AuditTemplatesController, self).__init__()
    from_audit_templates = False
    """A flag to indicate if the requests to this controller are coming
    from the top-level resource AuditTemplates."""
    # Extra (non-CRUD) actions exposed by this controller.
    _custom_actions = {
        'detail': ['GET'],
    }
    def _get_audit_templates_collection(self, filters, marker, limit,
                                        sort_key, sort_dir, expand=False,
                                        resource_url=None):
        """Shared implementation behind get_all() and detail().

        Validates sorting/filtering parameters, fetches the audit templates
        from the DB (sorting in the API layer when the sort key is an
        API-only field) and wraps them into a linked collection.
        """
        # These fields exist only on the API object, not in the DB model, so
        # sorting by them must happen after the DB query.
        additional_fields = ["goal_uuid", "goal_name", "strategy_uuid",
                             "strategy_name"]
        api_utils.validate_sort_key(
            sort_key, list(objects.AuditTemplate.fields) + additional_fields)
        api_utils.validate_search_filters(
            filters, list(objects.AuditTemplate.fields) + additional_fields)
        limit = api_utils.validate_limit(limit)
        api_utils.validate_sort_dir(sort_dir)
        marker_obj = None
        if marker:
            marker_obj = objects.AuditTemplate.get_by_uuid(
                pecan.request.context,
                marker)
        need_api_sort = api_utils.check_need_api_sort(sort_key,
                                                      additional_fields)
        # Defer sorting to the API layer when the key is API-only.
        sort_db_key = (sort_key if not need_api_sort
                       else None)
        audit_templates = objects.AuditTemplate.list(
            pecan.request.context, filters, limit, marker_obj,
            sort_key=sort_db_key, sort_dir=sort_dir)
        audit_templates_collection = \
            AuditTemplateCollection.convert_with_links(
                audit_templates, limit, url=resource_url, expand=expand,
                sort_key=sort_key, sort_dir=sort_dir)
        if need_api_sort:
            api_utils.make_api_sort(
                audit_templates_collection.audit_templates, sort_key,
                sort_dir)
        return audit_templates_collection
    @wsme_pecan.wsexpose(AuditTemplateCollection, wtypes.text, wtypes.text,
                         types.uuid, int, wtypes.text, wtypes.text)
    def get_all(self, goal=None, strategy=None, marker=None,
                limit=None, sort_key='id', sort_dir='asc'):
        """Retrieve a list of audit templates.
        :param goal: goal UUID or name to filter by
        :param strategy: strategy UUID or name to filter by
        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        """
        context = pecan.request.context
        policy.enforce(context, 'audit_template:get_all',
                       action='audit_template:get_all')
        # Choose the UUID or name filter depending on what the caller passed.
        filters = {}
        if goal:
            if common_utils.is_uuid_like(goal):
                filters['goal_uuid'] = goal
            else:
                filters['goal_name'] = goal
        if strategy:
            if common_utils.is_uuid_like(strategy):
                filters['strategy_uuid'] = strategy
            else:
                filters['strategy_name'] = strategy
        return self._get_audit_templates_collection(
            filters, marker, limit, sort_key, sort_dir)
    @wsme_pecan.wsexpose(AuditTemplateCollection, wtypes.text, wtypes.text,
                         types.uuid, int, wtypes.text, wtypes.text)
    def detail(self, goal=None, strategy=None, marker=None,
               limit=None, sort_key='id', sort_dir='asc'):
        """Retrieve a list of audit templates with detail.
        :param goal: goal UUID or name to filter by
        :param strategy: strategy UUID or name to filter by
        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        """
        context = pecan.request.context
        policy.enforce(context, 'audit_template:detail',
                       action='audit_template:detail')
        # NOTE(lucasagomes): /detail should only work agaist collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "audit_templates":
            raise exception.HTTPNotFound
        filters = {}
        if goal:
            if common_utils.is_uuid_like(goal):
                filters['goal_uuid'] = goal
            else:
                filters['goal_name'] = goal
        if strategy:
            if common_utils.is_uuid_like(strategy):
                filters['strategy_uuid'] = strategy
            else:
                filters['strategy_name'] = strategy
        expand = True
        resource_url = '/'.join(['audit_templates', 'detail'])
        return self._get_audit_templates_collection(filters, marker, limit,
                                                    sort_key, sort_dir, expand,
                                                    resource_url)
    @wsme_pecan.wsexpose(AuditTemplate, wtypes.text)
    def get_one(self, audit_template):
        """Retrieve information about the given audit template.
        :param audit_template: UUID or name of an audit template.
        """
        if self.from_audit_templates:
            raise exception.OperationNotPermitted
        context = pecan.request.context
        rpc_audit_template = api_utils.get_resource('AuditTemplate',
                                                    audit_template)
        policy.enforce(context, 'audit_template:get', rpc_audit_template,
                       action='audit_template:get')
        return AuditTemplate.convert_with_links(rpc_audit_template)
    @wsme.validate(types.uuid, AuditTemplatePostType)
    @wsme_pecan.wsexpose(AuditTemplate, body=AuditTemplatePostType,
                         status_code=201)
    def post(self, audit_template_postdata):
        """Create a new audit template.
        :param audit_template_postdata: the audit template POST data
                                        from the request body.
        """
        if self.from_audit_templates:
            raise exception.OperationNotPermitted
        context = pecan.request.context
        policy.enforce(context, 'audit_template:create',
                       action='audit_template:create')
        context = pecan.request.context
        audit_template = audit_template_postdata.as_audit_template()
        audit_template_dict = audit_template.as_dict()
        new_audit_template = objects.AuditTemplate(context,
                                                   **audit_template_dict)
        new_audit_template.create()
        # Set the HTTP Location Header
        pecan.response.location = link.build_url(
            'audit_templates', new_audit_template.uuid)
        return AuditTemplate.convert_with_links(new_audit_template)
    @wsme.validate(types.uuid, [AuditTemplatePatchType])
    @wsme_pecan.wsexpose(AuditTemplate, wtypes.text,
                         body=[AuditTemplatePatchType])
    def patch(self, audit_template, patch):
        """Update an existing audit template.
        :param audit_template: UUID or name of an audit template.
        :param patch: a json PATCH document to apply to this audit template.
        """
        if self.from_audit_templates:
            raise exception.OperationNotPermitted
        context = pecan.request.context
        audit_template_to_update = api_utils.get_resource('AuditTemplate',
                                                          audit_template)
        policy.enforce(context, 'audit_template:update',
                       audit_template_to_update,
                       action='audit_template:update')
        # NOTE(review): the object was already fetched via get_resource()
        # above; this second lookup appears redundant — confirm before
        # removing it.
        if common_utils.is_uuid_like(audit_template):
            audit_template_to_update = objects.AuditTemplate.get_by_uuid(
                pecan.request.context,
                audit_template)
        else:
            audit_template_to_update = objects.AuditTemplate.get_by_name(
                pecan.request.context,
                audit_template)
        try:
            audit_template_dict = audit_template_to_update.as_dict()
            # Apply the JSON patch to a plain dict, then rebuild an API
            # object from the patched result.
            audit_template = AuditTemplate(**api_utils.apply_jsonpatch(
                audit_template_dict, patch))
        except api_utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)
        # Update only the fields that have changed
        for field in objects.AuditTemplate.fields:
            try:
                patch_val = getattr(audit_template, field)
            except AttributeError:
                # Ignore fields that aren't exposed in the API
                continue
            if patch_val == wtypes.Unset:
                patch_val = None
            if audit_template_to_update[field] != patch_val:
                audit_template_to_update[field] = patch_val
        audit_template_to_update.save()
        return AuditTemplate.convert_with_links(audit_template_to_update)
    @wsme_pecan.wsexpose(None, wtypes.text, status_code=204)
    def delete(self, audit_template):
        """Delete a audit template.
        :param audit_template: UUID or name of an audit template.
        """
        context = pecan.request.context
        audit_template_to_delete = api_utils.get_resource('AuditTemplate',
                                                          audit_template)
        policy.enforce(context, 'audit_template:delete',
                       audit_template_to_delete,
                       action='audit_template:delete')
        # Soft delete: the row is flagged as deleted, not removed.
        audit_template_to_delete.soft_delete()
|
When is it Time to Get Dentures?
Getting dentures is a very personal matter. Most likely you’ll know when it’s time and your dentist will be able to tell you as well.
Most importantly, if your teeth are causing your mouth pain and several of them need to be pulled, it may be time for dentures — or perhaps you only need a partial denture.
Another reason you might need dentures is if you were born with soft teeth. Soft teeth or teeth with little or no enamel (yes, some people are born like this), causes the tooth to become decayed and weakened. In this case, it’s very important to save your gums and your bones in your mouth from becoming infected, which will be a very painful experience.
Rotting teeth can also be very detrimental to your overall health; bad teeth are known to contribute to heart problems and other health issues.
Bad breath? Most likely from rotting teeth. It can be embarrassing for anyone to have to experience bad breath.
Aesthetics? Yes, dentures can give you a beautiful smile. Your teeth are one of the first things people notice when they look at you. Once you have dentures, you will most likely feel secure when smiling and much more confident.
What if your dentures are uncomfortable and you don’t want to wear them?
You should first visit your dentist to see if he can adjust the denture for a better fit. However, you may need a permanent or soft denture reline from your dentist. Your gums will shrink once your teeth are extracted and your dentures will ultimately become loose and cause your gums to be sore and end up with unbearable sore spots. While your gums heal you can use a soft reliner like ProSoft Denture Reliners, which also is a tissue conditioner like dentists use. The main ingredient in the liquid is ethyl alcohol and it will soothe sore gums and promote faster healing.
Most importantly, be sure to keep your regular dental visits. Your dentist knows best and will be able to see beyond the outer teeth with x-rays. He may also propose that you get implants. Yes, they can be expensive, but make wearing dentures much easier. In the meantime, have your dentist place a soft denture liner in your denture plate or use an over-the-counter denture reline kit or DIY denture reliner until you can see your dentist.
|
from __future__ import division
from nbgrader import utils
from sqlalchemy import (create_engine, ForeignKey, Column, String, Text,
DateTime, Interval, Float, Enum, UniqueConstraint, Boolean)
from sqlalchemy.orm import sessionmaker, scoped_session, relationship, column_property
from sqlalchemy.orm.exc import NoResultFound, FlushError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.exc import IntegrityError
from sqlalchemy.sql import and_
from sqlalchemy import select, func, exists, case, literal_column
from uuid import uuid4
Base = declarative_base()
class InvalidEntry(ValueError):
    """Raised when an entry cannot be written to the database as given."""
    pass
class MissingEntry(ValueError):
    """Raised when a requested entry does not exist in the database."""
    pass
def new_uuid():
    """Return a fresh random id as a 32-character hexadecimal string."""
    generated = uuid4()
    return generated.hex
class Assignment(Base):
    """Database representation of the master/source version of an assignment."""

    __tablename__ = "assignment"

    #: Unique id of the assignment (automatically generated)
    id = Column(String(32), primary_key=True, default=new_uuid)

    #: Unique human-readable name for the assignment, such as "Problem Set 1"
    name = Column(String(128), unique=True, nullable=False)

    #: (Optional) Duedate for the assignment in datetime format, with UTC timezone
    duedate = Column(DateTime())

    #: A collection of notebooks contained in this assignment, represented
    #: by :class:`~nbgrader.api.Notebook` objects
    notebooks = relationship("Notebook", backref="assignment", order_by="Notebook.name")

    #: A collection of submissions of this assignment, represented by
    #: :class:`~nbgrader.api.SubmittedAssignment` objects.
    submissions = relationship("SubmittedAssignment", backref="assignment")

    # NOTE: the ``None`` placeholders below are replaced with SQLAlchemy
    # ``column_property`` expressions after all model classes are defined
    # (see the module-level assignments further down in this file).

    #: The number of submissions of this assignment
    num_submissions = None

    #: Maximum score achievable on this assignment, automatically calculated
    #: from the :attr:`~nbgrader.api.Notebook.max_score` of each notebook
    max_score = None

    #: Maximum coding score achievable on this assignment, automatically
    #: calculated from the :attr:`~nbgrader.api.Notebook.max_code_score` of
    #: each notebook
    max_code_score = None

    #: Maximum written score achievable on this assignment, automatically
    #: calculated from the :attr:`~nbgrader.api.Notebook.max_written_score` of
    #: each notebook
    max_written_score = None

    def to_dict(self):
        """Convert the assignment object to a JSON-friendly dictionary
        representation.
        """
        return {
            "id": self.id,
            "name": self.name,
            # datetime objects are not JSON-serializable, so emit ISO 8601
            "duedate": self.duedate.isoformat() if self.duedate is not None else None,
            "num_submissions": self.num_submissions,
            "max_score": self.max_score,
            "max_code_score": self.max_code_score,
            "max_written_score": self.max_written_score,
        }

    def __repr__(self):
        return "Assignment<{}>".format(self.name)
class Notebook(Base):
    """Database representation of the master/source version of a notebook."""

    __tablename__ = "notebook"
    __table_args__ = (UniqueConstraint('name', 'assignment_id'),)

    #: Unique id of the notebook (automatically generated)
    id = Column(String(32), primary_key=True, default=new_uuid)

    #: Unique human-readable name for the notebook, such as "Problem 1". Note
    #: the uniqueness is only constrained within assignments (e.g. it is ok for
    #: two different assignments to both have notebooks called "Problem 1", but
    #: the same assignment cannot have two notebooks with the same name).
    name = Column(String(128), nullable=False)

    #: The :class:`~nbgrader.api.Assignment` object that this notebook is a
    #: part of (populated by the ``backref`` on :attr:`Assignment.notebooks`)
    assignment = None

    #: Unique id of :attr:`~nbgrader.api.Notebook.assignment`
    assignment_id = Column(String(32), ForeignKey('assignment.id'))

    #: A collection of grade cells contained within this notebook, represented
    #: by :class:`~nbgrader.api.GradeCell` objects
    grade_cells = relationship("GradeCell", backref="notebook")

    #: A collection of solution cells contained within this notebook, represented
    #: by :class:`~nbgrader.api.SolutionCell` objects
    solution_cells = relationship("SolutionCell", backref="notebook")

    #: A collection of source cells contained within this notebook, represented
    #: by :class:`~nbgrader.api.SourceCell` objects
    source_cells = relationship("SourceCell", backref="notebook")

    #: A collection of submitted versions of this notebook, represented by
    #: :class:`~nbgrader.api.SubmittedNotebook` objects
    submissions = relationship("SubmittedNotebook", backref="notebook")

    # NOTE: the ``None`` placeholders below are replaced with SQLAlchemy
    # ``column_property`` expressions after all model classes are defined.

    #: The number of submissions of this notebook
    num_submissions = None

    #: Maximum score achievable on this notebook, automatically calculated
    #: from the :attr:`~nbgrader.api.GradeCell.max_score` of each grade cell
    max_score = None

    #: Maximum coding score achievable on this notebook, automatically
    #: calculated from the :attr:`~nbgrader.api.GradeCell.max_score` and
    #: :attr:`~nbgrader.api.GradeCell.cell_type` of each grade cell
    max_code_score = None

    #: Maximum written score achievable on this notebook, automatically
    #: calculated from the :attr:`~nbgrader.api.GradeCell.max_score` and
    #: :attr:`~nbgrader.api.GradeCell.cell_type` of each grade cell
    max_written_score = None

    #: Whether there are any submitted versions of this notebook that need to
    #: be manually graded, automatically determined from the
    #: :attr:`~nbgrader.api.SubmittedNotebook.needs_manual_grade` attribute of
    #: each submitted notebook
    needs_manual_grade = None

    def to_dict(self):
        """Convert the notebook object to a JSON-friendly dictionary
        representation.
        """
        return {
            "id": self.id,
            "name": self.name,
            "num_submissions": self.num_submissions,
            "max_score": self.max_score,
            "max_code_score": self.max_code_score,
            "max_written_score": self.max_written_score,
            "needs_manual_grade": self.needs_manual_grade
        }

    def __repr__(self):
        return "Notebook<{}/{}>".format(self.assignment.name, self.name)
class GradeCell(Base):
    """Database representation of the master/source version of a grade cell."""

    __tablename__ = "grade_cell"
    __table_args__ = (UniqueConstraint('name', 'notebook_id'),)

    #: Unique id of the grade cell (automatically generated)
    id = Column(String(32), primary_key=True, default=new_uuid)

    #: Unique human-readable name of the grade cell. This need only be unique
    #: within the notebook, not across notebooks.
    name = Column(String(128), nullable=False)

    #: Maximum score that can be assigned to this grade cell
    max_score = Column(Float(), nullable=False)

    #: The cell type, either "code" or "markdown"
    cell_type = Column(Enum("code", "markdown"), nullable=False)

    #: The :class:`~nbgrader.api.Notebook` that this grade cell is contained in
    #: (populated by the ``backref`` on :attr:`Notebook.grade_cells`)
    notebook = None

    #: Unique id of the :attr:`~nbgrader.api.GradeCell.notebook`
    notebook_id = Column(String(32), ForeignKey('notebook.id'))

    #: The assignment that this cell is contained within, represented by a
    #: :class:`~nbgrader.api.Assignment` object
    assignment = association_proxy("notebook", "assignment")

    #: A collection of grades assigned to submitted versions of this grade cell,
    #: represented by :class:`~nbgrader.api.Grade` objects
    grades = relationship("Grade", backref="cell")

    def to_dict(self):
        """Convert the grade cell object to a JSON-friendly dictionary
        representation. Note that this includes keys for ``notebook`` and
        ``assignment`` which correspond to the names of the notebook and
        assignment, not the objects themselves.
        """
        return {
            "id": self.id,
            "name": self.name,
            "max_score": self.max_score,
            "cell_type": self.cell_type,
            "notebook": self.notebook.name,
            "assignment": self.assignment.name
        }

    def __repr__(self):
        return "GradeCell<{}/{}/{}>".format(
            self.assignment.name, self.notebook.name, self.name)
class SolutionCell(Base):
    """Database representation of the master/source version of a solution cell."""

    __tablename__ = "solution_cell"
    __table_args__ = (UniqueConstraint('name', 'notebook_id'),)

    #: Unique id of the solution cell (automatically generated)
    id = Column(String(32), primary_key=True, default=new_uuid)

    #: Unique human-readable name of the solution cell. This need only be unique
    #: within the notebook, not across notebooks.
    name = Column(String(128), nullable=False)

    #: The :class:`~nbgrader.api.Notebook` that this solution cell is contained in
    #: (populated by the ``backref`` on :attr:`Notebook.solution_cells`)
    notebook = None

    #: Unique id of the :attr:`~nbgrader.api.SolutionCell.notebook`
    notebook_id = Column(String(32), ForeignKey('notebook.id'))

    #: The assignment that this cell is contained within, represented by a
    #: :class:`~nbgrader.api.Assignment` object
    assignment = association_proxy("notebook", "assignment")

    #: A collection of comments assigned to submitted versions of this solution
    #: cell, represented by :class:`~nbgrader.api.Comment` objects
    comments = relationship("Comment", backref="cell")

    def to_dict(self):
        """Convert the solution cell object to a JSON-friendly dictionary
        representation. Note that this includes keys for ``notebook`` and
        ``assignment`` which correspond to the names of the notebook and
        assignment, not the objects themselves.
        """
        return {
            "id": self.id,
            "name": self.name,
            "notebook": self.notebook.name,
            "assignment": self.assignment.name
        }

    def __repr__(self):
        # Use the same ``ClassName<assignment/notebook/name>`` convention as
        # every other cell class (previously this printed the raw notebook
        # object, producing an inconsistent repr).
        return "SolutionCell<{}/{}/{}>".format(
            self.assignment.name, self.notebook.name, self.name)
class SourceCell(Base):
    """Database representation of the master/source version of a source cell."""

    __tablename__ = "source_cell"
    __table_args__ = (UniqueConstraint('name', 'notebook_id'),)

    #: Unique id of the source cell (automatically generated)
    id = Column(String(32), primary_key=True, default=new_uuid)

    #: Unique human-readable name of the source cell. This need only be unique
    #: within the notebook, not across notebooks.
    name = Column(String(128), nullable=False)

    #: The cell type, either "code" or "markdown"
    cell_type = Column(Enum("code", "markdown"), nullable=False)

    #: Whether the cell is locked (e.g. the source saved in the database should
    #: be used to overwrite the source of students' cells)
    locked = Column(Boolean, default=False, nullable=False)

    #: The source code or text of the cell
    source = Column(Text())

    #: A checksum of the cell contents. This should usually be computed
    #: using :func:`nbgrader.utils.compute_checksum`
    checksum = Column(String(128))

    #: The :class:`~nbgrader.api.Notebook` that this source cell is contained in
    #: (populated by the ``backref`` on :attr:`Notebook.source_cells`)
    notebook = None

    #: Unique id of the :attr:`~nbgrader.api.SourceCell.notebook`
    notebook_id = Column(String(32), ForeignKey('notebook.id'))

    #: The assignment that this cell is contained within, represented by a
    #: :class:`~nbgrader.api.Assignment` object
    assignment = association_proxy("notebook", "assignment")

    def to_dict(self):
        """Convert the source cell object to a JSON-friendly dictionary
        representation. Note that this includes keys for ``notebook`` and
        ``assignment`` which correspond to the names of the notebook and
        assignment, not the objects themselves.
        """
        return {
            "id": self.id,
            "name": self.name,
            "cell_type": self.cell_type,
            "locked": self.locked,
            "source": self.source,
            "checksum": self.checksum,
            "notebook": self.notebook.name,
            "assignment": self.assignment.name
        }

    def __repr__(self):
        # BUGFIX: this previously said "SolutionCell<...>" (copy-paste error);
        # the repr should identify this class.
        return "SourceCell<{}/{}/{}>".format(
            self.assignment.name, self.notebook.name, self.name)
class Student(Base):
    """Database representation of a student."""

    __tablename__ = "student"

    #: Unique id of the student. This could be a student ID, a username, an
    #: email address, etc., so long as it is unique.
    id = Column(String(128), primary_key=True, nullable=False)

    #: (Optional) The first name of the student
    first_name = Column(String(128))

    #: (Optional) The last name of the student
    last_name = Column(String(128))

    #: (Optional) The student's email address, if the :attr:`~nbgrader.api.Student.id`
    #: does not correspond to an email address
    email = Column(String(128))

    #: A collection of assignments submitted by the student, represented as
    #: :class:`~nbgrader.api.SubmittedAssignment` objects
    submissions = relationship("SubmittedAssignment", backref="student")

    # NOTE: the ``None`` placeholders below are replaced with SQLAlchemy
    # ``column_property`` expressions after all model classes are defined.

    #: The overall score of the student across all assignments, computed
    #: automatically from the :attr:`~nbgrader.api.SubmittedAssignment.score`
    #: of each submitted assignment.
    score = None

    #: The maximum possible score the student could achieve across all assignments,
    #: computed automatically from the :attr:`~nbgrader.api.Assignment.max_score`
    #: of each assignment.
    max_score = None

    def to_dict(self):
        """Convert the student object to a JSON-friendly dictionary
        representation.
        """
        return {
            "id": self.id,
            "first_name": self.first_name,
            "last_name": self.last_name,
            "email": self.email,
            "score": self.score,
            "max_score": self.max_score
        }

    def __repr__(self):
        return "Student<{}>".format(self.id)
class SubmittedAssignment(Base):
    """Database representation of an assignment submitted by a student."""

    __tablename__ = "submitted_assignment"
    __table_args__ = (UniqueConstraint('assignment_id', 'student_id'),)

    #: Unique id of the submitted assignment (automatically generated)
    id = Column(String(32), primary_key=True, default=new_uuid)

    #: Name of the assignment, inherited from :class:`~nbgrader.api.Assignment`
    name = association_proxy("assignment", "name")

    #: The master version of this assignment, represented by a
    #: :class:`~nbgrader.api.Assignment` object (populated by the ``backref``
    #: on :attr:`Assignment.submissions`)
    assignment = None

    #: Unique id of :attr:`~nbgrader.api.SubmittedAssignment.assignment`
    assignment_id = Column(String(32), ForeignKey('assignment.id'))

    #: The student who submitted this assignment, represented by a
    #: :class:`~nbgrader.api.Student` object (populated by the ``backref``
    #: on :attr:`Student.submissions`)
    student = None

    #: Unique id of :attr:`~nbgrader.api.SubmittedAssignment.student`
    student_id = Column(String(128), ForeignKey('student.id'))

    #: (Optional) The date and time that the assignment was submitted, in date
    #: time format with a UTC timezone
    timestamp = Column(DateTime())

    #: (Optional) An extension given to the student for this assignment, in
    #: time delta format
    extension = Column(Interval())

    #: A collection of notebooks contained within this submitted assignment,
    #: represented by :class:`~nbgrader.api.SubmittedNotebook` objects
    notebooks = relationship("SubmittedNotebook", backref="assignment")

    # NOTE: the ``None`` placeholders below are replaced with SQLAlchemy
    # ``column_property`` expressions after all model classes are defined.

    #: The score assigned to this assignment, automatically calculated from the
    #: :attr:`~nbgrader.api.SubmittedNotebook.score` of each notebook within
    #: this submitted assignment.
    score = None

    #: The maximum possible score of this assignment, inherited from
    #: :class:`~nbgrader.api.Assignment`
    max_score = None

    #: The code score assigned to this assignment, automatically calculated from
    #: the :attr:`~nbgrader.api.SubmittedNotebook.code_score` of each notebook
    #: within this submitted assignment.
    code_score = None

    #: The maximum possible code score of this assignment, inherited from
    #: :class:`~nbgrader.api.Assignment`
    max_code_score = None

    #: The written score assigned to this assignment, automatically calculated
    #: from the :attr:`~nbgrader.api.SubmittedNotebook.written_score` of each
    #: notebook within this submitted assignment.
    written_score = None

    #: The maximum possible written score of this assignment, inherited from
    #: :class:`~nbgrader.api.Assignment`
    max_written_score = None

    #: Whether this assignment has parts that need to be manually graded,
    #: automatically determined from the :attr:`~nbgrader.api.SubmittedNotebook.needs_manual_grade`
    #: attribute of each notebook.
    needs_manual_grade = None

    @property
    def duedate(self):
        """The duedate of this student's assignment, which includes any extension
        given, if applicable, and which is just the regular assignment duedate
        otherwise.
        """
        orig_duedate = self.assignment.duedate
        if self.extension is not None:
            # NOTE(review): if the assignment has no duedate but an extension
            # is set, this addition raises a TypeError -- presumably extensions
            # are only ever given on assignments with duedates; confirm.
            return orig_duedate + self.extension
        else:
            return orig_duedate

    @property
    def total_seconds_late(self):
        """The number of seconds that this assignment was turned in past the
        duedate (including extensions, if any). If the assignment was turned in
        before the deadline, this value will just be zero.
        """
        if self.timestamp is None or self.duedate is None:
            return 0
        else:
            return max(0, (self.timestamp - self.duedate).total_seconds())

    def to_dict(self):
        """Convert the submitted assignment object to a JSON-friendly dictionary
        representation. Note that this includes a ``student`` key which is the
        unique id of the student, not the object itself.
        """
        return {
            "id": self.id,
            "name": self.name,
            "student": self.student.id,
            # datetimes/timedeltas are not JSON-serializable, so serialize
            # to ISO strings and seconds respectively
            "timestamp": self.timestamp.isoformat() if self.timestamp is not None else None,
            "extension": self.extension.total_seconds() if self.extension is not None else None,
            "duedate": self.duedate.isoformat() if self.duedate is not None else None,
            "total_seconds_late": self.total_seconds_late,
            "score": self.score,
            "max_score": self.max_score,
            "code_score": self.code_score,
            "max_code_score": self.max_code_score,
            "written_score": self.written_score,
            "max_written_score": self.max_written_score,
            "needs_manual_grade": self.needs_manual_grade
        }

    def __repr__(self):
        return "SubmittedAssignment<{} for {}>".format(self.name, self.student.id)
class SubmittedNotebook(Base):
    """Database representation of a notebook submitted by a student."""

    __tablename__ = "submitted_notebook"
    __table_args__ = (UniqueConstraint('notebook_id', 'assignment_id'),)

    #: Unique id of the submitted notebook (automatically generated)
    id = Column(String(32), primary_key=True, default=new_uuid)

    #: Name of the notebook, inherited from :class:`~nbgrader.api.Notebook`
    name = association_proxy("notebook", "name")

    #: The submitted assignment this notebook is a part of, represented by a
    #: :class:`~nbgrader.api.SubmittedAssignment` object (populated by the
    #: ``backref`` on :attr:`SubmittedAssignment.notebooks`)
    assignment = None

    #: Unique id of :attr:`~nbgrader.api.SubmittedNotebook.assignment`
    assignment_id = Column(String(32), ForeignKey('submitted_assignment.id'))

    #: The master version of this notebook, represesnted by a
    #: :class:`~nbgrader.api.Notebook` object (populated by the ``backref``
    #: on :attr:`Notebook.submissions`)
    notebook = None

    #: Unique id of :attr:`~nbgrader.api.SubmittedNotebook.notebook`
    notebook_id = Column(String(32), ForeignKey('notebook.id'))

    #: Collection of grades associated with this submitted notebook, represented
    #: by :class:`~nbgrader.api.Grade` objects
    grades = relationship("Grade", backref="notebook")

    #: Collection of comments associated with this submitted notebook, represented
    #: by :class:`~nbgrader.api.Comment` objects
    comments = relationship("Comment", backref="notebook")

    #: The student who submitted this notebook, represented by a
    #: :class:`~nbgrader.api.Student` object
    student = association_proxy('assignment', 'student')

    #: Whether this assignment has been flagged by a human grader
    flagged = Column(Boolean, default=False)

    # NOTE: the ``None`` placeholders below are replaced with SQLAlchemy
    # ``column_property`` expressions after all model classes are defined.

    #: The score assigned to this notebook, automatically calculated from the
    #: :attr:`~nbgrader.api.Grade.score` of each grade cell within
    #: this submitted notebook.
    score = None

    #: The maximum possible score of this notebook, inherited from
    #: :class:`~nbgrader.api.Notebook`
    max_score = None

    #: The code score assigned to this notebook, automatically calculated from
    #: the :attr:`~nbgrader.api.Grade.score` and :attr:`~nbgrader.api.GradeCell.cell_type`
    #: of each grade within this submitted notebook.
    code_score = None

    #: The maximum possible code score of this notebook, inherited from
    #: :class:`~nbgrader.api.Notebook`
    max_code_score = None

    #: The written score assigned to this notebook, automatically calculated from
    #: the :attr:`~nbgrader.api.Grade.score` and :attr:`~nbgrader.api.GradeCell.cell_type`
    #: of each grade within this submitted notebook.
    written_score = None

    #: The maximum possible written score of this notebook, inherited from
    #: :class:`~nbgrader.api.Notebook`
    max_written_score = None

    #: Whether this notebook has parts that need to be manually graded,
    #: automatically determined from the :attr:`~nbgrader.api.Grade.needs_manual_grade`
    #: attribute of each grade.
    needs_manual_grade = None

    #: Whether this notebook contains autograder tests that failed to pass,
    #: automatically determined from the :attr:`~nbgrader.api.Grade.failed_tests`
    #: attribute of each grade.
    failed_tests = None

    def to_dict(self):
        """Convert the submitted notebook object to a JSON-friendly dictionary
        representation. Note that this includes a key for ``student`` which is
        the unique id of the student, not the actual student object.
        """
        return {
            "id": self.id,
            "name": self.name,
            "student": self.student.id,
            "score": self.score,
            "max_score": self.max_score,
            "code_score": self.code_score,
            "max_code_score": self.max_code_score,
            "written_score": self.written_score,
            "max_written_score": self.max_written_score,
            "needs_manual_grade": self.needs_manual_grade,
            "failed_tests": self.failed_tests,
            "flagged": self.flagged
        }

    def __repr__(self):
        return "SubmittedNotebook<{}/{} for {}>".format(
            self.assignment.name, self.name, self.student.id)
class Grade(Base):
    """Database representation of a grade assigned to the submitted version of
    a grade cell.
    """

    __tablename__ = "grade"
    __table_args__ = (UniqueConstraint('cell_id', 'notebook_id'),)

    #: Unique id of the grade (automatically generated)
    id = Column(String(32), primary_key=True, default=new_uuid)

    #: Unique name of the grade cell, inherited from :class:`~nbgrader.api.GradeCell`
    name = association_proxy('cell', 'name')

    #: The submitted assignment that this grade is contained in, represented by
    #: a :class:`~nbgrader.api.SubmittedAssignment` object
    assignment = association_proxy('notebook', 'assignment')

    #: The submitted notebook that this grade is assigned to, represented by a
    #: :class:`~nbgrader.api.SubmittedNotebook` object (populated by the
    #: ``backref`` on :attr:`SubmittedNotebook.grades`)
    notebook = None

    #: Unique id of :attr:`~nbgrader.api.Grade.notebook`
    notebook_id = Column(String(32), ForeignKey('submitted_notebook.id'))

    #: The master version of the cell this grade is assigned to, represented by
    #: a :class:`~nbgrader.api.GradeCell` object (populated by the ``backref``
    #: on :attr:`GradeCell.grades`).
    cell = None

    #: Unique id of :attr:`~nbgrader.api.Grade.cell`
    cell_id = Column(String(32), ForeignKey('grade_cell.id'))

    #: The type of cell this grade corresponds to, inherited from
    #: :class:`~nbgrader.api.GradeCell` (set via ``column_property`` below)
    cell_type = None

    #: The student who this grade is assigned to, represented by a
    #: :class:`~nbgrader.api.Student` object
    student = association_proxy('notebook', 'student')

    #: Score assigned by the autograder
    auto_score = Column(Float())

    #: Score assigned by a human grader
    manual_score = Column(Float())

    #: Whether a score needs to be assigned manually. This is True by default.
    needs_manual_grade = Column(Boolean, default=True, nullable=False)

    #: The overall score, computed automatically from the
    #: :attr:`~nbgrader.api.Grade.auto_score` and :attr:`~nbgrader.api.Grade.manual_score`
    #: values. If neither are set, the score is zero. If both are set, then the
    #: manual score takes precedence. If only one is set, then that value is used
    #: for the score.
    # NOTE: ``!= None`` (rather than ``is not None``) is required here so that
    # SQLAlchemy renders an ``IS NOT NULL`` SQL expression.
    score = column_property(case(
        [
            (manual_score != None, manual_score),
            (auto_score != None, auto_score)
        ],
        else_=literal_column("0.0")
    ))

    #: The maximum possible score that can be assigned, inherited from
    #: :class:`~nbgrader.api.GradeCell`
    max_score = None

    #: Whether the autograded score is a result of failed autograder tests. This
    #: is True if the autograder score is less than the maximum score and the
    #: cell type is "code", and otherwise False (see the ``column_property``
    #: assignment at module level).
    failed_tests = None

    def to_dict(self):
        """Convert the grade object to a JSON-friendly dictionary representation.
        Note that this includes keys for ``notebook`` and ``assignment`` which
        correspond to the name of the notebook and assignment, not the actual
        objects. It also includes a key for ``student`` which corresponds to the
        unique id of the student, not the actual student object.
        """
        return {
            "id": self.id,
            "name": self.name,
            "notebook": self.notebook.name,
            "assignment": self.assignment.name,
            "student": self.student.id,
            "auto_score": self.auto_score,
            "manual_score": self.manual_score,
            "max_score": self.max_score,
            "needs_manual_grade": self.needs_manual_grade,
            "failed_tests": self.failed_tests,
            "cell_type": self.cell_type
        }

    def __repr__(self):
        return "Grade<{}/{}/{} for {}>".format(
            self.assignment.name, self.notebook.name, self.name, self.student.id)
class Comment(Base):
    """Database representation of a comment on a cell in a submitted notebook."""

    __tablename__ = "comment"
    __table_args__ = (UniqueConstraint('cell_id', 'notebook_id'),)

    #: Unique id of the comment (automatically generated)
    id = Column(String(32), primary_key=True, default=new_uuid)

    #: Unique name of the solution cell, inherited from :class:`~nbgrader.api.SolutionCell`
    name = association_proxy('cell', 'name')

    #: The submitted assignment that this comment is contained in, represented by
    #: a :class:`~nbgrader.api.SubmittedAssignment` object
    assignment = association_proxy('notebook', 'assignment')

    #: The submitted notebook that this comment is assigned to, represented by a
    #: :class:`~nbgrader.api.SubmittedNotebook` object (populated by the
    #: ``backref`` on :attr:`SubmittedNotebook.comments`)
    notebook = None

    #: Unique id of :attr:`~nbgrader.api.Comment.notebook`
    notebook_id = Column(String(32), ForeignKey('submitted_notebook.id'))

    #: The master version of the cell this comment is assigned to, represented by
    #: a :class:`~nbgrader.api.SolutionCell` object (populated by the ``backref``
    #: on :attr:`SolutionCell.comments`).
    cell = None

    #: Unique id of :attr:`~nbgrader.api.Comment.cell`
    cell_id = Column(String(32), ForeignKey('solution_cell.id'))

    #: The student who this comment is assigned to, represented by a
    #: :class:`~nbgrader.api.Student` object
    student = association_proxy('notebook', 'student')

    #: A comment which is automatically assigned by the autograder
    auto_comment = Column(Text())

    #: A commment which is assigned manually
    manual_comment = Column(Text())

    #: The overall comment, computed automatically from the
    #: :attr:`~nbgrader.api.Comment.auto_comment` and
    #: :attr:`~nbgrader.api.Comment.manual_comment` values. If neither are set,
    #: the comment is None. If both are set, then the manual comment
    #: takes precedence. If only one is set, then that value is used for the
    #: comment.
    # NOTE: ``!= None`` (rather than ``is not None``) is required here so that
    # SQLAlchemy renders an ``IS NOT NULL`` SQL expression.
    comment = column_property(case(
        [
            (manual_comment != None, manual_comment),
            (auto_comment != None, auto_comment)
        ],
        else_=None
    ))

    def to_dict(self):
        """Convert the comment object to a JSON-friendly dictionary representation.
        Note that this includes keys for ``notebook`` and ``assignment`` which
        correspond to the name of the notebook and assignment, not the actual
        objects. It also includes a key for ``student`` which corresponds to the
        unique id of the student, not the actual student object.
        """
        return {
            "id": self.id,
            "name": self.name,
            "notebook": self.notebook.name,
            "assignment": self.assignment.name,
            "student": self.student.id,
            "auto_comment": self.auto_comment,
            "manual_comment": self.manual_comment
        }

    def __repr__(self):
        return "Comment<{}/{}/{} for {}>".format(
            self.assignment.name, self.notebook.name, self.name, self.student.id)
## Needs manual grade
#
# These replace the ``None`` placeholders on the model classes with correlated
# EXISTS subqueries: an entity "needs a manual grade" if any associated Grade
# row still has ``needs_manual_grade`` set.

# True if any grade in this submitted notebook needs manual grading.
SubmittedNotebook.needs_manual_grade = column_property(
    exists().where(and_(
        Grade.notebook_id == SubmittedNotebook.id,
        Grade.needs_manual_grade))\
    .correlate_except(Grade), deferred=True)

# True if any grade in any notebook of this submitted assignment needs
# manual grading.
SubmittedAssignment.needs_manual_grade = column_property(
    exists().where(and_(
        SubmittedNotebook.assignment_id == SubmittedAssignment.id,
        Grade.notebook_id == SubmittedNotebook.id,
        Grade.needs_manual_grade))\
    .correlate_except(Grade), deferred=True)

# True if any submission of this (master) notebook needs manual grading.
Notebook.needs_manual_grade = column_property(
    exists().where(and_(
        Notebook.id == SubmittedNotebook.notebook_id,
        Grade.notebook_id == SubmittedNotebook.id,
        Grade.needs_manual_grade))\
    .correlate_except(Grade), deferred=True)
## Overall scores
#
# Sum of Grade.score over the relevant grades; ``coalesce`` turns a SQL NULL
# (no grades) into 0.0.

# Total score for one submitted notebook.
SubmittedNotebook.score = column_property(
    select([func.coalesce(func.sum(Grade.score), 0.0)])\
    .where(Grade.notebook_id == SubmittedNotebook.id)\
    .correlate_except(Grade), deferred=True)

# Total score for one submitted assignment (across its notebooks).
SubmittedAssignment.score = column_property(
    select([func.coalesce(func.sum(Grade.score), 0.0)])\
    .where(and_(
        SubmittedNotebook.assignment_id == SubmittedAssignment.id,
        Grade.notebook_id == SubmittedNotebook.id))\
    .correlate_except(Grade), deferred=True)

# Total score for one student (across all submitted assignments).
Student.score = column_property(
    select([func.coalesce(func.sum(Grade.score), 0.0)])\
    .where(and_(
        SubmittedAssignment.student_id == Student.id,
        SubmittedNotebook.assignment_id == SubmittedAssignment.id,
        Grade.notebook_id == SubmittedNotebook.id))\
    .correlate_except(Grade), deferred=True)
## Overall max scores
#
# Maximum achievable scores, derived from GradeCell.max_score. Submitted
# entities inherit the max score of their master (source) counterparts.

# Max score for a single grade comes from its master grade cell.
Grade.max_score = column_property(
    select([GradeCell.max_score])\
    .where(Grade.cell_id == GradeCell.id)\
    .correlate_except(GradeCell), deferred=True)

# Max score of a master notebook is the sum over its grade cells.
Notebook.max_score = column_property(
    select([func.coalesce(func.sum(GradeCell.max_score), 0.0)])\
    .where(GradeCell.notebook_id == Notebook.id)\
    .correlate_except(GradeCell), deferred=True)

# A submitted notebook inherits its master notebook's max score.
SubmittedNotebook.max_score = column_property(
    select([Notebook.max_score])\
    .where(SubmittedNotebook.notebook_id == Notebook.id)\
    .correlate_except(Notebook), deferred=True)

# Max score of a master assignment is the sum over all its grade cells.
Assignment.max_score = column_property(
    select([func.coalesce(func.sum(GradeCell.max_score), 0.0)])\
    .where(and_(
        Notebook.assignment_id == Assignment.id,
        GradeCell.notebook_id == Notebook.id))\
    .correlate_except(GradeCell), deferred=True)

# A submitted assignment inherits its master assignment's max score.
SubmittedAssignment.max_score = column_property(
    select([Assignment.max_score])\
    .where(SubmittedAssignment.assignment_id == Assignment.id)\
    .correlate_except(Assignment), deferred=True)

# A student's max score is the sum over all assignments' max scores.
# NOTE(review): this select has no WHERE clause, so it sums over every
# assignment in the database regardless of whether the student submitted it.
Student.max_score = column_property(
    select([func.coalesce(func.sum(Assignment.max_score), 0.0)])\
    .correlate_except(Assignment), deferred=True)
## Written scores
#
# Same as the overall scores/max scores above, but restricted to grade cells
# whose cell_type is "markdown" (i.e. manually graded written responses).

SubmittedNotebook.written_score = column_property(
    select([func.coalesce(func.sum(Grade.score), 0.0)])\
    .where(and_(
        Grade.notebook_id == SubmittedNotebook.id,
        GradeCell.id == Grade.cell_id,
        GradeCell.cell_type == "markdown"))\
    .correlate_except(Grade), deferred=True)

SubmittedAssignment.written_score = column_property(
    select([func.coalesce(func.sum(Grade.score), 0.0)])\
    .where(and_(
        SubmittedNotebook.assignment_id == SubmittedAssignment.id,
        Grade.notebook_id == SubmittedNotebook.id,
        GradeCell.id == Grade.cell_id,
        GradeCell.cell_type == "markdown"))\
    .correlate_except(Grade), deferred=True)

## Written max scores

Notebook.max_written_score = column_property(
    select([func.coalesce(func.sum(GradeCell.max_score), 0.0)])\
    .where(and_(
        GradeCell.notebook_id == Notebook.id,
        GradeCell.cell_type == "markdown"))\
    .correlate_except(GradeCell), deferred=True)

SubmittedNotebook.max_written_score = column_property(
    select([Notebook.max_written_score])\
    .where(Notebook.id == SubmittedNotebook.notebook_id)\
    .correlate_except(Notebook), deferred=True)

Assignment.max_written_score = column_property(
    select([func.coalesce(func.sum(GradeCell.max_score), 0.0)])\
    .where(and_(
        Notebook.assignment_id == Assignment.id,
        GradeCell.notebook_id == Notebook.id,
        GradeCell.cell_type == "markdown"))\
    .correlate_except(GradeCell), deferred=True)

SubmittedAssignment.max_written_score = column_property(
    select([Assignment.max_written_score])\
    .where(Assignment.id == SubmittedAssignment.assignment_id)\
    .correlate_except(Assignment), deferred=True)
## Code scores
#
# Same as the written scores above, but restricted to grade cells whose
# cell_type is "code" (i.e. autograded coding responses).

SubmittedNotebook.code_score = column_property(
    select([func.coalesce(func.sum(Grade.score), 0.0)])\
    .where(and_(
        Grade.notebook_id == SubmittedNotebook.id,
        GradeCell.id == Grade.cell_id,
        GradeCell.cell_type == "code"))\
    .correlate_except(Grade), deferred=True)

SubmittedAssignment.code_score = column_property(
    select([func.coalesce(func.sum(Grade.score), 0.0)])\
    .where(and_(
        SubmittedNotebook.assignment_id == SubmittedAssignment.id,
        Grade.notebook_id == SubmittedNotebook.id,
        GradeCell.id == Grade.cell_id,
        GradeCell.cell_type == "code"))\
    .correlate_except(Grade), deferred=True)

## Code max scores

Notebook.max_code_score = column_property(
    select([func.coalesce(func.sum(GradeCell.max_score), 0.0)])\
    .where(and_(
        GradeCell.notebook_id == Notebook.id,
        GradeCell.cell_type == "code"))\
    .correlate_except(GradeCell), deferred=True)

SubmittedNotebook.max_code_score = column_property(
    select([Notebook.max_code_score])\
    .where(Notebook.id == SubmittedNotebook.notebook_id)\
    .correlate_except(Notebook), deferred=True)

Assignment.max_code_score = column_property(
    select([func.coalesce(func.sum(GradeCell.max_score), 0.0)])\
    .where(and_(
        Notebook.assignment_id == Assignment.id,
        GradeCell.notebook_id == Notebook.id,
        GradeCell.cell_type == "code"))\
    .correlate_except(GradeCell), deferred=True)

SubmittedAssignment.max_code_score = column_property(
    select([Assignment.max_code_score])\
    .where(Assignment.id == SubmittedAssignment.assignment_id)\
    .correlate_except(Assignment), deferred=True)
## Number of submissions

# Count of submitted assignments for each master assignment.
Assignment.num_submissions = column_property(
    select([func.count(SubmittedAssignment.id)])\
    .where(SubmittedAssignment.assignment_id == Assignment.id)\
    .correlate_except(SubmittedAssignment), deferred=True)

# Count of submitted notebooks for each master notebook.
Notebook.num_submissions = column_property(
    select([func.count(SubmittedNotebook.id)])\
    .where(SubmittedNotebook.notebook_id == Notebook.id)\
    .correlate_except(SubmittedNotebook), deferred=True)

## Cell type

# A grade's cell type is inherited from its master grade cell.
Grade.cell_type = column_property(
    select([GradeCell.cell_type])\
    .where(Grade.cell_id == GradeCell.id)\
    .correlate_except(GradeCell), deferred=True)

## Failed tests

# A grade represents failed autograder tests when its autograded score is
# below the maximum score and the cell is a code cell.
Grade.failed_tests = column_property(
    (Grade.auto_score < Grade.max_score) & (Grade.cell_type == "code"))

# A submitted notebook has failed tests if any of its grades do.
SubmittedNotebook.failed_tests = column_property(
    exists().where(and_(
        Grade.notebook_id == SubmittedNotebook.id,
        Grade.failed_tests))\
    .correlate_except(Grade), deferred=True)
class Gradebook(object):
"""The gradebook object to interface with the database holding
nbgrader grades.
"""
def __init__(self, db_url):
    """Initialize the connection to the database.

    Parameters
    ----------
    db_url : string
        The URL to the database, e.g. ``sqlite:///grades.db``

    """
    # create the connection to the database
    engine = create_engine(db_url)
    # scoped_session gives thread-local sessions; autoflush pushes pending
    # changes to the database before each query
    self.db = scoped_session(sessionmaker(autoflush=True, bind=engine))
    # this creates all the tables in the database if they don't already exist
    Base.metadata.create_all(bind=engine)
#### Students
@property
def students(self):
    """A list of all students in the database."""
    query = self.db.query(Student)
    ordered = query.order_by(Student.last_name, Student.first_name)
    return ordered.all()
def add_student(self, student_id, **kwargs):
"""Add a new student to the database.
Parameters
----------
student_id : string
The unique id of the student
`**kwargs` : dict
other keyword arguments to the :class:`~nbgrader.api.Student` object
Returns
-------
student : :class:`~nbgrader.api.Student`
"""
student = Student(id=student_id, **kwargs)
self.db.add(student)
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return student
def find_student(self, student_id):
"""Find a student.
Parameters
----------
student_id : string
The unique id of the student
Returns
-------
student : :class:`~nbgrader.api.Student`
"""
try:
student = self.db.query(Student)\
.filter(Student.id == student_id)\
.one()
except NoResultFound:
raise MissingEntry("No such student: {}".format(student_id))
return student
def update_or_create_student(self, name, **kwargs):
"""Update an existing student, or create it if it doesn't exist.
Parameters
----------
name : string
the name of the student
`**kwargs`
additional keyword arguments for the :class:`~nbgrader.api.Student` object
Returns
-------
student : :class:`~nbgrader.api.Student`
"""
try:
student = self.find_student(name)
except MissingEntry:
student = self.add_student(name, **kwargs)
else:
for attr in kwargs:
setattr(student, attr, kwargs[attr])
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return student
def remove_student(self, name):
    """Deletes an existing student from the gradebook, including any
    submissions that might be associated with that student.

    Parameters
    ----------
    name : string
        the name of the student to delete

    """
    student = self.find_student(name)
    # remove each submitted assignment first so that dependent rows
    # (submitted notebooks, grades, comments) are cleaned up as well
    for submission in student.submissions:
        self.remove_submission(submission.assignment.name, name)
    self.db.delete(student)
    try:
        self.db.commit()
    except (IntegrityError, FlushError) as e:
        self.db.rollback()
        raise InvalidEntry(*e.args)
#### Assignments
@property
def assignments(self):
    """A list of all assignments in the gradebook."""
    query = self.db.query(Assignment)
    ordered = query.order_by(Assignment.duedate, Assignment.name)
    return ordered.all()
def add_assignment(self, name, **kwargs):
"""Add a new assignment to the gradebook.
Parameters
----------
name : string
the unique name of the new assignment
`**kwargs`
additional keyword arguments for the :class:`~nbgrader.api.Assignment` object
Returns
-------
assignment : :class:`~nbgrader.api.Assignment`
"""
if 'duedate' in kwargs:
kwargs['duedate'] = utils.parse_utc(kwargs['duedate'])
assignment = Assignment(name=name, **kwargs)
self.db.add(assignment)
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return assignment
def find_assignment(self, name):
"""Find an assignment in the gradebook.
Parameters
----------
name : string
the unique name of the assignment
Returns
-------
assignment : :class:`~nbgrader.api.Assignment`
"""
try:
assignment = self.db.query(Assignment)\
.filter(Assignment.name == name)\
.one()
except NoResultFound:
raise MissingEntry("No such assignment: {}".format(name))
return assignment
def update_or_create_assignment(self, name, **kwargs):
"""Update an existing assignment, or create it if it doesn't exist.
Parameters
----------
name : string
the name of the assignment
`**kwargs`
additional keyword arguments for the :class:`~nbgrader.api.Assignment` object
Returns
-------
assignment : :class:`~nbgrader.api.Assignment`
"""
try:
assignment = self.find_assignment(name)
except MissingEntry:
assignment = self.add_assignment(name, **kwargs)
else:
for attr in kwargs:
if attr == 'duedate':
setattr(assignment, attr, utils.parse_utc(kwargs[attr]))
else:
setattr(assignment, attr, kwargs[attr])
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return assignment
def remove_assignment(self, name):
    """Deletes an existing assignment from the gradebook, including any
    submissions that might be associated with that assignment.

    Parameters
    ----------
    name : string
        the name of the assignment to delete

    """
    assignment = self.find_assignment(name)
    # remove dependent rows first: each student's submission, then each
    # notebook (with its grade/solution/source cells)
    for submission in assignment.submissions:
        self.remove_submission(name, submission.student.id)
    for notebook in assignment.notebooks:
        self.remove_notebook(notebook.name, name)
    self.db.delete(assignment)
    try:
        self.db.commit()
    except (IntegrityError, FlushError) as e:
        self.db.rollback()
        raise InvalidEntry(*e.args)
#### Notebooks
def add_notebook(self, name, assignment, **kwargs):
"""Add a new notebook to an assignment.
Parameters
----------
name : string
the name of the new notebook
assignment : string
the name of an existing assignment
`**kwargs`
additional keyword arguments for the :class:`~nbgrader.api.Notebook` object
Returns
-------
notebook : :class:`~nbgrader.api.Notebook`
"""
notebook = Notebook(
name=name, assignment=self.find_assignment(assignment), **kwargs)
self.db.add(notebook)
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return notebook
def find_notebook(self, name, assignment):
"""Find a particular notebook in an assignment.
Parameters
----------
name : string
the name of the notebook
assignment : string
the name of the assignment
Returns
-------
notebook : :class:`~nbgrader.api.Notebook`
"""
try:
notebook = self.db.query(Notebook)\
.join(Assignment, Assignment.id == Notebook.assignment_id)\
.filter(Notebook.name == name, Assignment.name == assignment)\
.one()
except NoResultFound:
raise MissingEntry("No such notebook: {}/{}".format(assignment, name))
return notebook
def update_or_create_notebook(self, name, assignment, **kwargs):
"""Update an existing notebook, or create it if it doesn't exist.
Parameters
----------
name : string
the name of the notebook
assignment : string
the name of the assignment
`**kwargs`
additional keyword arguments for the :class:`~nbgrader.api.Notebook` object
Returns
-------
notebook : :class:`~nbgrader.api.Notebook`
"""
try:
notebook = self.find_notebook(name, assignment)
except MissingEntry:
notebook = self.add_notebook(name, assignment, **kwargs)
else:
for attr in kwargs:
setattr(notebook, attr, kwargs[attr])
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return notebook
def remove_notebook(self, name, assignment):
    """Deletes an existing notebook from the gradebook, including any
    submissions that might be associated with that notebook.

    Parameters
    ----------
    name : string
        the name of the notebook to delete
    assignment : string
        the name of an existing assignment

    """
    notebook = self.find_notebook(name, assignment)
    # remove dependent rows first: submitted copies, then the notebook's
    # grade, solution, and source cells
    for submission in notebook.submissions:
        self.remove_submission_notebook(name, assignment, submission.student.id)
    for grade_cell in notebook.grade_cells:
        self.db.delete(grade_cell)
    for solution_cell in notebook.solution_cells:
        self.db.delete(solution_cell)
    for source_cell in notebook.source_cells:
        self.db.delete(source_cell)
    self.db.delete(notebook)
    try:
        self.db.commit()
    except (IntegrityError, FlushError) as e:
        self.db.rollback()
        raise InvalidEntry(*e.args)
#### Grade cells
def add_grade_cell(self, name, notebook, assignment, **kwargs):
"""Add a new grade cell to an existing notebook of an existing
assignment.
Parameters
----------
name : string
the name of the new grade cell
notebook : string
the name of an existing notebook
assignment : string
the name of an existing assignment
`**kwargs`
additional keyword arguments for :class:`~nbgrader.api.GradeCell`
Returns
-------
grade_cell : :class:`~nbgrader.api.GradeCell`
"""
notebook = self.find_notebook(notebook, assignment)
grade_cell = GradeCell(name=name, notebook=notebook, **kwargs)
self.db.add(grade_cell)
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return grade_cell
def find_grade_cell(self, name, notebook, assignment):
"""Find a grade cell in a particular notebook of an assignment.
Parameters
----------
name : string
the name of the grade cell
notebook : string
the name of the notebook
assignment : string
the name of the assignment
Returns
-------
grade_cell : :class:`~nbgrader.api.GradeCell`
"""
try:
grade_cell = self.db.query(GradeCell)\
.join(Notebook, Notebook.id == GradeCell.notebook_id)\
.join(Assignment, Assignment.id == Notebook.assignment_id)\
.filter(
GradeCell.name == name,
Notebook.name == notebook,
Assignment.name == assignment)\
.one()
except NoResultFound:
raise MissingEntry("No such grade cell: {}/{}/{}".format(assignment, notebook, name))
return grade_cell
def update_or_create_grade_cell(self, name, notebook, assignment, **kwargs):
"""Update an existing grade cell in a notebook of an assignment, or
create the grade cell if it does not exist.
Parameters
----------
name : string
the name of the grade cell
notebook : string
the name of the notebook
assignment : string
the name of the assignment
`**kwargs`
additional keyword arguments for :class:`~nbgrader.api.GradeCell`
Returns
-------
grade_cell : :class:`~nbgrader.api.GradeCell`
"""
try:
grade_cell = self.find_grade_cell(name, notebook, assignment)
except MissingEntry:
grade_cell = self.add_grade_cell(name, notebook, assignment, **kwargs)
else:
for attr in kwargs:
setattr(grade_cell, attr, kwargs[attr])
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return grade_cell
#### Solution cells
def add_solution_cell(self, name, notebook, assignment, **kwargs):
"""Add a new solution cell to an existing notebook of an existing
assignment.
Parameters
----------
name : string
the name of the new solution cell
notebook : string
the name of an existing notebook
assignment : string
the name of an existing assignment
`**kwargs`
additional keyword arguments for :class:`~nbgrader.api.SolutionCell`
Returns
-------
solution_cell : :class:`~nbgrader.api.SolutionCell`
"""
notebook = self.find_notebook(notebook, assignment)
solution_cell = SolutionCell(name=name, notebook=notebook, **kwargs)
self.db.add(solution_cell)
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return solution_cell
def find_solution_cell(self, name, notebook, assignment):
"""Find a solution cell in a particular notebook of an assignment.
Parameters
----------
name : string
the name of the solution cell
notebook : string
the name of the notebook
assignment : string
the name of the assignment
Returns
-------
solution_cell : :class:`~nbgrader.api.SolutionCell`
"""
try:
solution_cell = self.db.query(SolutionCell)\
.join(Notebook, Notebook.id == SolutionCell.notebook_id)\
.join(Assignment, Assignment.id == Notebook.assignment_id)\
.filter(SolutionCell.name == name, Notebook.name == notebook, Assignment.name == assignment)\
.one()
except NoResultFound:
raise MissingEntry("No such solution cell: {}/{}/{}".format(assignment, notebook, name))
return solution_cell
def update_or_create_solution_cell(self, name, notebook, assignment, **kwargs):
    """Update an existing solution cell in a notebook of an assignment, or
    create the solution cell if it does not exist.

    Parameters
    ----------
    name : string
        the name of the solution cell
    notebook : string
        the name of the notebook
    assignment : string
        the name of the assignment
    `**kwargs`
        additional keyword arguments for :class:`~nbgrader.api.SolutionCell`

    Returns
    -------
    solution_cell : :class:`~nbgrader.api.SolutionCell`

    """
    try:
        solution_cell = self.find_solution_cell(name, notebook, assignment)
    except MissingEntry:
        solution_cell = self.add_solution_cell(name, notebook, assignment, **kwargs)
    else:
        for attr in kwargs:
            setattr(solution_cell, attr, kwargs[attr])
        try:
            self.db.commit()
        except (IntegrityError, FlushError) as e:
            # roll back the failed flush so the session remains usable;
            # this was missing here (unlike the other update_or_create_*
            # methods), leaving the session in a broken state on error
            self.db.rollback()
            raise InvalidEntry(*e.args)
    return solution_cell
#### Source cells
def add_source_cell(self, name, notebook, assignment, **kwargs):
"""Add a new source cell to an existing notebook of an existing
assignment.
Parameters
----------
name : string
the name of the new source cell
notebook : string
the name of an existing notebook
assignment : string
the name of an existing assignment
`**kwargs`
additional keyword arguments for :class:`~nbgrader.api.SourceCell`
Returns
-------
source_cell : :class:`~nbgrader.api.SourceCell`
"""
notebook = self.find_notebook(notebook, assignment)
source_cell = SourceCell(name=name, notebook=notebook, **kwargs)
self.db.add(source_cell)
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return source_cell
def find_source_cell(self, name, notebook, assignment):
"""Find a source cell in a particular notebook of an assignment.
Parameters
----------
name : string
the name of the source cell
notebook : string
the name of the notebook
assignment : string
the name of the assignment
Returns
-------
source_cell : :class:`~nbgrader.api.SourceCell`
"""
try:
source_cell = self.db.query(SourceCell)\
.join(Notebook, Notebook.id == SourceCell.notebook_id)\
.join(Assignment, Assignment.id == Notebook.assignment_id)\
.filter(SourceCell.name == name, Notebook.name == notebook, Assignment.name == assignment)\
.one()
except NoResultFound:
raise MissingEntry("No such source cell: {}/{}/{}".format(assignment, notebook, name))
return source_cell
def update_or_create_source_cell(self, name, notebook, assignment, **kwargs):
    """Update an existing source cell in a notebook of an assignment, or
    create the source cell if it does not exist.

    Parameters
    ----------
    name : string
        the name of the source cell
    notebook : string
        the name of the notebook
    assignment : string
        the name of the assignment
    `**kwargs`
        additional keyword arguments for :class:`~nbgrader.api.SourceCell`

    Returns
    -------
    source_cell : :class:`~nbgrader.api.SourceCell`

    """
    try:
        source_cell = self.find_source_cell(name, notebook, assignment)
    except MissingEntry:
        source_cell = self.add_source_cell(name, notebook, assignment, **kwargs)
    else:
        for attr in kwargs:
            setattr(source_cell, attr, kwargs[attr])
        try:
            self.db.commit()
        except (IntegrityError, FlushError) as e:
            # roll back the failed flush so the session remains usable;
            # this was missing here (unlike the other update_or_create_*
            # methods), leaving the session in a broken state on error
            self.db.rollback()
            raise InvalidEntry(*e.args)
    return source_cell
#### Submissions
def add_submission(self, assignment, student, **kwargs):
"""Add a new submission of an assignment by a student.
This method not only creates the high-level submission object, but also
mirrors the entire structure of the existing assignment. Thus, once this
method has been called, the new submission exists and is completely
ready to be filled in with grades and comments.
Parameters
----------
assignment : string
the name of an existing assignment
student : string
the name of an existing student
`**kwargs`
additional keyword arguments for :class:`~nbgrader.api.SubmittedAssignment`
Returns
-------
submission : :class:`~nbgrader.api.SubmittedAssignment`
"""
if 'timestamp' in kwargs:
kwargs['timestamp'] = utils.parse_utc(kwargs['timestamp'])
try:
submission = SubmittedAssignment(
assignment=self.find_assignment(assignment),
student=self.find_student(student),
**kwargs)
for notebook in submission.assignment.notebooks:
nb = SubmittedNotebook(notebook=notebook, assignment=submission)
for grade_cell in notebook.grade_cells:
Grade(cell=grade_cell, notebook=nb)
for solution_cell in notebook.solution_cells:
Comment(cell=solution_cell, notebook=nb)
self.db.add(submission)
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return submission
def find_submission(self, assignment, student):
"""Find a student's submission for a given assignment.
Parameters
----------
assignment : string
the name of an assignment
student : string
the unique id of a student
Returns
-------
submission : :class:`~nbgrader.api.SubmittedAssignment`
"""
try:
submission = self.db.query(SubmittedAssignment)\
.join(Assignment, Assignment.id == SubmittedAssignment.assignment_id)\
.join(Student, Student.id == SubmittedAssignment.student_id)\
.filter(Assignment.name == assignment, Student.id == student)\
.one()
except NoResultFound:
raise MissingEntry("No such submission: {} for {}".format(
assignment, student))
return submission
def update_or_create_submission(self, assignment, student, **kwargs):
"""Update an existing submission of an assignment by a given student,
or create a new submission if it doesn't exist.
See :func:`~nbgrader.api.Gradebook.add_submission` for additional
details.
Parameters
----------
assignment : string
the name of an existing assignment
student : string
the name of an existing student
`**kwargs`
additional keyword arguments for :class:`~nbgrader.api.SubmittedAssignment`
Returns
-------
submission : :class:`~nbgrader.api.SubmittedAssignment`
"""
try:
submission = self.find_submission(assignment, student)
except MissingEntry:
submission = self.add_submission(assignment, student, **kwargs)
else:
for attr in kwargs:
if attr == 'timestamp':
setattr(submission, attr, utils.parse_utc(kwargs[attr]))
else:
setattr(submission, attr, kwargs[attr])
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return submission
def remove_submission(self, assignment, student):
"""Removes a submission from the database.
Parameters
----------
assignment : string
the name of an assignment
student : string
the name of a student
"""
submission = self.find_submission(assignment, student)
for notebook in submission.notebooks:
self.remove_submission_notebook(notebook.name, assignment, student)
self.db.delete(submission)
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
def remove_submission_notebook(self, notebook, assignment, student):
"""Removes a submitted notebook from the database.
Parameters
----------
notebook : string
the name of a notebook
assignment : string
the name of an assignment
student : string
the name of a student
"""
submission = self.find_submission_notebook(notebook, assignment, student)
for grade in submission.grades:
self.db.delete(grade)
for comment in submission.comments:
self.db.delete(comment)
self.db.delete(submission)
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
def assignment_submissions(self, assignment):
"""Find all submissions of a given assignment.
Parameters
----------
assignment : string
the name of an assignment
Returns
-------
submissions : list
A list of :class:`~nbgrader.api.SubmittedAssignment` objects
"""
return self.db.query(SubmittedAssignment)\
.join(Assignment, Assignment.id == SubmittedAssignment.assignment_id)\
.filter(Assignment.name == assignment)\
.all()
def notebook_submissions(self, notebook, assignment):
"""Find all submissions of a given notebook in a given assignment.
Parameters
----------
notebook : string
the name of an assignment
assignment : string
the name of an assignment
Returns
-------
submissions : list
A list of :class:`~nbgrader.api.SubmittedNotebook` objects
"""
return self.db.query(SubmittedNotebook)\
.join(Notebook, Notebook.id == SubmittedNotebook.notebook_id)\
.join(SubmittedAssignment, SubmittedAssignment.id == SubmittedNotebook.assignment_id)\
.join(Assignment, Assignment.id == SubmittedAssignment.assignment_id)\
.filter(Notebook.name == notebook, Assignment.name == assignment)\
.all()
def student_submissions(self, student):
"""Find all submissions by a given student.
Parameters
----------
student : string
the student's unique id
Returns
-------
submissions : list
A list of :class:`~nbgrader.api.SubmittedAssignment` objects
"""
return self.db.query(SubmittedAssignment)\
.join(Student, Student.id == SubmittedAssignment.student_id)\
.filter(Student.id == student)\
.all()
def find_submission_notebook(self, notebook, assignment, student):
"""Find a particular notebook in a student's submission for a given
assignment.
Parameters
----------
notebook : string
the name of a notebook
assignment : string
the name of an assignment
student : string
the unique id of a student
Returns
-------
notebook : :class:`~nbgrader.api.SubmittedNotebook`
"""
try:
notebook = self.db.query(SubmittedNotebook)\
.join(Notebook, Notebook.id == SubmittedNotebook.notebook_id)\
.join(SubmittedAssignment, SubmittedAssignment.id == SubmittedNotebook.assignment_id)\
.join(Assignment, Assignment.id == SubmittedAssignment.assignment_id)\
.join(Student, Student.id == SubmittedAssignment.student_id)\
.filter(
Notebook.name == notebook,
Assignment.name == assignment,
Student.id == student)\
.one()
except NoResultFound:
raise MissingEntry("No such submitted notebook: {}/{} for {}".format(
assignment, notebook, student))
return notebook
def find_submission_notebook_by_id(self, notebook_id):
"""Find a submitted notebook by its unique id.
Parameters
----------
notebook_id : string
the unique id of the submitted notebook
Returns
-------
notebook : :class:`~nbgrader.api.SubmittedNotebook`
"""
try:
notebook = self.db.query(SubmittedNotebook)\
.filter(SubmittedNotebook.id == notebook_id)\
.one()
except NoResultFound:
raise MissingEntry("No such submitted notebook: {}".format(notebook_id))
return notebook
def find_grade(self, grade_cell, notebook, assignment, student):
"""Find a particular grade in a notebook in a student's submission
for a given assignment.
Parameters
----------
grade_cell : string
the name of a grade cell
notebook : string
the name of a notebook
assignment : string
the name of an assignment
student : string
the unique id of a student
Returns
-------
grade : :class:`~nbgrader.api.Grade`
"""
try:
grade = self.db.query(Grade)\
.join(GradeCell, GradeCell.id == Grade.cell_id)\
.join(SubmittedNotebook, SubmittedNotebook.id == Grade.notebook_id)\
.join(Notebook, Notebook.id == SubmittedNotebook.notebook_id)\
.join(SubmittedAssignment, SubmittedAssignment.id == SubmittedNotebook.assignment_id)\
.join(Assignment, Assignment.id == SubmittedAssignment.assignment_id)\
.join(Student, Student.id == SubmittedAssignment.student_id)\
.filter(
GradeCell.name == grade_cell,
Notebook.name == notebook,
Assignment.name == assignment,
Student.id == student)\
.one()
except NoResultFound:
raise MissingEntry("No such grade: {}/{}/{} for {}".format(
assignment, notebook, grade_cell, student))
return grade
def find_grade_by_id(self, grade_id):
"""Find a grade by its unique id.
Parameters
----------
grade_id : string
the unique id of the grade
Returns
-------
grade : :class:`~nbgrader.api.Grade`
"""
try:
grade = self.db.query(Grade).filter(Grade.id == grade_id).one()
except NoResultFound:
raise MissingEntry("No such grade: {}".format(grade_id))
return grade
def find_comment(self, solution_cell, notebook, assignment, student):
"""Find a particular comment in a notebook in a student's submission
for a given assignment.
Parameters
----------
solution_cell : string
the name of a solution cell
notebook : string
the name of a notebook
assignment : string
the name of an assignment
student : string
the unique id of a student
Returns
-------
comment : :class:`~nbgrader.api.Comment`
"""
try:
comment = self.db.query(Comment)\
.join(SolutionCell, SolutionCell.id == Comment.cell_id)\
.join(SubmittedNotebook, SubmittedNotebook.id == Comment.notebook_id)\
.join(Notebook, Notebook.id == SubmittedNotebook.notebook_id)\
.join(SubmittedAssignment, SubmittedAssignment.id == SubmittedNotebook.assignment_id)\
.join(Assignment, Assignment.id == SubmittedAssignment.assignment_id)\
.join(Student, Student.id == SubmittedAssignment.student_id)\
.filter(
SolutionCell.name == solution_cell,
Notebook.name == notebook,
Assignment.name == assignment,
Student.id == student)\
.one()
except NoResultFound:
raise MissingEntry("No such comment: {}/{}/{} for {}".format(
assignment, notebook, solution_cell, student))
return comment
def find_comment_by_id(self, comment_id):
"""Find a comment by its unique id.
Parameters
----------
comment_id : string
the unique id of the comment
Returns
-------
comment : :class:`~nbgrader.api.Comment`
"""
try:
comment = self.db.query(Comment).filter(Comment.id == comment_id).one()
except NoResultFound:
raise MissingEntry("No such comment: {}".format(comment_id))
return comment
def average_assignment_score(self, assignment_id):
"""Compute the average score for an assignment.
Parameters
----------
assignment_id : string
the name of the assignment
Returns
-------
score : float
The average score
"""
assignment = self.find_assignment(assignment_id)
if assignment.num_submissions == 0:
return 0.0
score_sum = self.db.query(func.coalesce(func.sum(Grade.score), 0.0))\
.join(GradeCell, Notebook, Assignment)\
.filter(Assignment.name == assignment_id).scalar()
return score_sum / assignment.num_submissions
def average_assignment_code_score(self, assignment_id):
"""Compute the average code score for an assignment.
Parameters
----------
assignment_id : string
the name of the assignment
Returns
-------
score : float
The average code score
"""
assignment = self.find_assignment(assignment_id)
if assignment.num_submissions == 0:
return 0.0
score_sum = self.db.query(func.coalesce(func.sum(Grade.score), 0.0))\
.join(GradeCell, Notebook, Assignment)\
.filter(and_(
Assignment.name == assignment_id,
Notebook.assignment_id == Assignment.id,
GradeCell.notebook_id == Notebook.id,
Grade.cell_id == GradeCell.id,
GradeCell.cell_type == "code")).scalar()
return score_sum / assignment.num_submissions
def average_assignment_written_score(self, assignment_id):
"""Compute the average written score for an assignment.
Parameters
----------
assignment_id : string
the name of the assignment
Returns
-------
score : float
The average written score
"""
assignment = self.find_assignment(assignment_id)
if assignment.num_submissions == 0:
return 0.0
score_sum = self.db.query(func.coalesce(func.sum(Grade.score), 0.0))\
.join(GradeCell, Notebook, Assignment)\
.filter(and_(
Assignment.name == assignment_id,
Notebook.assignment_id == Assignment.id,
GradeCell.notebook_id == Notebook.id,
Grade.cell_id == GradeCell.id,
GradeCell.cell_type == "markdown")).scalar()
return score_sum / assignment.num_submissions
def average_notebook_score(self, notebook_id, assignment_id):
"""Compute the average score for a particular notebook in an assignment.
Parameters
----------
notebook_id : string
the name of the notebook
assignment_id : string
the name of the assignment
Returns
-------
score : float
The average notebook score
"""
notebook = self.find_notebook(notebook_id, assignment_id)
if notebook.num_submissions == 0:
return 0.0
score_sum = self.db.query(func.coalesce(func.sum(Grade.score), 0.0))\
.join(SubmittedNotebook, Notebook, Assignment)\
.filter(and_(
Notebook.name == notebook_id,
Assignment.name == assignment_id)).scalar()
return score_sum / notebook.num_submissions
def average_notebook_code_score(self, notebook_id, assignment_id):
"""Compute the average code score for a particular notebook in an
assignment.
Parameters
----------
notebook_id : string
the name of the notebook
assignment_id : string
the name of the assignment
Returns
-------
score : float
The average notebook code score
"""
notebook = self.find_notebook(notebook_id, assignment_id)
if notebook.num_submissions == 0:
return 0.0
score_sum = self.db.query(func.coalesce(func.sum(Grade.score), 0.0))\
.join(GradeCell, Notebook, Assignment)\
.filter(and_(
Notebook.name == notebook_id,
Assignment.name == assignment_id,
Notebook.assignment_id == Assignment.id,
GradeCell.notebook_id == Notebook.id,
Grade.cell_id == GradeCell.id,
GradeCell.cell_type == "code")).scalar()
return score_sum / notebook.num_submissions
def average_notebook_written_score(self, notebook_id, assignment_id):
"""Compute the average written score for a particular notebook in an
assignment.
Parameters
----------
notebook_id : string
the name of the notebook
assignment_id : string
the name of the assignment
Returns
-------
score : float
The average notebook written score
"""
notebook = self.find_notebook(notebook_id, assignment_id)
if notebook.num_submissions == 0:
return 0.0
score_sum = self.db.query(func.coalesce(func.sum(Grade.score), 0.0))\
.join(GradeCell, Notebook, Assignment)\
.filter(and_(
Notebook.name == notebook_id,
Assignment.name == assignment_id,
Notebook.assignment_id == Assignment.id,
GradeCell.notebook_id == Notebook.id,
Grade.cell_id == GradeCell.id,
GradeCell.cell_type == "markdown")).scalar()
return score_sum / notebook.num_submissions
def student_dicts(self):
    """Returns a list of dictionaries containing student data. Equivalent
    to calling :func:`~nbgrader.api.Student.to_dict` for each student,
    except that this method is implemented using proper SQL joins and is
    much faster.

    Returns
    -------
    students : list
        A list of dictionaries, one per student

    """
    # subquery the scores: total earned score per student over all grades
    scores = self.db.query(
        Student.id,
        func.sum(Grade.score).label("score")
    ).join(SubmittedAssignment, SubmittedNotebook, Grade)\
        .group_by(Student.id)\
        .subquery()
    # full query: outer join so students with no submissions still appear,
    # with their score coalesced to 0.0.
    # NOTE(review): GradeCell is selected without an explicit join condition,
    # so max_score appears to be summed over all grade cells for each
    # student -- confirm this is the intended total.
    students = self.db.query(
        Student.id, Student.first_name, Student.last_name,
        Student.email, func.coalesce(scores.c.score, 0.0),
        func.sum(GradeCell.max_score)
    ).outerjoin(scores, Student.id == scores.c.id)\
        .group_by(Student.id)\
        .all()
    keys = ["id", "first_name", "last_name", "email", "score", "max_score"]
    return [dict(zip(keys, x)) for x in students]
def notebook_submission_dicts(self, notebook_id, assignment_id):
    """Returns a list of dictionaries containing submission data. Equivalent
    to calling :func:`~nbgrader.api.SubmittedNotebook.to_dict` for each
    submission, except that this method is implemented using proper SQL
    joins and is much faster.

    Parameters
    ----------
    notebook_id : string
        the name of the notebook
    assignment_id : string
        the name of the assignment

    Returns
    -------
    submissions : list
        A list of dictionaries, one per submitted notebook

    """
    # subquery the code scores: per submitted notebook, the earned and
    # maximum scores over "code" cells only
    code_scores = self.db.query(
        SubmittedNotebook.id,
        func.sum(Grade.score).label("code_score"),
        func.sum(GradeCell.max_score).label("max_code_score"),
    ).join(SubmittedAssignment, Notebook, Assignment, Student, Grade, GradeCell)\
        .filter(GradeCell.cell_type == "code")\
        .group_by(SubmittedNotebook.id)\
        .subquery()
    # subquery for the written scores ("markdown" cells)
    written_scores = self.db.query(
        SubmittedNotebook.id,
        func.sum(Grade.score).label("written_score"),
        func.sum(GradeCell.max_score).label("max_written_score"),
    ).join(SubmittedAssignment, Notebook, Assignment, Student, Grade, GradeCell)\
        .filter(GradeCell.cell_type == "markdown")\
        .group_by(SubmittedNotebook.id)\
        .subquery()
    # subquery for needing manual grading: true when any grade in the
    # submitted notebook still needs manual grading
    manual_grade = self.db.query(
        SubmittedNotebook.id,
        exists().where(Grade.needs_manual_grade).label("needs_manual_grade")
    ).join(SubmittedAssignment, Assignment, Notebook)\
        .filter(
            Grade.notebook_id == SubmittedNotebook.id,
            Grade.needs_manual_grade)\
        .group_by(SubmittedNotebook.id)\
        .subquery()
    # subquery for failed tests: true when any autograded code cell in the
    # submitted notebook lost points
    failed_tests = self.db.query(
        SubmittedNotebook.id,
        exists().where(Grade.failed_tests).label("failed_tests")
    ).join(SubmittedAssignment, Assignment, Notebook)\
        .filter(
            Grade.notebook_id == SubmittedNotebook.id,
            Grade.failed_tests)\
        .group_by(SubmittedNotebook.id)\
        .subquery()
    # full query: one row per submitted notebook; the outer joins coalesce
    # to False for submissions absent from the manual-grade / failed-test
    # subqueries
    submissions = self.db.query(
        SubmittedNotebook.id, Notebook.name, Student.id,
        func.sum(Grade.score), func.sum(GradeCell.max_score),
        code_scores.c.code_score, code_scores.c.max_code_score,
        written_scores.c.written_score, written_scores.c.max_written_score,
        func.coalesce(manual_grade.c.needs_manual_grade, False),
        func.coalesce(failed_tests.c.failed_tests, False),
        SubmittedNotebook.flagged
    ).join(SubmittedAssignment, Notebook, Assignment, Student, Grade, GradeCell)\
        .outerjoin(code_scores, SubmittedNotebook.id == code_scores.c.id)\
        .outerjoin(written_scores, SubmittedNotebook.id == written_scores.c.id)\
        .outerjoin(manual_grade, SubmittedNotebook.id == manual_grade.c.id)\
        .outerjoin(failed_tests, SubmittedNotebook.id == failed_tests.c.id)\
        .filter(and_(
            Notebook.name == notebook_id,
            Assignment.name == assignment_id,
            Student.id == SubmittedAssignment.student_id,
            SubmittedAssignment.id == SubmittedNotebook.assignment_id,
            SubmittedNotebook.id == Grade.notebook_id,
            GradeCell.id == Grade.cell_id))\
        .group_by(Student.id)\
        .all()
    # column order matches the select list above
    keys = [
        "id", "name", "student",
        "score", "max_score",
        "code_score", "max_code_score",
        "written_score", "max_written_score",
        "needs_manual_grade",
        "failed_tests", "flagged"
    ]
    return [dict(zip(keys, x)) for x in submissions]
|
how's it going boys and girls?
it's not easy to win my heart unless you're truly dedicated.
who's the smallest of all?
|
"""
Image Cache Module
Copyright (C) 2016 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json
from sqlite3 import dbapi2 as db_lib
def __enum(**enums):
    """Build an anonymous enum-like class whose class attributes are the keyword args."""
    members = dict(enums)
    return type('Enum', (), members)

DB_TYPES = __enum(MYSQL='mysql', SQLITE='sqlite')
class DBCache(object):
    """Small key/value cache for TMDB API responses backed by SQLite.

    Objects are stored as JSON blobs keyed on (tmdb_id, object_type),
    where object_type is 'M' (movie), 'T' (tv show) or 'P' (person).
    A second table, db_info, holds simple string settings.
    """

    def __init__(self, db_path=None):
        # Default path mirrors the original add-on layout.
        self.db_path = '../tmdb_cache.db' if db_path is None else db_path
        self.db_type = DB_TYPES.SQLITE
        self.db = db_lib.connect(self.db_path)
        self.db.text_factory = str
        self.__execute('CREATE TABLE IF NOT EXISTS api_cache (tmdb_id INTEGER NOT NULL, object_type CHAR(1) NOT NULL, data VARCHAR(255), PRIMARY KEY(tmdb_id, object_type))')
        self.__execute('CREATE TABLE IF NOT EXISTS db_info (setting VARCHAR(255), value TEXT, PRIMARY KEY(setting))')

    def close(self):
        """Close the underlying database connection."""
        self.db.close()

    def update_movie(self, tmdb_id, js_data):
        """Store (or replace) the cached JSON payload for a movie."""
        self.__update_object(tmdb_id, 'M', js_data)

    def get_movie(self, tmdb_id):
        """Return the cached movie payload, or {} when absent."""
        return self.__get_object(tmdb_id, 'M')

    def get_tvshow(self, tmdb_id):
        """Return the cached TV-show payload, or {} when absent."""
        return self.__get_object(tmdb_id, 'T')

    def get_person(self, tmdb_id):
        """Return the cached person payload, or {} when absent."""
        return self.__get_object(tmdb_id, 'P')

    def __get_object(self, tmdb_id, object_type):
        sql = 'SELECT data from api_cache where tmdb_id = ? and object_type=?'
        rows = self.__execute(sql, (tmdb_id, object_type))
        return json.loads(rows[0][0]) if rows else {}

    def update_tvshow(self, tmdb_id, js_data):
        """Store (or replace) the cached JSON payload for a TV show."""
        self.__update_object(tmdb_id, 'T', js_data)

    def update_person(self, tmdb_id, js_data):
        """Store (or replace) the cached JSON payload for a person."""
        self.__update_object(tmdb_id, 'P', js_data)

    def __update_object(self, tmdb_id, object_type, js_data):
        self.__execute('REPLACE INTO api_cache (tmdb_id, object_type, data) values (?, ?, ?)', (tmdb_id, object_type, json.dumps(js_data)))

    def get_setting(self, setting):
        """Return the stored value for *setting*, or None when missing."""
        sql = 'SELECT value FROM db_info WHERE setting=?'
        rows = self.__execute(sql, (setting,))
        if rows:
            return rows[0][0]

    def set_setting(self, setting, value):
        """Store (or replace) a single settings key/value pair."""
        sql = 'REPLACE INTO db_info (setting, value) VALUES (?, ?)'
        self.__execute(sql, (setting, value))

    def execute(self, sql, params=None):
        """Public wrapper around the private execute helper."""
        return self.__execute(sql, params)

    def __execute(self, sql, params=None):
        # Normalise the statement for the active driver, run it, and fetch
        # rows only for read queries; every call commits afterwards.
        query = self.__format(sql)
        bind_params = [] if params is None else params
        cursor = self.db.cursor()
        cursor.execute(query, bind_params)
        result = cursor.fetchall() if self.__is_read(query) else None
        cursor.close()
        self.db.commit()
        return result

    # apply formatting changes to make sql work with a particular db driver
    def __format(self, sql):
        if self.db_type == DB_TYPES.MYSQL:
            sql = sql.replace('?', '%s')
        if self.db_type == DB_TYPES.SQLITE:
            if sql.startswith('REPLACE'):
                sql = 'INSERT OR ' + sql
        return sql

    def __is_read(self, sql):
        # Only SELECT/SHOW statements produce a result set to fetch.
        head = sql[:6].upper()
        return head.startswith('SELECT') or head.startswith('SHOW')
|
This page displays sold house prices for Rosery Court in Salisbury.
Rosery Court in Dinton, Salisbury consists predominantly of detached houses. Properties on Rosery Court typically have values around £500,000 - £700,000, but larger detached houses can command upwards of £700,000.
Map showing Rosery Court in Salisbury.
|
import logging
from datetime import timedelta
from os.path import abspath, dirname
# Django settings for sendinel project.
DEBUG = True # NOTE: set to False when the scheduler is used
TEMPLATE_DEBUG = DEBUG
# Absolute path of the directory containing this settings file.
PROJECT_PATH = dirname(abspath(__file__))
# Default and test-run verbosity for the project's loggers.
LOGGING_LEVEL = logging.INFO
LOGGING_LEVEL_TEST = logging.CRITICAL
ADMINS = (
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = PROJECT_PATH + '/sendinel.db' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Berlin'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Identity function used as a no-op translation marker below.
_ = lambda s: s
#LANGUAGES = (
# ('de', _('German')),
# ('en', _('English')),
# ('ts', _('Shangaan')),
# ('zh', _('Test Language')),
#)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = PROJECT_PATH + '/media'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/mediaweb/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/admin_media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '4ztf1p=e9d*ns^d*f@bs3mu#37p)$jp(%lzo2a+-%j8^=eq852'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
TEMPLATE_CONTEXT_PROCESSORS = ("django.core.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.contrib.messages.context_processors.messages")
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.csrf.middleware.CsrfMiddleware'
)
ROOT_URLCONF = 'sendinel.urls'
TEMPLATE_DIRS = (
    PROJECT_PATH + "/templates",
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'sendinel',
    'sendinel.web',
    'sendinel.backend',
    'sendinel.groups',
    'sendinel.infoservices',
    'sendinel.notifications',
    'sendinel.medicines'
)
####################################
# Sendinel Configuration
REMINDER_TIME_BEFORE_APPOINTMENT = timedelta(days = 1)
DEFAULT_APPOINTMENT_DURATION = timedelta(minutes = 60)
DEFAULT_HOSPITAL_NAME = 'your hospital'
DEFAULT_SEND_TIME = '12:00' #hh:mm in 24-hours format
COUNTRY_CODE_PHONE = "0049" #"0027" for South Africa
START_MOBILE_PHONE = "0" # "0" for South Africa (07/08..), "01" for Germany
# see http://en.wikipedia.org/wiki/Telephone_numbers_in_South_Africa
# TODO multiple mobile prefixes
ASTERISK_USER = "sendinel"
ASTERISK_GROUP = "sendinel"
ASTERISK_SPOOL_DIR = "/var/spool/asterisk/outgoing/"
ASTERISK_DONE_SPOOL_DIR = "/var/spool/asterisk/outgoing_done/"
ASTERISK_RETRY = 5
ASTERISK_RETRY_TIME = 5
# NOTE(review): the original comment here was garbled; ASTERISK_DATACARD
# appears to select whether calls go out through the datacard SIP account
# configured below -- confirm against the Asterisk setup.
ASTERISK_DATACARD = True
ASTERISK_EXTENSION = "s"
ASTERISK_SIP_ACCOUNT = "datacard0"
#ASTERISK_SIP_ACCOUNT = "ext-sip-account"
# Specify a COM Port for SMS
# for windows maybe it starts at 0
SERIALPORTSMS = '/dev/rfcomm0'
# FESTIVAL_CACHE = "/lib/init/rw"
FESTIVAL_CACHE = "/tmp"
# Phonenumber to authenticate against the system
AUTH_NUMBER = "CHANGE ME"
# time a user has to call the system to authenticate
AUTHENTICATION_CALL_TIMEOUT = timedelta(minutes = 3)
# True or False to turn authentication on or off
AUTHENTICATION_ENABLED = False
# enable Bluetooth as a Way of Communication
BLUETOOTH_ENABLED = True
# Salutation for all SMS
# TODO the count of characters has to be subtracted from "Characters Left:"
SMS_SALUTATION = ''
# Salutation for phone calls
CALL_SALUTATION = "This is an automated call from your clinic"
# Template for Medicine Notification Messages ($hospital is substituted later)
MEDICINE_MESSAGE_TEMPLATE = "Your medicine is now available " + \
"at the $hospital. Please come and pick it up."
# used for marking the vcal uid
VCAL_UID_SLUG = 'sendinel.org'
####################################
# Setup Local_Settings if present
# (local_settings.py may override any of the above; a missing file is fine)
try:
    from local_settings import *
except ImportError:
    pass
|
Do you love The Good Luck Charm book? Please share!
The Good Luck Charm - Helena Hunting e-book coming soon.
|
#!/usr/bin/env python3
"""
Copyright (c) 2018 The Hyve B.V.
This code is licensed under the GNU Affero General Public License (AGPL),
version 3, or (at your option) any later version.
"""
import unittest
import sys
import os
import glob
from contextlib import contextmanager
from io import StringIO
import logging.handlers
import tempfile
import shutil
from importer import validateStudies, cbioportal_common
# globals:
PORTAL_INFO_DIR = 'test_data/api_json_system_tests'
# FIXME: replace by contextlib.redirect_stdout when moving to Python 3.4+
@contextmanager
def redirect_stdout(new_target):
    """Temporarily re-bind sys.stdout to *new_target*.

    The previously installed stdout object is restored on exit, even if
    the managed block raises.
    """
    saved_stdout = sys.stdout
    sys.stdout = new_target
    try:
        yield
    finally:
        sys.stdout = saved_stdout
# FIXME: replace by tempfile.TemporaryDirectory when moving to Python 3.2+
@contextmanager
def TemporaryDirectory():
"""Create a temporary directory and remove it after use."""
path = tempfile.mkdtemp()
try:
yield path
finally:
shutil.rmtree(path)
class ValidateStudiesSystemTester(unittest.TestCase):
    """Test cases around running the validateStudies script
    (such as "does it return the correct exit status?")
    """

    def _run_validation(self, cli_args):
        """Parse *cli_args* and run validateStudies; return its exit status."""
        parsed = validateStudies.interface(cli_args)
        return validateStudies.main(parsed)

    def test_exit_status_success(self):
        """study 0 : no errors, expected exit_status = 0.

        Possible exit statuses:
        0: 'VALID',
        1: 'INVALID'
        """
        print("===study 0")
        exit_status = self._run_validation(
            ['--list-of-studies', 'test_data/study_es_0/',
             '--portal_info_dir', PORTAL_INFO_DIR])
        self.assertEqual(0, exit_status)

    def test_exit_status_failure(self):
        """study 1 : errors, expected exit_status = 1."""
        print("===study 1")
        exit_status = self._run_validation(
            ['--list-of-studies', 'test_data/study_es_1/',
             '--portal_info_dir', PORTAL_INFO_DIR])
        self.assertEqual(1, exit_status)

    def test_exit_status_invalid(self):
        """test to fail: study directory not existing, so cannot run validation, expected exit_status = 1."""
        print("===study invalid")
        exit_status = self._run_validation(
            ['--list-of-studies', 'test_data/study_es_invalid/',
             '--portal_info_dir', PORTAL_INFO_DIR])
        self.assertEqual(1, exit_status)

    def test_exit_status_warnings(self):
        """study 3 : warnings only, expected exit_status = 0."""
        print("===study 3")
        exit_status = self._run_validation(
            ['--list-of-studies', 'test_data/study_es_3/',
             '--portal_info_dir', PORTAL_INFO_DIR])
        self.assertEqual(0, exit_status)

    def test_exit_status_multiple_studies(self):
        """Running validateStudies for four studies tested above, expected exit_status = 1."""
        print("===study0,1,invalid,3")
        exit_status = self._run_validation(
            ['--root-directory', 'test_data',
             '--list-of-studies', 'study_es_0,study_es_1,study_es_invalid,study_es_3',
             '--portal_info_dir', PORTAL_INFO_DIR])
        self.assertEqual(1, exit_status)

    def test_logs_study_label_before_validation_messages(self):
        """The log file should start with a line describing the study.

        A subsequent study should have its own header line.
        """
        with TemporaryDirectory() as out_dir:
            # Run a two-study validation with its console output suppressed.
            with redirect_stdout(StringIO()):
                self._run_validation([
                    '--root-directory', 'test_data',
                    '--list-of-studies', 'study_various_issues,study_es_0',
                    '--portal_info_dir', PORTAL_INFO_DIR,
                    '--html-folder', out_dir
                ])
            # The very first log line must name the first study.
            log_path = glob.glob(os.path.join(out_dir, 'log*.txt'))[0]
            with open(log_path) as log_file:
                lines = log_file.readlines()
            self.assertIn('study_various_issues', lines[0])
            # The line right after the first study's completion marker must
            # be the header for the second study.
            end_of_first_study = next(
                i for i, line in enumerate(lines)
                if 'Validation complete' in line)
            self.assertIn('study_es_0', lines[end_of_first_study + 1])
class ValidateStudiesWithEagerlyFlushingCollapser(unittest.TestCase):
    """Test validation with the collapser flushing due to buffer capacity.

    When validating very large studies, it will flush partway through a study.
    This can be simulated with a smaller study by lowering the buffer capacity.
    """

    def setUp(self):
        """Make the collapsing log message handler flush more eagerly."""
        class EagerFlusher(logging.handlers.MemoryHandler):
            def __init__(self, *args, **kwargs):
                """Set the buffer capacity to 3 regardless of args."""
                # leave out any capacity argument from args and kwargs
                args = args[1:]
                kwargs = {k: v for k, v in list(kwargs.items()) if k != 'capacity'}
                # pass 3 as the capacity argument
                super(EagerFlusher, self).__init__(3, *args, **kwargs)

        class EagerFlushingCollapser(
                cbioportal_common.CollapsingLogMessageHandler,
                EagerFlusher):
            """CollapsingLogMessageHandler with EagerFlusher overrides."""
            pass

        self.original_collapser = cbioportal_common.CollapsingLogMessageHandler
        # Fix: install the combined class so the collapsing logic is still
        # exercised, just with a tiny (3-record) buffer.  The previous code
        # installed the bare EagerFlusher, which silently replaced the
        # collapsing handler with a plain MemoryHandler and left
        # EagerFlushingCollapser unused -- defeating this test case's purpose.
        cbioportal_common.CollapsingLogMessageHandler = EagerFlushingCollapser

    def tearDown(self):
        """Restore the unmodified collapsing log message handler."""
        cbioportal_common.CollapsingLogMessageHandler = self.original_collapser

    def test_leaves_stdout_uncluttered_if_validation_produces_errors(self):
        """Test flushing the collapsing logger halfway through a study.

        This should not spill the validation messages to stdout as it previously
        did, even crashing with a KeyError sometimes because non-validator
        log messages got flushed into the collapsing logic.
        """
        output_stream = StringIO()
        with redirect_stdout(output_stream):
            args = validateStudies.interface([
                '--root-directory', 'test_data',
                '--list-of-studies', 'study_various_issues/',
                '--portal_info_dir', PORTAL_INFO_DIR])
            validateStudies.main(args)
        self.assertNotIn(
            'ERROR',
            output_stream.getvalue(),
            'The validation errors should not be printed to the console.')
if __name__ == '__main__':
    # buffer=True: unittest captures stdout/stderr during each test and
    # replays it only for failing tests, keeping successful runs quiet.
    unittest.main(buffer=True)
|
Add on a day to your Basic Training or just come for an Advanced Training/Workshop to discuss challenges that your organization is facing. This class is focused on helping your organization optimize usage of MicroMain to ensure you are incorporating all of our best practices and getting the most from your CMMS.
Exit from the Airport on Hwy. 71 and head west for several miles.
Stay on Hwy. 71, which will eventually cross under IH-35 and become Ben White Blvd.
Stay on Ben White Blvd. (again, for several miles) until you see the exit for Loop 360 (which is really more of a gentle veer to the right).
Stay on Loop 360, heading north to the Bee Cave Road (FM 2244) exit.
Head east (right) on Bee Cave Road approximately 1 mile to Bluff Park Circle.
Take a left on Bluff Park Circle and an immediate left into our driveway.
Take 6th Street west to Loop 1 (aka MoPac).
Head west on Bee Cave Road approximately 3 miles to Bluff Park Circle (the street immediately follows a sign that reads “5000 Bee Cave”).
Take a right on Bluff Park Circle and an immediate left into our driveway.
|
'''
Fork of pdf.py from jsunpack
'''
import binascii
import cStringIO
import Crypto.Cipher.ARC4 as ARC4
import Crypto.Cipher.AES as AES
import hashlib
import lzw
import re
import string
import struct
import xml.dom.minidom
import zlib
class pdfobj(object):
    """Parser for a single PDF indirect object ("N M obj" ... "endobj").

    Splits the object's dictionary into (state, key, value) tag tuples,
    captures any stream content, and sets classification flags
    (JavaScript / OpenAction / XFA / EmbeddedFile / Encrypt ...) that the
    driver class later uses to decide what to decode and extract.
    """
    #this class parses single "1 0 obj" up till "endobj" elements
    def __init__(self, keynum, data):
        # keynum is the object identifier, e.g. '5 0'; data is everything
        # between "obj" and "endobj".  An empty data string means the caller
        # will feed tags in manually via parseTag (used for trailers).
        self.tags = [] #tuples of [key,value]
        self.keynum = keynum
        self.indata = data
        self.tagstream = ''
        self.tagstreamError = False
        self.tagstreamChanged = False
        self.hiddenTags = 0 #tags containing non-normalized data
        self.children = [] #if this has a script tag, parse children of it
        self.staticScript = '' #for those things not within objects append to this structure
        #special children types
        self.isJS = False #could be a reference (or self contains JS)
        self.isDelayJS = False #for OpenAction
        self.isEmbedded = False #for /EmbeddedFile
        self.isAnnot = False
        self.isObjStm = []
        self.is_xfa = False
        self.is_xfaData = False
        self.isEncrypt = False
        self.isFromObjStream = False
        self.contains_comment = False
        self.knownName = '' #related to annots
        self.subj = '' #related to annots
        self.doc_properties = []
        #self.isTitle = False
        #self.isKeywords = False
        self.xfaChildren = []
        if self.indata:
            self.parseObject()
    def __repr__(self):
        # Debug-oriented summary of the object's classification flags.
        out = 'pdfobj %s\n' % (self.keynum)
        if self.children:
            out += '\tchildren %s\n' % (str(self.children))
        if self.isJS:
            out += '\tisJS'
        if self.isAnnot:
            out += '\tisAnnot'
        for doc_prop in self.doc_properties:
            out += '\tis%s' % doc_prop
        if self.isDelayJS:
            out += '\tisDelayJS'
        return out
    def parseTag(self, tag, stream):
        '''
        Input: tag is the contents of /Tag1 value1 /Tag2 value2
            stream is (optional) contents between stream and endstream
        Output: self.tags and self.tagstream
        If stream is not set, then we should set it before it gets assigned to tagstream

        Implemented as a character-by-character state machine with states:
            INIT         - waiting for the '/' that starts a tag name
            TAG          - accumulating a tag name
            TAGVAL       - accumulating a bare value (possibly a [...] array)
            TAGVALCLOSED - accumulating a (...)-delimited value, which may
                           contain nested and backslash-escaped parentheses
            COMMENT      - skipping a %-comment until end of line
        '''
        state = 'INIT'
        precomment_state = 'INIT'
        curtag = ''
        curval = ''
        multiline = 0 # for tracking multiline in TAGVALCLOSED state
        uncleaned_tags = [] #output of statemachine
        num_paren_open = 0
        is_bracket_closed = True
        for index in range(0, len(tag)):
            #if self.keynum == '23 0':
            #    print state, index, hex(index), hex(ord(tag[index])), tag[index], curtag, len(curval), num_paren_open, is_bracket_closed
            if state == 'INIT':
                is_bracket_closed = True
                if tag[index] == '/':
                    state = 'TAG'
            elif state == 'TAG':
                is_bracket_closed = True
                if re.match('[a-zA-Z0-9#]', tag[index]):
                    curtag += tag[index]
                elif tag[index] == '/':
                    if curtag:
                        uncleaned_tags.append([state, curtag, '']) # no tag value
                    curtag = ''
                    state = 'TAG'
                elif tag[index] == '(':
                    state = 'TAGVALCLOSED'
                    num_paren_open = 0
                    multiline = 0
                    curval = '' # ignore the (, for the most part
                elif tag[index] == '[': # start of an array... probably
                    state = 'TAGVAL'
                    is_bracket_closed = False
                    curval = '['
                elif tag[index] == '\n':
                    state = 'TAG'
                elif tag[index] == '%':
                    precomment_state = state
                    state = 'COMMENT'
                else:
                    state = 'TAGVAL'
                    curval = ''
            elif state == 'COMMENT':
                self.contains_comment = True
                # a CR or LF terminates the comment; resume the prior state
                if tag[index] in ['\x0d', '\x0a']:
                    state = precomment_state
            elif state == 'TAGVAL':
                # Weird cases with arrays
                if tag[index] == '/' and (not tag[index - 1] == '\\\\') and \
                   ((curval and curval[0] == '[' and is_bracket_closed) or \
                   (not curval) or (curval and curval[0] != '[')):
                    # a new open bracket and we are not in the middle of a bracket
                    # or there is bracket here, but we ignore this one
                    if curtag or curval:
                        uncleaned_tags.append([state, curtag, curval])
                    state = 'TAG'
                    curtag = curval = ''
                elif curval and curval[0] == '[' and tag[index] == ']': # finished array
                    curval += tag[index]
                    is_bracket_closed = True
                elif tag[index] == '(':
                    #what do we do with curval? toss it
                    if re.match(r'^[\s\[\]\(\)<>]*$', curval): # look for any characters that indicate this isn't a TAGVALCLOSED
                        state = 'TAGVALCLOSED'
                        multiline = 0
                        if curtag in ['JS', 'O', 'U']:
                            num_paren_open += 1
                        if len(curval) > 0:
                            #print '\ttossed out %d characters (%s) because we entered TAGVALCLOSED state' % (len(curval),curval)
                            curval = ''
                    else: #keep processing?
                        curval += tag[index]
                elif tag[index] == '[' and curtag == 'XFA': # coming up on an array listing the XFA objects
                    is_bracket_closed = False
                    state = 'TAGVALCLOSED'
                # Normally ignore these, but they are useful when parsing the ID in the trailer
                elif (tag[index] == '<' or tag[index] == '>') and self.keynum != 'trailer':
                    pass
                elif tag[index] == ' ' and curval == '':
                    pass #already empty
                elif tag[index] == '%':
                    precomment_state = state
                    state = 'COMMENT'
                else:
                    curval += tag[index]
            elif state == 'TAGVALCLOSED':
                #in this state we know that the code started with (... therefore we can't end until we see )
                #the code could also have enclosed ( chars; therefore, this algorithm is greedy
                grab_more = 0 # if grab_more is set to 1, it means the tag isn't closing yet
                if tag[index] == ')': #possible closing tag
                    # a ')' preceded by an odd number of backslashes is escaped,
                    # i.e. still inside the string value
                    if (tag[index - 1] == '\\' and tag[index-2] != '\\') or \
                       (tag[index-1] == '\\' and tag[index-2] == '\\' and tag[index-3] == '\\') or \
                       ((curtag == 'JS' or curtag == 'JavaScript') and num_paren_open > 0 and tag[index-1] == '\\') or \
                       (curtag == 'XFA' and not is_bracket_closed): # we are in the middle of a JS string or an XFA array
                        grab_more = 1
                        if num_paren_open > 0:
                            num_paren_open -= 1
                    elif multiline: #tricky cases
                        #either a newline or "(" character leads us here.
                        #IGNORE THESE
                        #if re.match('^\)\s*($|\n\s*([^\)\s])',tag[index:]):
                        # #yep its closing time
                        # #this regex ensures there isn't another following close tag
                        #res = re.match('^(.*)\) $',tag[index:])
                        if index + 1 < len(tag):
                            indexParen = tag[index + 1:].find(')')
                            #indexNewL = tag[index+1:].find('\n')
                            if indexParen > -1: # and (indexNewL == -1 or indexNewL > indexParen):
                                if not re.match('^\s*\/[A-Za-z0-9]+\s*\(', tag[index + 1:]):
                                    grab_more = 1
                    if grab_more:
                        curval += tag[index]
                    else: #ok ok, its simply closing
                        uncleaned_tags.append([state, curtag, curval])
                        state = 'INIT'
                        #print '%s (TAGVALCLOSED), length=%d bytes with %d/%d completed (around %s)' % (curtag, len(curval),index,len(tag), tag[index-20:index+20])
                        curtag = curval = ''
                elif tag[index] == '(': #tag[index] == '\n'
                    num_paren_open += 1
                    curval += tag[index]
                elif tag[index] == ']' and curtag != 'JS' and not is_bracket_closed: # can have ]s inside JS strings...
                    is_bracket_closed = True
                elif tag[index] == '%' and num_paren_open == 0 and curtag not in ['JS', 'O', 'U']: #can have % inside JS strings... And in O and U strings in Encrypt Objects
                    precomment_state = state
                    state = 'COMMENT'
                else:
                    curval += tag[index]
            else:
                print 'invalid state in parseTag: %s' % state
        if curtag: #an ending tag with NO final separator
            uncleaned_tags.append(['ENDTAG', curtag, curval])
        #clean uncleaned_tags and put in self.tags instead
        for source, tagtype, tagdata in uncleaned_tags:
            # normalise #xx escapes in tag names; count obfuscated ones
            newtagtype = pdfobj.fixPound(tagtype)
            if newtagtype != tagtype:
                self.hiddenTags += 1
                tagtype = newtagtype
            #newlines in tagtype? ONLY for state != TAGVALCLOSED
            if source != 'TAGVALCLOSED':
                #its okay to replace newlines, spaces, tabs here
                tagdata = re.sub('[\s\r\n]+', ' ', tagdata)
            # You can have octal further in the string, but that can sometimes cause problems
            # so if there is a problem, just back out and use the original
            if re.search('([^\\\\]\\\\[0-9]{3}\s*)+', tagdata): #ie. need to convert \040 == 0x20
                original = tagdata
                try:
                    tagdata = re.sub('\\\\([0-9]{3})', lambda mo: chr(int(mo.group(1), 8)), tagdata)
                except:
                    tagdata = original
            # to my dismay, there are lot of tags to unescape
            # (unrecognised escape sequences are silently dropped)
            unescaped_tagdata = ''
            backslash = False
            for d in tagdata:
                if backslash:
                    backslash = False
                    if d == 'b':
                        unescaped_tagdata += '\b'
                    elif d == 'f':
                        unescaped_tagdata += '\f'
                    elif d == 'n':
                        unescaped_tagdata += '\n'
                    elif d == 'r':
                        unescaped_tagdata += '\r'
                    elif d == 's':
                        unescaped_tagdata += 's' # this one is weird, I know
                    elif d == 't':
                        unescaped_tagdata += '\t'
                    elif d in ('(', ')', '\\'):
                        unescaped_tagdata += d
                    elif d == '\'' and tagtype == 'JS':
                        unescaped_tagdata += '\\\''
                elif d == '\\':
                    backslash = True
                else:
                    unescaped_tagdata += d
            tagdata = unescaped_tagdata
            #print 'set stream to %s; %s; %d bytes' % (source, tagtype, len(tagdata))
            #sometimes it's a short snippet without a ; at the end. So add a ;
            if len(tagdata) < 50 and tagdata.find('AFDate') != -1 and tagdata[-1] != ';':
                tagdata += ';'
            # Only really want the JavaScript, and then only when it's not in a unicode format
            if not stream and \
               (source == 'TAGVALCLOSED' or source == 'ENDTAG') and \
               (tagtype == 'JS' or tagtype == 'JavaScript') and \
               len(tagdata) > 2 and tagdata[0:2] != '\xfe\xff':
                stream = tagdata
            self.tags.append([source, tagtype, tagdata])
        self.tagstream = stream
        # olivia is defined later in this module; DEBUG is its verbosity flag
        if olivia.DEBUG:
            print 'obj %s: ' % (self.keynum)
            for source, tagtype, tagdata in self.tags:
                tagtxt = '\ttag %s' % re.sub('\n', '', tagtype)
                if len(tagdata) > 30:
                    tagtxt += ' = [data %d bytes]' % len(tagdata)
                elif tagdata:
                    tagtxt += ' = '
                    for c in tagdata:
                        if c in string.printable and c != '\n':
                            tagtxt += c
                        else:
                            tagtxt += '\\x%02x' % (ord(c))
                print '%-50s (%s)' % (tagtxt, source)
        #end
    def parseChildren(self):
        '''
        Input: self.tags (must be populated)
        Output: self.children

        A tag value of the form "N M R" is an indirect reference; record it
        as a child.  /XFA values may reference several objects at once.
        '''
        for state, k, kval in self.tags:
            hasRef = re.search('\+?(\d+)\s+\+?(\d+)\s+R', kval)
            if hasRef:
                objkey = hasRef.group(1) + ' ' + hasRef.group(2)
                self.children.append([k, objkey])
            if k == 'XFA':
                kids = re.findall('(\d+\s+\d+)\s+R', kval)
                for kid in kids:
                    self.xfaChildren.append([k, kid])
    def parseObject(self):
        # Split self.indata into (dictionary, stream) pairs and feed each to
        # parseTag, with two repair passes for greedy-regex mis-parses.
        #previously this was non-greedy, but js with '>>' does mess things up in that case
        #to solve the problem, do both
        #if olivia.DEBUG:
        #    print '\tstarting object len %d' % len(self.indata)
        tags = re.findall('<<(.*)>>[\s\r\n%]*(?:stream[\s\r\n]*(.*?)[\r\n]*endstream)?', self.indata, re.MULTILINE | re.DOTALL | re.IGNORECASE)
        if tags:
            for tag, stream in tags:
                gttag = tag.find('>>')
                streamtag = tag.find('stream')
                endstream_tag_end = self.indata.rfind('endstream')
                endstream_tag_begin = self.indata.find('endstream')
                #
                # This means that there was an improper parsing because the tag shouldn't contain a stream object
                if endstream_tag_end != -1 and 0 < gttag < streamtag:
                    # do this in case the word stream is in the tag data somewhere...
                    stream_location_match = re.search('>>[\s\r\n%]*stream?', self.indata, re.MULTILINE | re.DOTALL | re.IGNORECASE)
                    if stream_location_match:
                        stream_location = stream_location_match.start()
                    else:
                        stream_location = self.indata.find('stream')
                    stream_start = self.indata.find('stream', stream_location)
                    stream_match = re.search('stream[\s\r\n]*(.*?)[\r\n]*endstream', self.indata, re.MULTILINE | re.DOTALL | re.IGNORECASE)
                    stream_data = ''
                    # Only search to start of stream, a compressed stream can have >> in it, and that will through off the regex
                    tag_match = re.search('<<(.*)>>', self.indata[0:stream_start], re.MULTILINE | re.DOTALL | re.IGNORECASE)
                    if tag_match and stream_match:
                        stream_data = stream_match.group(1)
                        tag = tag_match.group(1)
                        tags = [(tag, stream_data)]
                #
                # This checks if the word endstream happens inside the stream
                if endstream_tag_begin != -1 and endstream_tag_begin != endstream_tag_end:
                    stream_location_match = re.search('>>[\s\r\n%]*stream?', self.indata, re.MULTILINE | re.DOTALL | re.IGNORECASE)
                    if stream_location_match:
                        stream_location = stream_location_match.start()
                    else:
                        stream_location = self.indata.find('stream')
                    stream_start = self.indata.find('stream', stream_location)
                    stream_match = re.search('stream[\s\r\n]*(.*?)[\r\n]*endstream$', self.indata, re.MULTILINE | re.DOTALL | re.IGNORECASE)
                    tag_match = re.search('<<(.*)>>', self.indata[0:stream_start], re.MULTILINE | re.DOTALL | re.IGNORECASE)
                    stream_data = ''
                    if stream_match and tag_match:
                        stream_data = stream_match.group(1)
                        tag = tag_match.group(1)
                        tags = [(tag, stream_data)]
        if not tags: #Error parsing object!
            return
        for tag, stream in tags:
            self.parseTag(tag, stream)
        self.parseChildren()
    @staticmethod
    def fixPound(i):
        #returns '#3a' substituted with ':', etc
        #strips newlines, '[', and ']' characters
        #this allows indexing in arrays
        i = re.sub('[\[\]\n]', '', i)
        i = re.sub('<<$', '', i)
        return re.sub('#([a-fA-F0-9]{2})', lambda mo: chr(int('0x' + mo.group(1), 0)), i)
    @staticmethod
    def lzwdecode(data):
        # Best-effort LZW decompression (lzw is a bundled module);
        # falls back to returning the raw data on any error.
        try:
            return ''.join(lzw.LZWDecoder(cStringIO.StringIO(data)).run())
        except:
            return data
    @staticmethod
    def rldecode(input):
        # PDF RunLengthDecode filter: a length byte of 0x80 terminates;
        # >=0x80 repeats the following byte (257 - key_len) times, otherwise
        # the next (key_len + 1) bytes are copied literally.  Any decoding
        # error returns the input unchanged.
        output = ''
        index = 0
        try:
            key_len = ord(input[index])
            while key_len != 0x80:
                index += 1
                if key_len & 0x80:
                    output += input[index] * (256 - key_len + 1)
                    index += 1
                else:
                    output += input[index:index + key_len + 1]
                    index += key_len + 1
                key_len = ord(input[index])
        except:
            return input
        return output
    @staticmethod
    def ascii85(input):
        # Decoder for the PDF ASCII85Decode filter: strips whitespace and the
        # <~ ~> delimiters, then converts each 5-character group into 4 bytes.
        # NOTE: 'shift /= 85' relies on Python 2 integer division.
        outdata = ''
        input = re.sub('\s', '', input)
        input = re.sub('^<~', '', input)
        input = re.sub('~>$', '', input)
        for i in range(0, len(input), 5):
            bytes = input[i:i + 5]
            fraglen = len(bytes)
            if bytes[0] == 'z':
                pass #ignore
            if bytes[0] == 'y':
                pass #ignore
            if i + 5 >= len(input):
                #data not divisible by 5
                bytes = input[i:]
                fraglen = len(bytes)
                if fraglen > 1:
                    bytes += 'vvv'
            total = 0
            shift = 85 * 85 * 85 * 85
            for c in bytes:
                total += shift * (ord(c) - 33)
                shift /= 85
            if fraglen > 1:
                outdata += chr((total >> 24) % 256)
                if fraglen > 2:
                    outdata += chr((total >> 16) % 256)
                    if fraglen > 3:
                        outdata += chr((total >> 8) % 256)
                        if fraglen > 4:
                            outdata += chr((total) % 256)
        return outdata
class olivia(object):
DEBUG = 0
def __init__(self, indata, infile, password=''):
self.indata = indata
self.size = len(self.indata)
self.infile = infile
self.objects = {}
self.pages = []
self.numPages = 0
self.list_obj = []
self.jsObjects = []
self.encrypt_key = ''
self.encrypt_key_valid = False
self.encrypt_object = {}
self.encrypt_password = password
self.xfaObjects = []
def parse(self):
'''
#parsing xref tables
xrefs = re.findall('xref\s*\n\d+\s+(\d+)\s*\n((\d+\s+\d+\s+[fn]\s*\n)+)\s*trailer\s*\n',self.indata)#.*?startxref\s*\n(\d+)\s*\n\s*%%EOF\s*',self.indata)
for entries, table,junk in xrefs:
entries = int(entries)
print 'entries=',entries
lines = table.split('\n')
for line in lines:
valid = re.match('\s*(\d+)\s+(\d+)\s+[fn]\s*',line)
if valid:
offset,zero = int(valid.group(1)), int(valid.group(2))
print 'line = ', offset, zero
#offset = int(offset)
'''
objs = re.findall('\n?(\d+)\s+(\d+)[\x00\s]+obj[\s]*(.*?)\s*\n?(?<!%)(endobj|.ndobj|e.dobj|en.obj|end.bj|endo.j|endob.|objend)', self.indata, re.MULTILINE | re.DOTALL)
if objs:
for obj in objs:
#fill all objects
key = obj[0] + ' ' + obj[1]
if not key in self.list_obj:
self.list_obj.append(key)
else: # There are cases with the two objects have the same number, because PDFs are awesome that way
key = key + ' dup'
self.list_obj.append(key)
self.objects[key] = pdfobj(key, obj[2])
trailers = re.findall('(trailer[\s\r\n]*<<(.*?)>>)', self.indata, re.MULTILINE | re.DOTALL)
for trailertags in trailers:
trailerData = trailertags[1]
#
# Check for a dictionary inside the trailer
#
isDict = trailerData.find("<<")
if isDict != -1:
offset = self.indata.find(trailertags[0])
trailerData = self.extractTrailerData(offset)
trailerstream = '' #no stream in trailer
trailerobj = pdfobj('trailer', '') #empty second parameter indicates not to do an object parse
trailerobj.parseTag(trailerData, trailerstream)
trailerobj.parseChildren()
key = 'trailer'
if not key in self.list_obj:
self.list_obj.append(key)
else: # There are cases with the two objects have the same number, because PDFs are awesome that way
key = key + ' dup'
self.list_obj.append(key)
self.objects[key] = trailerobj
for tag, value in trailerobj.children:
# If there is an encrypt object, it should be specified in the trailer
# (in practice, that's not always the case... *sigh*)
if tag == 'Encrypt' and not self.encrypt_key_valid:
# Make sure the encrypt object is actually there
if value in self.objects:
self.objects[value].isEncrypt = True
self.encrypt_object = self.populate_encrypt_object(self.objects[value])
fileId = ''
for state, tag, val in trailerobj.tags:
if tag == 'ID':
ids = re.findall('<([\d\w]*)>', val)
# Just in case the ID has something I'm not expecting
if ids:
try:
fileId = binascii.unhexlify(ids[0])
except:
pass
else:
fileId = val
# yay for default passwords
padding = binascii.unhexlify('28BF4E5E4E758A4164004E56FFFA01082E2E00B6D0683E802F0CA9FE6453697A')
# limit of 16 characters
passwd = (self.encrypt_password + padding)[0:32]
self.encrypt_key = self.compute_encrypt_key(self.encrypt_object, passwd, fileId)
self.encrypt_key_valid = self.validate_encrypt_key(self.encrypt_key, padding, fileId, self.encrypt_object)
break
# but wait, sometimes the encrypt object is not specified in the trailer, yet sometimes another
# object has it in it, so search for it now
if not self.encrypt_key_valid:
encrypt_object_key = ''
fileId = '\x00' * 16
for key in self.list_obj:
if key == 'trailer':
continue
for kstate, k, kval in self.objects[key].tags:
if k == 'Encrypt':
for child_type, child_key in self.objects[key].children:
if child_type == 'Encrypt':
self.objects[child_key].isEncrypt = True
encrypt_object_key = child_key
break
if k == 'ID':
ids = re.findall('\[([\d\w]*)\]', kval)
if ids:
firstId = ids[0]
# for some reason it's there twice...
firstId = firstId[0:len(firstId)/2]
try:
fileId = binascii.unhexlify(firstId)
except:
pass
if encrypt_object_key and fileId:
break
if encrypt_object_key and fileId: # we found it
self.encrypt_object = self.populate_encrypt_object(self.objects[encrypt_object_key])
padding = binascii.unhexlify('28BF4E5E4E758A4164004E56FFFA01082E2E00B6D0683E802F0CA9FE6453697A')
# limit of 32 characters here
passwd = (self.encrypt_password + padding)[0:32]
self.encrypt_key = self.compute_encrypt_key(self.encrypt_object, passwd, fileId)
if self.encrypt_object['V'] == 5 and self.encrypt_key != '\xca\x1e\xb0' and 'Perms' in self.encrypt_object:
aes = AES.new(self.encrypt_key, AES.MODE_ECB)
decryptedPerms = aes.decrypt(self.encrypt_object['Perms'])
if decryptedPerms[0:4] == self.encrypt_object['P'][0:4] and decryptedPerms[9:12] == 'adb':
self.encrypt_key_valid = True
else:
self.encrypt_key_valid = self.validate_encrypt_key(self.encrypt_key, padding, fileId, self.encrypt_object)
for key in self.list_obj: #sorted(self.objects.keys()):
#set object options
if self.encrypt_key and self.encrypt_key_valid:
if self.objects[key].tagstream and not self.objects[key].isEncrypt and not self.objects[key].isFromObjStream:
if self.encrypt_object['algorithm'] == 'RC4':
self.objects[key].tagstream = self.decryptRC4(self.objects[key].tagstream, key)
elif self.encrypt_object['algorithm'] == 'AES':
self.objects[key].tagstream = self.decryptAES(self.objects[key].tagstream, key)
self.objects[key].tagstreamModified = True
for kstate, k, kval in self.objects[key].tags:
if k == 'OpenAction':
# sometimes OpenAction is an array, so check for that
if not kval or kval[0] != '[':
self.objects[key].isDelayJS = True
for child_type, child_key in self.objects[key].children:
if child_type == 'OpenAction' and child_key in self.objects:
self.objects[child_key].isDelayJS = False # This isn't the JS, the children have it
for cState, cType, cValue in self.objects[child_key].tags:
if cType in ['JavaScript', 'JS']:
self.objects[child_key].isDelayJS = True
elif olivia.DEBUG:
print 'error: not a valid object for child (%s)' % (child_key)
if k in ['JavaScript', 'JS']:
self.objects[key].isJS = True
foundChildJs = False
for child_type, child_key in self.objects[key].children: # Is the JS with the children?
if child_key in self.objects and child_type in ['JS', 'JavaScript']:
self.objects[child_key].isJS = True
self.objects[key].isJS = False
if child_key not in self.jsObjects:
self.jsObjects.append(child_key)
foundChildJs = True
if not foundChildJs: # JS is here
if key not in self.jsObjects:
self.jsObjects.append(key)
if k == 'XFA':
self.objects[key].is_xfa = True
for xfaType, xfaKey in self.objects[key].xfaChildren:
if xfaKey in self.objects:
self.objects[xfaKey].is_xfaData = True
if k == 'NM':
self.objects[key].knownName = kval
if k == 'Subj':
self.objects[key].subj = kval
if k == 'EmbeddedFile':
self.objects[key].isEmbedded = True
if k == 'Annot':
#since JavaScript can call getAnnots() we must populate these entries now
#don't handle /Annots (precursory tag), children will contain Subj element
self.objects[key].isAnnot = True
for type, childkey in self.objects[key].children:
if childkey in self.objects and (type == 'Subj'):
self.objects[childkey].isAnnot = True
self.jsObjects.append(childkey)
if k == 'Page':
hasContents = False
for childtype, childkey in self.objects[key].children:
if childtype == 'Contents':
self.pages.append(childkey)
hasContents = True
if not hasContents:
self.pages.append(key)
if k == 'Pages':
for pagestate, pagetag, pagevalue in self.objects[key].tags:
if pagetag == 'Count':
try:
self.numPages += int(pagevalue)
except ValueError:
# Check children
for childtype, childkey in self.objects[key].children:
if childtype == 'Count':
pagevalue = self.objects[childkey].indata
try:
self.numPages += int(pagevalue)
except ValueError:
pass
#populate pdfobj's doc_properties with those that exist
enum_properties = ['Title', 'Author', 'Subject', 'Keywords', 'Creator', 'Producer', 'CreationDate', 'ModDate', 'plot']
if k in enum_properties:
value = kval
value = re.sub('[\xff\xfe\x00]', '', value)
isReference = re.match('^\s*\d+\s+\d+\s+R\s*$', value)
if isReference:
validReference = False
for child_type, child_key in self.objects[key].children:
if child_key in self.objects and (child_type == k):
validReference = True
self.objects[child_key].doc_properties.append(k.lower())
self.jsObjects.append(child_key)
if not validReference:
if olivia.DEBUG:
print '[warning] possible invalid reference in %s' % (k)
self.objects[key].doc_properties.append(k.lower())
else:
#not a reference, use the direct value
value = re.sub('\'', '\\x27', value)
self.objects[key].staticScript += 'info.%s = String(\'%s\');\n' % (k.lower(), olivia.do_hexAscii(value))
self.objects[key].staticScript += 'this.%s = info.%s;\n' % (k.lower(), k.lower())
self.objects[key].staticScript += 'info.%s = info.%s;\n' % (k, k.lower())
self.objects[key].staticScript += 'app.doc.%s = info.%s;\n' % (k.lower(), k.lower())
self.objects[key].staticScript += 'app.doc.%s = info.%s;\n' % (k, k.lower())
if k == 'CreationDate':
self.objects[key].staticScript += 'app.doc.creationDate = info.creationdate;\n'
self.objects[key].staticScript += 'info.creationDate = info.creationdate;\n'
if key not in self.jsObjects:
self.jsObjects.append(key)
for kstate, k, kval in self.objects[key].tags:
# Multiple filters, sometimes pound issues, throws off the decode, so handle it here
if k == 'Filter':
kval = pdfobj.fixPound(kval)
filters = re.findall('/(\w+)', kval)
if filters:
for filter in filters:
if filter == 'FlateDecode' or filter == 'Fl':
try:
self.objects[key].tagstream = zlib.decompress(self.objects[key].tagstream)
except zlib.error, msg:
if olivia.DEBUG:
print 'failed to decompress object %s (inlen %d)' % (key, len(self.objects[key].tagstream))
print self.objects[key].tagstream
self.objects[key].tagstream = '' #failed to decompress
if filter == 'ASCIIHexDecode' or filter == 'AHx':
result = ''
counter = 0
self.objects[key].tagstream = re.sub('[^a-fA-F0-9]+', '', self.objects[key].tagstream)
for i in range(0, len(self.objects[key].tagstream), 2):
result += chr(int('0x' + self.objects[key].tagstream[i:i + 2], 0))
self.objects[key].tagstream = result
if filter == 'ASCII85Decode' or filter == 'A85':
self.objects[key].tagstream = pdfobj.ascii85(self.objects[key].tagstream)
if filter == 'LZWDecode' or filter == 'LZW':
self.objects[key].tagstream = pdfobj.lzwdecode(self.objects[key].tagstream)
if filter == 'RunLengthDecode' or filter == 'RL':
self.objects[key].tagstream = pdfobj.rldecode(self.objects[key].tagstream)
if k == 'FlateDecode' or k == 'Fl':
try:
self.objects[key].tagstream = zlib.decompress(self.objects[key].tagstream)
except zlib.error, msg:
# There is a chance our regex removed too many \r or \n when pulling out the stream. We probably
# should fix this there, but in the mean time, if it fails, try adding them back.
lame_fixes = ["\n", "\r"]
lame_fix_worked = True
for lame_fix in lame_fixes:
try:
self.objects[key].tagstream = zlib.decompress(self.objects[key].tagstream+lame_fix)
lame_fix_worked = True
break
except zlib.error, msg:
pass
if not lame_fix_worked:
if olivia.DEBUG:
print 'failed to decompress object %s (inlen %d)' % (key, len(self.objects[key].tagstream))
print self.objects[key].tagstream
self.objects[key].tagstream = '' #failed to decompress
if k == 'ASCIIHexDecode' or k == 'AHx':
result = ''
counter = 0
self.objects[key].tagstream = re.sub('[^a-fA-F0-9]+', '', self.objects[key].tagstream)
for i in range(0, len(self.objects[key].tagstream), 2):
result += chr(int('0x' + self.objects[key].tagstream[i:i + 2], 0))
self.objects[key].tagstream = result
if k == 'ASCII85Decode' or k == 'A85':
self.objects[key].tagstream = pdfobj.ascii85(self.objects[key].tagstream)
if k == 'LZWDecode' or k == 'LZW':
self.objects[key].tagstream = pdfobj.lzwdecode(self.objects[key].tagstream)
if k == 'RunLengthDecode' or k == 'RL':
self.objects[key].tagstream = pdfobj.rldecode(self.objects[key].tagstream)
# Check for Object Streams, but only if we don't have an error with tagstream
if not self.objects[key].tagstreamError:
object_stream_data = ''
object_stream_n = 0
object_stream_first = 0
for kstate, k, kval in self.objects[key].tags:
if k == 'ObjStm':
object_stream_data = self.objects[key].tagstream
if k == 'N':
# just in case
try:
object_stream_n = int(kval)
except:
pass
if k == 'First':
# ...
try:
object_stream_first = int(kval)
except:
pass
if object_stream_data != '' and object_stream_n != 0 and object_stream_first != 0:
self.parse_object_stream(object_stream_data, object_stream_n, object_stream_first)
self.objects[key].tagstream = olivia.applyFilter(self.objects[key].tagstream)
if olivia.DEBUG and self.objects[key].tagstream.startswith('MZ'):
print 'PDF file has embedded MZ file'
else:
print 'Fatal error: pdf has no objects in ' + self.infile
def populate_encrypt_object(self, encrypt_object):
e = {}
e['V'] = 0
e['R'] = 0
e['O'] = ''
e['U'] = ''
for state, tag, value in encrypt_object.tags:
# Multiple lengths, referring to different things, take the bigger one, that *should* be right
if tag == 'Length' and 'Length' in e:
if int(value) > int(e[tag]):
e[tag] = value
continue
e[tag] = value
e['KeyLength'] = 5
if 'AESV2' in e or 'AESV3' in e:
e['algorithm'] = 'AES'
else:
e['algorithm'] = 'RC4'
if 'EncryptMetadata' in e:
if e['EncryptMetadata'].lower() == 'false':
e['EncryptMetadata'] = False
else:
e['EncryptMetadata'] = True
if 'V' in e:
e['V'] = int(e['V'])
if e['V'] >= 2 and 'Length' in e:
e['KeyLength'] = int(e['Length'])/8
if 'R' in e:
e['R'] = int(e['R'])
if e['R'] <= 4 and len(e['O']) > 32:
e['O'] = binascii.unhexlify(e['O'].strip())
if e['R'] <= 4 and len(e['U']) > 32:
e['U'] = binascii.unhexlify(e['U'].strip())
if 'P' in e:
e['P'] = struct.pack('L', int(e['P']) & 0xffffffff)
return e
    def compute_encrypt_key(self, encrypt_object, password, fileId):
        '''Computes the encrypt key based on values in encrypt object'''
        # Revision <= 4: standard security handler derivation (PDF spec
        # Algorithm 3.2): MD5 over the padded password, /O, the low 4 bytes
        # of /P and the file ID, truncated to the configured key length.
        if encrypt_object['R'] <= 4:
            h = hashlib.md5()
            h.update(password)
            h.update(encrypt_object['O'])
            h.update(encrypt_object['P'][0:4])
            h.update(fileId)
            if encrypt_object['R'] == 4 and not encrypt_object['EncryptMetadata']:
                # Unencrypted metadata mixes 0xFFFFFFFF into the hash.
                h.update("\xff\xff\xff\xff")
            key = h.digest()[0:encrypt_object['KeyLength']]
            if encrypt_object['R'] >= 3:
                # Revision 3+: re-hash the truncated key 50 more times.
                for i in range(50):
                    key = hashlib.md5(key[0:encrypt_object['KeyLength']]).digest()
                key = key[0:encrypt_object['KeyLength']]
            return key
        elif encrypt_object['R'] == 5:
            # Revision 5 (AESV3).  Try the user password first: SHA-256 of
            # the validation salt (bytes 32:40 of /U) must match /U[0:32].
            # NOTE(review): only the salt is hashed, i.e. this assumes an
            # empty user password -- confirm.
            user_key = hashlib.sha256(encrypt_object['U'][32:40]).digest()
            if user_key == encrypt_object['U'][0:32]: # success!
                # Decrypt the file key from /UE with the intermediate key.
                almost_key = hashlib.sha256(encrypt_object['U'][40:48]).digest()
                aes = AES.new(almost_key, AES.MODE_CBC, '\x00'*16)
                the_key = aes.decrypt(encrypt_object['UE'])
                return the_key
            #
            # Ok, then check the owner password
            #
            owner_sha = hashlib.sha256()
            owner_sha.update(encrypt_object['O'][32:40])
            owner_sha.update(encrypt_object['U'][0:48])
            owner_hash = owner_sha.digest()
            if owner_hash == encrypt_object['O'][0:32]:
                almost_hash = hashlib.sha256()
                almost_hash.update(encrypt_object['O'][40:48])
                almost_hash.update(encrypt_object['U'][0:48])
                almost_key = almost_hash.digest()
                aes = AES.new(almost_key, AES.MODE_CBC, '\x00'*16)
                the_key = aes.decrypt(encrypt_object['OE'])
                return the_key
        else:
            print "No good", encrypt_object['R']
        # Sentinel for "no key derived"; callers compare against this value.
        return '\xca\x1e\xb0'
    def validate_encrypt_key(self, key, password, fileId, encrypt_object):
        '''Verifies that the encryption key is correct'''
        # Revision 2 (PDF spec Algorithm 3.4): /U is simply the padding
        # string RC4-encrypted with the file key.
        if encrypt_object['R'] == 2:
            rc4 = ARC4.new(key)
            password_encrypted = rc4.encrypt(password)
            if encrypt_object['U'] == password_encrypted:
                return True
        elif encrypt_object['R'] >= 3:
            # Revision 3+ (Algorithm 3.5): RC4 of MD5(padding + file ID),
            # then 19 further RC4 passes keyed by the file key XORed with
            # the pass number.  Only the first 16 bytes of /U count.
            m = hashlib.md5()
            m.update(password)
            m.update(fileId)
            cHash = m.digest()
            rc4 = ARC4.new(key)
            dHash = rc4.encrypt(cHash)
            for i in range(1, 20):
                newKey = ''
                for k in key:
                    newKey += chr(ord(k) ^ i)
                stepE = ARC4.new(newKey)
                dHash = stepE.encrypt(dHash)
            if dHash == encrypt_object['U'][0:16]:
                return True
        else:
            print "No good", encrypt_object['R']
        return False
def parse_object_stream(self, data, n, first):
integer_pairs = re.findall('(\d+) +(\d+)', data[0:first])
for i, pairs in enumerate(integer_pairs):
key = str(pairs[0]) + " 0"
start_offset = first + int(pairs[1])
if i+1 == n:
end_offset = None
else:
end_offset = first + int(integer_pairs[i+1][1])
obj_data = data[start_offset:end_offset]
if not key in self.list_obj:
self.list_obj.append(key)
else:
key = key + ' dup'
self.list_obj.append(key)
self.objects[key] = pdfobj(key, obj_data)
self.objects[key].isFromObjStream = True
return
def extractTrailerData(self, trailer_start):
dictionaries = 0
trailer_end = trailer_start
first_dictionary = False
while dictionaries != 0 or not first_dictionary:
d = self.indata[trailer_end:trailer_end+2]
if d == '<<':
first_dictionary = True
dictionaries += 1
trailer_end += 2
continue
elif d == '>>':
dictionaries -= 1
trailer_end += 2
continue
elif d == '':
break
trailer_end += 1
trailer = self.indata[trailer_start:trailer_end]
return trailer
def decryptRC4(self, data, key):
'''
Input: data is the data to decrypt, key is the obj information of the form '5 0'
Assumptions: self.encrypt_key is set
Output: returns string of decrypted data
'''
try:
obj, rev = key.split(' ')
keyLength = self.encrypt_object['KeyLength'] + 5
if keyLength > 16:
keyLength = 16
decrypt_key = hashlib.md5(self.encrypt_key + struct.pack('L', int(obj))[0:3] + struct.pack('L', int(rev))[0:2]).digest()[0:keyLength]
cipher = ARC4.new(decrypt_key)
return cipher.decrypt(data)
except:
return ''
    def decryptAES(self, aes_data, objectKey):
        '''Function that will take AES encrypted data and decrypt it'''
        # The first 16 bytes of the stream are the CBC initialization vector.
        if self.encrypt_object['V'] <= 4:
            try:
                obj, rev = objectKey.split(' ')
                # Per-object key (PDF spec Algorithm 3.1): file key + low 3
                # bytes of the object number + low 2 bytes of the generation
                # number + the constant "sAlT", MD5'd and truncated.
                keyLength = self.encrypt_object['KeyLength'] + 5
                if keyLength > 16:
                    keyLength = 16
                m = hashlib.md5()
                m.update(self.encrypt_key)
                m.update(struct.pack('L', int(obj))[0:3])
                m.update(struct.pack('L', int(rev))[0:2])
                m.update('\x73\x41\x6c\x54')
                aes_key = m.digest()[0:keyLength]
                iv = aes_data[0:16]
                aes = AES.new(aes_key, AES.MODE_CBC, iv)
                # Pad the ciphertext up to a block boundary so decrypt()
                # accepts it, then slice the same count off the plaintext.
                # NOTE(review): when len(aes_data) is already a multiple of
                # 16, pad_size is 16 and a whole bogus block is appended and
                # stripped -- confirm that is intended.
                pad_size = 16 - (len(aes_data)%16)
                pad = "C" * pad_size
                data = aes.decrypt(aes_data[16:] + pad)[0:(pad_size*-1)]
                return data
            except Exception:
                return ''
        else:
            # V == 5 (AESV3): the file key is used directly for every object.
            try:
                iv = aes_data[0:16]
                aes = AES.new(self.encrypt_key, AES.MODE_CBC, iv)
                pad_size = 16 - (len(aes_data)%16)
                pad = "C" * pad_size
                data = aes.decrypt(aes_data[16:] + pad)[0:(pad_size*-1)]
                return data
            except Exception:
                return ''
def is_valid(self):
'''Determines if this is a valid PDF file or not'''
if 0 <= self.indata[0:1024].find('%PDF-') <= 1024:
return True
return False
def __repr__(self):
if not self.is_valid():
return 'Invalid PDF file "%s"' % (self.infile)
out = 'PDF file %s has %d obj items\n' % (self.infile, len(self.objects))
for obj in sorted(self.objects.keys()):
out += str(self.objects[obj]) + '\n'
return out
    def get_javascript(self):
        '''Extracts all JavaScript from the PDF.

        Returns a 3-tuple (javascript, header_js, sloppy_flag):
          javascript  -- concatenated JS bodies (delayed JS appended last),
                         stripped of control and high-bit characters
          header_js   -- generated setup script (annotations, document
                         properties, page text, filesize) to run first
          sloppy_flag -- True when no real JS was found and object streams
                         were scraped for "function "/"var " as a fallback
        '''
        out = ''
        sloppy_flag = False
        # Phase 1: build the synthetic "header" script exposing annotations
        # and document properties that scripts may query via the DOM.
        for jskey in self.jsObjects:
            if self.objects[jskey].tagstreamError:
                continue
            if self.objects[jskey].staticScript:
                out += self.objects[jskey].staticScript
            if self.objects[jskey].tagstream:
                value = self.objects[jskey].tagstream
                value = re.sub('\'', '\\x27', value)
                # Sometimes there is just weird data there (or unicode), maybe getting rid of it helps
                # (like below)
                value = re.sub('[\x00-\x1f\x7f-\xff]', '', value)
                if self.objects[jskey].isAnnot:
                    out += 'var zzza = []; if(zzzannot.length > 0){ zzza=zzzannot.pop(); } zzza.push({subject:\'%s\'}); zzzannot.push(zzza);\n' % (value) #getAnnots
                if self.objects[jskey].knownName:
                    if self.objects[jskey].subj:
                        subj = self.objects[jskey].subj
                    else:
                        subj = value
                    subj = re.sub('[\x00-\x1f\x7f-\xff]', '', subj) # <- below
                    out += 'zzzannot2["%s"] = {subject:\'%s\'};\n' % (self.objects[jskey].knownName, subj) #getAnnot
                for doc_prop in self.objects[jskey].doc_properties:
                    out += 'info.%s = String(\'%s\'); this.%s = info.%s;\n' % (doc_prop, olivia.do_hexAscii(value), doc_prop, doc_prop)
        # Expose page text and page counts for scripts using getPageNthWord etc.
        if self.pages:
            for page in self.pages:
                if page in self.objects:
                    lines = self.objects[page].tagstream.split('\n')
                    out += 'c = []; '
                    for line in lines:
                        # Text between BT/ET operators, first parenthesized run.
                        text_be = re.findall('BT[^(]*\(([^)]+)\)[^)]*?ET', line)
                        for hexdata in text_be:
                            words = hexdata.strip().split(' ')
                            for word in words:
                                out += 'c.push("%s"); ' % (olivia.do_hexAscii(word))
                    out += 'zzzpages.push(c); this.numPages = zzzpages.length; xfa.host.numPages = zzzpages.length;\n'
                else:
                    out += 'this.numPages = ' + str(self.numPages) + ';\n'
                    out += 'xfa.host.numPages = ' + str(self.numPages) + ';\n'
        else:
            out += 'c = []; '
            out += 'zzzpages.push(c); this.numPages = zzzpages.length; xfa.host.numPages = zzzpages.length;\n'
        out += '\nfilesize = ' + str(self.size) + ';\n'
        if out:
            out += '\n//jsunpack End PDF headers\n'
        headersjs = out #split value into 2 return values [js, header_js]
        # Phase 2: collect the actual JavaScript payloads.
        out = ''
        delayout = ''
        for jskey in self.jsObjects:
            if self.objects[jskey].tagstreamError:
                continue
            # only do it if no encryption or it was decrypted
            if self.encrypt_key == '' or self.encrypt_key_valid == True:
                if self.objects[jskey].isDelayJS: #do this first incase the tag has /OpenAction /JS (funct())
                    if olivia.DEBUG:
                        print 'Found JavaScript (delayed) in %s (%d bytes)' % (jskey, len(self.objects[jskey].tagstream))
                    delayout += self.objects[jskey].tagstream
                elif self.objects[jskey].isJS:
                    if olivia.DEBUG:
                        print 'Found JavaScript in %s (%d bytes)' % (jskey, len(self.objects[jskey].tagstream))
                    #if jskey == '84 0':
                    # print self.objects[jskey].tagstream
                    # Heuristic: a NUL at offset 3 suggests UTF-16-ish junk
                    # that needs the cleanup branch below.
                    if len(self.objects[jskey].tagstream) > 4 and self.objects[jskey].tagstream[3] != '\x00':
                        out += self.objects[jskey].tagstream
                        if out[-1] not in[';', '}']:
                            out += ';'
                    else:
                        temp_js = re.sub(r'([^\x00])\x0a', r'\1', self.objects[jskey].tagstream)
                        temp_js = re.sub(r'([^\x00])\x0d', r'\1', temp_js)
                        temp_js = re.sub('^([\x80-\xff])', '', temp_js)
                        temp_js = re.sub('([\x00-\x08\x0b\x0c\x0e-\x1f])', '', temp_js)
                        temp_js = re.sub('([\x80-\xff])', 'C', temp_js)
                        out += temp_js
            if olivia.DEBUG:
                if self.objects[jskey].isJS or self.objects[jskey].isDelayJS:
                    print '\tchildren ' + str(self.objects[jskey].children)
                    print '\ttags ' + str(self.objects[jskey].tags)
                    print '\tindata = ' + re.sub('[\n\x00-\x19\x7f-\xff]', '', self.objects[jskey].indata)[:100]
        # Phase 3: extract scripts embedded in XFA forms.
        for key in self.list_obj:
            if self.objects[key].is_xfa and (self.encrypt_key == '' or self.encrypt_key_valid):
                xfa_data = ''
                for xfa_type, xfa_key in self.objects[key].xfaChildren:
                    if xfa_key in self.list_obj:
                        xfa_data += self.objects[xfa_key].tagstream
                # gets rid of some crap. But unicode will probably cause problems down the road
                xfa_data = re.sub('^([\x80-\xff])', '', xfa_data)
                xfa_data = re.sub('([\x00-\x08\x0b\x0c\x0e-\x1f])', '', xfa_data)
                xfa_data = re.sub('([\x80-\xff])', 'C', xfa_data)
                try:
                    doc = xml.dom.minidom.parseString(xfa_data)
                except Exception as e:
                    print "drat", str(e)
                    continue
                scriptElements = doc.getElementsByTagNameNS("*", "script")
                if not scriptElements:
                    continue
                for script in scriptElements:
                    if script.getAttribute('contentType') != 'application/x-javascript' or not script.childNodes:
                        continue
                    js = script.childNodes[0].data
                    # maybe?
                    if type(js) == unicode:
                        js = unicode(js).encode('utf-8')
                    dataForJs = ''
                    jsNode = script.parentNode.parentNode
                    jsName = jsNode.getAttribute('name')
                    if type(jsName) == unicode:
                        jsName = unicode(jsName).encode('utf-8')
                    # Pair the script with its form data element of the same name.
                    dataElements = doc.getElementsByTagName(jsName)
                    if dataElements and dataElements[0].childNodes and dataElements[0].childNodes[0].nodeType == xml.dom.minidom.Node.TEXT_NODE:
                        dataForJs = dataElements[0].childNodes[0].data.replace('\n', '').replace('\r', '')
                    xfa_javascript = ''
                    if jsName:
                        xfa_javascript += jsName + "=this;\n"
                    xfa_javascript += 'var rawValue = "' + dataForJs.strip() + '";\n'
                    for k in jsNode.attributes.keys():
                        xfa_javascript += jsName + '.' + k + ' = "' + jsNode.getAttribute(k) + '";\n'
                    xfa_javascript += js + '\n'
                    if jsName:
                        xfa_javascript += 'print("<rawValue>" + ' + jsName + '.rawValue + "</rawValue>");\n'
                    out += xfa_javascript
        # Phase 4: last-resort scrape when no JavaScript was found at all.
        if len(out + delayout) <= 0:
            #Basically if we don't find ANY JavaScript, then we can parse the other elements
            for jskey in self.objects.keys():
                sloppy = re.search('function |var ', self.objects[jskey].tagstream)
                if sloppy:
                    sloppy_flag = True
                    out += self.objects[jskey].tagstream
                    if olivia.DEBUG:
                        print 'Sloppy PDF parsing found %d bytes of JavaScript' % (len(out))
        return re.sub('[\x00-\x08\x0b\x0c\x0e-\x1f\x80-\xff]', '', out + delayout), headersjs, sloppy_flag
@staticmethod
def do_hexAscii(input):
return re.sub('([^a-zA-Z0-9])', lambda m: '\\x%02x' % ord(m.group(1)), input)
@staticmethod
def applyFilter(data):
if len(data) > 10000000:
return data
for i in range(0, len(data)):
c = ord(data[i])
if 0 < c < 0x19 or 0x7f < c < 0xff or data[i] in ' \n\r':
pass #cut beginning non-ascii characters
else:
data = data[i:]
break
data = data[::-1] #reversed
for i in range(0, len(data)):
c = ord(data[i])
if 0 < c < 0x19 or 0x7f < c < 0xff or data[i] in ' \n\r':
pass #cut trailing non-ascii characters
else:
data = data[i:]
break
output = data[::-1]
#output = re.sub('^[\x00-\x19\x7f-\xff\n\s]*[\x00-\x19\x7f-\xff]','',input) #look for starting non-ascii characters
#output = re.sub('[\x00-\x19\x7f-\xff\s]+$','',output) #look for trailing non-ascii characters
return output
|
An innovative strategy for sustainably active oxygen capture using nitrogen (N2) instead of helium (He) as the direct analysis in real-time (DART) gas is demonstrated in this work. DART MS was carried out to analyze compounds of different polarity, including organophosphorus pesticides, amino acids, hormones, and polybrominated diphenyl ethers, by using He and N2 as the DART gas, respectively. Unexpected characteristic ionization reactions — including a replacement reaction in which the sulfur atom of the P=S group was replaced by an oxygen atom, oxidation ([M + nO + H]+ or [M + nO-H]− (n = 1, 2, 3, 4, 5)), and hydrogen loss (loss of two hydrogens) — rapidly occurred in situ in the presence of N2 under ambient conditions without any additives. The reaction mechanisms were proposed and further confirmed by high-resolution tandem mass spectrometry. Our study under high temperature and high voltage provides a powerful tool for generating unique ionic species that may be difficult to form by other means, which also creates favorable conditions for the future study of the mechanism of DART MS.
The online version of this article ( https://doi.org/10.1007/s13361-019-02132-7) contains supplementary material, which is available to authorized users.
This work was supported by the Science and Technology Development Planning Project of Jilin Province (Nos. 201603080YY, 20170623026TC, 20160204027YY, 20160101220JC), Project of the Education Department of Jilin Province (No. JJKH20181274KJ), and Special Fund Project of Industrial Innovation in Jilin Province (No. 2017C056-2).
|
#!/usr/bin/python
import sdl2
import sdl2.sdlgfx
from sdl2 import surface
import pygame2
def scale(surface, size, dest_sprite=None, resample=0):
    """Scale an image using python's imaging library.

    surface     -- pygame2.Surface wrapping a sprite that carries a PIL image
    size        -- (width, height) tuple forwarded to PIL's resize
    dest_sprite -- unused here; presumably kept for API compatibility (TODO confirm)
    resample    -- PIL resampling filter constant (0 == nearest)

    Returns a new pygame2.Surface, or the original *surface* unchanged when
    its sprite has no PIL image attached.
    """
    if not pygame2.display.window:
        raise Exception("Error: Window has not yet been created.")
    sprite = surface.sprite
    # Resize the image using PIL
    try:
        img = sprite.pil.resize(size, resample)
    except AttributeError:
        print "ERROR: This surface does not have a PIL object! Resizing image failed."
        return surface
    # Create an SDL2 surface from our sprite.
    # (Note: this rebinds the name `surface`; the original argument is no
    # longer reachable past this point.)
    surface, pil_surface = pygame2.image.load_image(img)
    # Create a new sprite from the surface.
    scaled_sprite = pygame2.display.window.factory.from_surface(surface)
    scaled_sprite.angle = sprite.angle
    scaled_sprite.pil = pil_surface
    # If we're using a software renderer, keep an original for rotation.
    if pygame2.display.window.type == "software":
        scaled_sprite.original = pygame2.display.window.factory.from_surface(surface, True)
    else:
        scaled_sprite.sw_sprite = pygame2.display.window.sw_factory.from_surface(surface, True)
    image = pygame2.Surface(sprite=scaled_sprite)
    return image
def copy(surface):
    """Return a duplicate pygame2.Surface rebuilt from *surface*'s PIL image."""
    if not pygame2.display.window:
        raise Exception("Error: Window has not yet been created.")
    src_sprite = surface.sprite
    # Rebuild an SDL2 surface from the sprite's PIL image.
    sdl_surface, pil_img = pygame2.image.load_image(src_sprite.pil)
    # Wrap it in a fresh sprite, carrying over the rotation angle.
    duplicate = pygame2.display.window.factory.from_surface(sdl_surface)
    duplicate.angle = src_sprite.angle
    duplicate.pil = pil_img
    # Software renderers rotate from a pristine copy; other renderers keep a
    # software-side sprite instead.
    if pygame2.display.window.type == "software":
        duplicate.original = pygame2.display.window.factory.from_surface(sdl_surface, True)
    else:
        duplicate.sw_sprite = pygame2.display.window.sw_factory.from_surface(sdl_surface, True)
    return pygame2.Surface(sprite=duplicate)
|
Up until this year, I've taken the time to stamp and assemble my make & take projects from convention while at convention. This year however, things were rather fast-paced and we were warned that it would not be possible to take part in everything. For me, that meant that I would skip making my projects at convention, and do them when I returned home.
Here they are and I had a wonderfully relaxing time making them.
This adorable bag uses the Tag a Bag gift bag bundle and the stamp set Tag It (which is the Ronald McDonald House Charities stamp set this year, $3 of each one goes to help this worthwhile charity). So quick and easy!
These quick Tag a Bag Gift boxes are so fun, and this project covered the top of the box with a stamped cover and then some quick embellishments. The stamp set(s) are Gorgeous Grunge and Hello, Lovely.
I should have taken a photo before I put the note cards inside, but this bag has 5 matching note cards w/envelopes inside of it.
This card and matching envelope were the final projects. I think that Gorgeous Grunge may just be a new favorite stamp set for me. In fact, I liked the card layout so much that I made the card below for my nephew's birthday this next month. Instead of using the button and more "girly" colors, I changed up the colors and used the arrow clip from the Hung Up Cute Clips assortment.
|
import zope.interface
import zope.schema
from zeit.cms.i18n import MessageFactory as _
class ICountStorage(zope.interface.Interface):
    """Central access to click counting.

    This utility takes care of refreshing and caching today.xml.
    """
    # NOTE: zope.interface method declarations deliberately omit `self`.
    def get_count(unique_id):
        """Return access count for given unique id.

        returns amount of hits (int) or None if nothing is known about the
        given unique_id.
        """
    def get_count_date(unique_id):
        """Return the date when the sample was taken."""
    def __iter__():
        """Iterate over the stored unique_ids."""
# WebDAV property namespace under which ILifeTimeCounter values are stored.
LIFETIME_DAV_NAMESPACE = 'http://namespaces.zeit.de/CMS/lifetimecounter'
class ILifeTimeCounter(zope.interface.Interface):
    """Live time hit counter."""
    # Cumulative number of hits recorded between first_count and last_count.
    total_hits = zope.schema.Int(
        title=_('Total hits'),
        description=_('Total hits between first and last count.'))
    # Day the counter first recorded a hit.
    first_count = zope.schema.Date(
        title=_('Date the first hit was counted on'))
    # Day of the most recent recorded hit.
    last_count = zope.schema.Date(
        title=_('Date the last hit was counted on'))
|
Keep up to date with the latest news and updates from across the UK.
Updates from across Tyne and Wear, Northumberland, County Durham and Teesside.
Updates from across Cheshire, Merseyside, Isle of Man, Greater Manchester, Cumbria and Lancashire.
Updates from across Essex, Hertfordshire, Cambridgeshire, Bedfordshire, Norfolk and Suffolk.
Updates from across Nottinghamshire, Leicestershire, Lincolnshire, Derbyshire and Northamptonshire.
Updates across Birmingham, the Black Country, Herefordshire, Shropshire, Staffordshire, Warwickshire and Worcestershire.
Updates from across Surrey, Kent and Sussex.
Updates from across Cornwall, Devon, Dorset, Somerset, Avon, Wiltshire and Gloucestershire.
Updates from across Hampshire, Berkshire, Oxfordshire, Buckinghamshire, the Isle of Wight and Channel Islands.
Updates from the capital, across all 32 Greater London boroughs.
Updates from across Northern Ireland.
Updates from Wales, across all regions. Y diweddara o Gymru, ar draws y rhanbarthau oll.
Read all our latest UK press releases and statements.
Life with cancer is still life. We can help you live it.
Find out about support groups, where to get information and how to get involved with Macmillan where you live.
If you or someone close to you has been diagnosed with cancer, we can help you. Find out what to expect, get information, practical advice and support, hear from experts and read about other people’s experiences.
|
#coding=utf-8
"""
@Brief build index of suggestion
@Author wmd
@Create 2015.11.05
"""
import sugg_conf
import sys
from pypinyin import lazy_pinyin
class SuggServer:
    '''In-memory query-suggestion server.

    Loads the item set and the prefix index produced by the offline build
    step, then answers prefix -> suggestion-list lookups, optionally falling
    back to a pinyin transcription of the prefix (see sugg_conf.ifPY).
    '''
    # NOTE(review): these are *class*-level dicts shared by all instances.
    # Fine for a single-instance server; move into an __init__ if multiple
    # independent servers are ever needed.
    _h_tid2item = {}
    _h_key2item = {}
    _h_prefix2tids = {}
    @staticmethod
    def is_chinese_char(uchar):
        '''
        check if chinese character
        '''
        # Fixed: the range bounds were written u'u4e00'/u'u9fa5' (missing
        # backslashes), so the comparison was against the literal string
        # 'u4e00' rather than the CJK unicode range.  Also made a
        # staticmethod: the original declared no `self`, so any instance
        # call would have bound the instance to `uchar`.
        if uchar >= u'\u4e00' and uchar <= u'\u9fa5':
            return True
        else:
            return False
    def load_item_set(self):
        '''
        load item-set
        '''
        ifilename = sugg_conf.FileOutputItemSet
        sys.stderr.write('[trace] begin to build item-set from:%s\n' % (ifilename))
        try:
            ifile = open(ifilename, 'r')
        except IOError:
            # Narrowed from a bare "except:" so unrelated errors surface.
            sys.stderr.write('[ERROR] cannot open file:%s\n' % ifilename)
            sys.exit(-1)
        line_no = 0
        for line in ifile:
            line_no += 1
            # Expected tab-separated columns: tid, text, score, attribute.
            fields = line.replace('\n', '').split('\t')
            if len(fields) != 4:
                sys.stderr.write('[ERROR] invalid fields-count:%d, not %d\n' % (len(fields), 4))
                sys.exit(-1)
            tid = int(fields[0])
            text = fields[1]
            score = int(fields[2])
            att = fields[3]
            item = {
                'tid' : tid,
                'text' : text,
                'score' : score,
                'att' : att,
            }
            key = '%s\t%s' % (text, att)
            self._h_tid2item[tid] = item
            self._h_key2item[key] = item
        ifile.close()
        sys.stderr.write('[trace] done:%s, %d lines\n' % (ifilename, line_no))
    def load_prefix_index(self):
        '''
        load prefix-index-dict
        '''
        ifilename = '%s.prefix' % sugg_conf.FileOutput
        sys.stderr.write('[trace] begin to load prefix-index from:%s\n' % (ifilename))
        try:
            ifile = open(ifilename, 'r')
        except IOError:
            # Narrowed from a bare "except:" so unrelated errors surface.
            sys.stderr.write('[ERROR] cannot open file:%s\n' % ifilename)
            sys.exit(-1)
        line_no = 0
        for line in ifile:
            line_no += 1
            # Each line: prefix followed by one or more matching tids.
            fields = line.replace('\n', '').split('\t')
            if len(fields) < 2:
                sys.stderr.write('[ERROR] invalid fields-count:%d, < %d\n' % (len(fields), 2))
                sys.exit(-1)
            prefix = fields[0]
            tids = []
            for i in range(1, len(fields)):
                tids.append(int(fields[i]))
            self._h_prefix2tids[prefix] = tids
        ifile.close()
        sys.stderr.write('[trace] done:%s, %d lines\n' % (ifilename, line_no))
    def load_index(self):
        '''
        load index-dicts
        '''
        self.load_item_set()
        self.load_prefix_index()
    def get_sugg(self, prefix):
        '''
        get suggestion-list according to a certain prefix
        '''
        sugg_info = {}
        sugg_info['prefix'] = prefix
        sugg_info['sugg'] = []
        if len(prefix) == 0:
            return sugg_info
        py_flag = False
        if prefix not in self._h_prefix2tids:
            # Unknown prefix: optionally retry with its pinyin transcription.
            if sugg_conf.ifPY == 1:
                py = lazy_pinyin(prefix.decode(sugg_conf.encoding))
                py_str = (''.join(py)).encode(sugg_conf.encoding)
                if py_str not in self._h_prefix2tids:
                    return sugg_info
                ### as an alternate, use py_str as prefix
                prefix = py_str
                py_flag = True
            else:
                return sugg_info
        tids = self._h_prefix2tids[prefix]
        for tid in tids:
            if tid not in self._h_tid2item:
                continue
            item = self._h_tid2item[tid]
            sugg_item = {
                'text' : item['text'],
                'score' : item['score'],
                'att' : item['att'],
            }
            sugg_info['sugg'].append(sugg_item)
        return sugg_info
    def init(self):
        '''
        init
        '''
        sys.stderr.write('[trace] init\n')
        self.load_index()
    def run(self):
        '''
        dispatch commands
        '''
        if len(sys.argv) < 2:
            sys.stderr.write('[ERROR] no command\n')
            sys.exit(-1)
        sys.stderr.write('[trace] begin to run command: %s\n' % sys.argv[1])
        # NOTE(review): build_item_set / gen_tag2tid / gen_prefix2tid are not
        # defined on this class; these commands appear to belong to an
        # offline-builder variant -- confirm before invoking them.
        if sys.argv[1] == 'build_item_set':
            self.build_item_set()
        elif sys.argv[1] == 'gen_tag2tid':
            self.load_item_set()
            self.gen_tag2tid()
        elif sys.argv[1] == 'gen_prefix2tid':
            self.gen_prefix2tid()
        else:
            sys.stderr.write('[ERROR] unknown command: %s\n' % sys.argv[1])
            sys.exit(-1)
        sys.stderr.write('[trace] done.\n')
if __name__ == '__main__':
    # Load the indexes into memory, then dispatch the command-line command.
    server = SuggServer()
    server.init()
    server.run()
|
Maybe it's easier to forget what the local jobs picture was just seven years ago, as a major U.S. recession and cutbacks at the Kennedy Space Center gripped Brevard County.
It was painful, and it looked like this in early 2010: Nearly 31,000 people had no jobs and the unemployment rate was 11.8 percent. The labor force had shrunk to 259,295.
Here's what it looks like Labor Day 2017: The unemployment rate is 4.4 percent and about 12,000 people, out of a work force of 273,698, are jobless.
The current local labor force is significantly higher than the peak of 265,336 reached in July 2006.
All of this is quite a turnaround for Brevard. And it's a jobs rebound driven mostly by higher-tech, higher-paying jobs in aerospace, aviation, engineering and manufacturing.
"This is a true economic development recovery story," said Lynda Weatherman, president and CEO of the Economic Development Commission of Florida's Space Coast. "I don't know where this is happening elsewhere."
If anything the local job picture shows a community that didn't lay down in times of adversity. Educators and community and business leaders in the area stubbornly stuck to a strategy of encouraging — and aggressively recruiting — key industries like aerospace and aviation which lined up with Brevard's moniker "The Space Coast."
Ironically, just last Thursday, the first graduating class from Eastern Florida State College’s new aviation maintenance technician program received their certificates from an FAA-approved airframe and power plant (A&P) program, based at the college’s Aviation Center at Orlando Melbourne International Airport.
"This is an incredible time to be coming into the job market," said Lanny Schott, director of Aviation Programs at EFSC, "because companies are searching for qualified employees like them."
The program at EFSC, as well as the strong aviation program at the Florida Institute of Technology in Melbourne, is designed to be a pipeline to provide skilled workers to the area's growing aviation presence. That includes companies like Embraer, AAR Airlift and STS Aviation Group. The latter recently acquired the assets of AeroMod International to launch a new business known as STS Mod Center at Orlando Melbourne International's 14,500-square-foot, state-of-the-art hangar facility.
"The aviation and aerospace industry is growing right outside my window air side," said Greg Donovan, executive director at Orlando Melbourne International. "We can’t add parking spaces fast enough to keep up with their growth."
Weatherman noted that the high-tech landscape in Brevard has developed into two geographical bookends: aviation and defense-related companies like Harris Corp. and Northrop Grumman Corp. at the southern end of Brevard, while the central and northern parts of the county are bulking up with aerospace ventures like Blue Origin, SpaceX and the satellite venture OneWeb.
"It's starting to sizzle," Weatherman said, "People want to know what's happening here."
However, Weatherman noted it's not a time to breathe a sigh of relief because of the job gains. Other communities and other states are very hawkish in going after the same high-tech manufacturing industries that Brevard is pursuing.
And those areas are scoring some important victories.
In June, for example, Blue Origin, the spaceflight company started by Amazon founder Jeff Bezos, announced it would manufacture its BE-4 engine in a $200 million, state-of-the-art production facility to be built in Alabama’s Rocket City. Alabama officials reportedly offered an incentive package of up to $39 million and in return will get about 350 high-tech manufacturing jobs.
One hope locally — and maybe it was a little far fetched — was that Bezos would have located the engine manufacturing operation on the Space Coast where it's building two versions of its New Glenn rockets.
"One thing about this industry," Weatherman said, "is that you can never, ever rest."
March 29, 2019, 7:36 p.m.
March 28, 2019, 6:40 p.m.
|
import sqlalchemy
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import mapper, sessionmaker
import subprocess
class PygrationState(object):
    '''ORM-mapped record describing the state of one migration step.'''

    def __init__(self, migration=None, step_id=None, step_name=None):
        # Identity of the step within its migration.
        self.migration = migration
        self.step_id = step_id
        self.step_name = step_name
        # Ordering and per-phase states; filled in by SQLAlchemy on load.
        self.sequence = None
        self.add_state = None
        self.simdrop_state = None
        self.drop_state = None

    def __repr__(self):
        return "<PygrationState({0}, {1})>".format(self.migration, self.step_id)
class Table(object):
    '''Holds the shared SQLAlchemy metadata, engine and pygration_state table.'''

    metadata = sqlalchemy.MetaData()
    engine = None
    pygration_state = None

    @classmethod
    def define(cls, schema=None):
        '''Define the pygration_state table, optionally inside *schema*.'''
        cls.pygration_state = sqlalchemy.Table(
            'pygration_state', cls.metadata,
            Column('migration', String(length=160), primary_key=True),
            Column('step_id', String(length=160), primary_key=True),
            Column('step_name', String(length=160)),
            Column('sequence', Integer),
            Column('add_state', String(length=16)),
            Column('simdrop_state', String(length=16)),
            Column('drop_state', String(length=16)),
            schema=schema,
        )
class FileLoader(object):
    '''Object for running SQL from a file on the file system.

    The command line template in *args* may contain "{filename}" (filled in
    per invocation) plus any keys from *formatting_dict* (filled in once at
    construction time).
    '''

    def __init__(self, binary, args=None, formatting_dict=None):
        # Avoid shared mutable default arguments.
        args = args or []
        formatting_dict = formatting_dict or {}
        self._binary = binary
        # Format the static placeholders now, but re-insert "{filename}"
        # so __call__ can substitute the real file name later.  (The old
        # code consumed the placeholder here, making the per-call format
        # a no-op.)
        self._args = [arg.format(filename='{filename}', **formatting_dict)
                      for arg in args]

    def __call__(self, filename):
        '''Run the configured binary against *filename*.'''
        args = [arg.format(filename=filename) for arg in self._args]
        print(self._binary, args)
        subprocess.check_call([self._binary] + args)
def open(url=None, drivername=None, schema=None, username=None,
        password=None, host=None, port=None, database=None, query=None):
    """Open the DB through a SQLAlchemy engine.

    Either *url* or *drivername* (plus connection parts) must be given.
    Note: this intentionally shadows the builtin open() within this module.

    Returns an open session.
    """
    if url is None:
        if drivername is None:
            raise Exception("Either a url or a driver name is required to open a db connection")
        # Assemble a URL object from the individual connection parts.
        url = sqlalchemy.engine.url.URL(drivername=drivername,
                                        username=username,
                                        password=password,
                                        host=host,
                                        port=port,
                                        database=database,
                                        query=query)
    Table.engine = sqlalchemy.create_engine(url)
    Table.metadata.bind = Table.engine
    session_factory = sessionmaker(bind=Table.engine)
    session = session_factory()
    # Define the table and map the state class before handing out the session.
    Table.define(schema)
    mapper(PygrationState, Table.pygration_state)
    return session
|
The Las Vegas strip generated $6.2 billion in gaming revenue in 2012, and the county overall generated $9.4 billion, notes the Las Vegas Convention and Visitors Authority. More than 39 million visitors took their hard-earned vacation time and hit Vegas for the time of their life last year. While many like to gamble, the city’s food scene is respected around the world, and its night life brings you up close and personal with celebrities and fellow Vegas visitors.
Once you realize how many great restaurants are in town, you may want to take that wad of cash meant for the craps table and double down on some exquisite cuisine.
Sen of Japan: There’s plenty of sushi in Vegas, but Sen is the Holy Grail of sushi. The prices are great, the selection is outstanding, and the service is world renowned. There is a traditional spread of sushi selections, but Sen also provides more unorthodox options on the menu.
Picasso: Picasso isn’t just a random name for this French restaurant. The entire restaurant is decorated with original Picasso paintings, making this a perfect destination for art lovers. Julian Serrano does a great job bringing new French cuisine to Vegas. The prix fixe dinners are pricey, but well worth it.
Michael’s: Michael’s prides itself on old school service, elegance, and up-scale Victorian decor. The menu satisfies on a number of levels, especially if you like seafood. It’s set apart from other restaurants with a variety of divine dishes that are prepared table side.
Even with the myriad restaurants and shows, gambling is the big draw in Vegas. After you wear yourself out shopping and lounging by the pool, head to a slot machine. Don’t know how to gamble? Ask your hotel concierge if the property offers free lessons. Or go online for additional resources.
The Bellagio: It’s famous for a reason. The hotel and casino goes over the top and is one of the most luxurious casinos available. The hotel is amazing, with every amenity you could want, and the stellar casino has every game imaginable.
The MGM Grand: Another iconic Las Vegas casino, the MGM Grand is the largest of them all. You’ll recognize it from afar by the lions at the entrance. You won’t want to leave once you see the 3,500 slot machines and 165 game tables. It also has plenty of on-site entertainment if you need a break from gambling.
Caesars Palace: You’ll feel like you stepped into ancient Rome with this casino. This low-key casino caters to every experience level, and it’s not nearly as intimidating as some of the other places in town.
There is no shortage of places to party in this town. The best night clubs are found in the larger hotels.
The 10AK at The Mirage Resort: This is the first stop for star-struck club goers, who have caught glimpses of celebrities like Kim Kardashian, Leonardo DiCaprio, and Paris Hilton. This high class club is part club and part art form, with two separate spaces and enough room for 1,500 people.
Andrea’s at Encore Las Vegas: Want Asian fusion food and a great lounge to keep you entertained all night? Head to this club, and you might just see George Clooney and Lady Gaga.
LAVO at The Palazzo Resort: The LAVO nightclub is part awesome Italian restaurant, and part club. You’ll see A-list celebrities like Rihanna and Jay-Z. The restaurant and lounge areas of LAVO are open 7 days a week. The club is open 5 days, but has some of the latest club hours in Vegas.
|
# post-process tex files, looking for
# "%% @..." directives
import sys
import codecs
import re
debug = 0
verbose=True
# define processors
class PostProcessor:
    """Base class for line processors; the default implementation is a no-op."""

    def process_line(self, line):
        """Return (possibly modified line, whether this processor is finished)."""
        return (line, False)


class ReplaceNextEnvironment(PostProcessor):
    """Rename the next LaTeX environment args[0] (and its matching close,
    honouring nesting) to args[1]."""

    def __init__(self, args):
        self.opened1 = False      # have we seen the first \begin{target}?
        self.openenv = 0          # current nesting depth of target envs
        self.target = args[0]
        self.replacement = args[1]
        self.targeto = re.compile(r'\\begin{' + args[0] + '}', re.U)
        self.targetc = re.compile(r'\\end{' + args[0] + '}', re.U)

    def process_line(self, line):
        # Test both patterns before any substitution changes the line.
        saw_open = self.targeto.match(line) is not None
        saw_close = self.targetc.match(line) is not None
        done = False
        if saw_open:
            if not self.opened1:
                # Only the first \begin is renamed; nested ones are counted.
                self.opened1 = True
                line = self.targeto.sub(r'\\begin{' + self.replacement + '}', line)
            self.openenv += 1
        if saw_close and self.opened1:
            self.openenv -= 1
            if self.openenv == 0:
                # Matching close of the renamed environment: rename and finish.
                line = self.targetc.sub(r'\\end{' + self.replacement + '}', line)
                done = True
        return (line, done)
# Map "%% @<directive>" names to the PostProcessor subclass that handles them.
processor_dict = {
    'replace-next-environment': ReplaceNextEnvironment,
}
# A post-processing directive line looks like "%% @<name> <args...>".
ppdirective_re = re.compile(r'^%% @(\S+) (.*)')
def main():
    """Post-process the tex file named on the command line in place.

    "%% @<directive> <args>" lines instantiate a processor; every following
    line is run through all live processors until each reports it is done.
    """
    # announce
    if verbose:
        print('Python Pandoc Postprocessor')
    # read args
    args = sys.argv
    if len(args) == 1:
        raise SystemExit('No arguments supplied')
    else:
        infile = args[1]
    # announce
    if verbose:
        print('\tProcessing: %s' % infile)
    # read in file, lazy as hell
    # NOTE: strip() also removes leading indentation from every line.
    with codecs.open(infile, mode='r', encoding='utf8') as fin:
        lines = [line.strip() for line in fin]
    if debug > 0:
        print(lines)
    processors = list()
    outlines = list()
    for line in lines:
        res = ppdirective_re.match(line)
        if debug > 0:
            print(line)
        # check for new processors; directive lines are consumed (not output)
        if res:
            directive = res.groups()[0]
            dir_args = [x.strip() for x in (res.groups()[1]).split()]
            processors.append(processor_dict[directive](dir_args))
            if debug > 0:
                print('\tDIRECTIVE: %s, ARGS: %s' % (directive, dir_args))
            continue
        elif debug > 1:
            print('\t(NO DIRECTIVE)')
        # run the processors; each may rewrite the line and flag itself done
        drop_processor = list()
        for processor in processors:
            res = processor.process_line(line)
            line = res[0]
            drop_processor.append(res[1])
            if debug > 1:
                print(" ==> " + str(line))
        outlines.append(line)
        if debug > 1:
            print(processors)
        # drop any finished processors (parallel to drop_processor flags)
        processors = [processors[i] for i in range(len(processors)) if not
                      drop_processor[i]]
        if debug > 1:
            print(processors)
    # write everything out, overwriting the input file
    if debug > 0:
        print(outlines)
    with codecs.open(infile, mode='w', encoding='utf8') as fout:
        for line in outlines:
            fout.write(line + "\n")
    if verbose:
        print('\tPPP done!')


if __name__ == "__main__":
    main()
|
Seminars of the Department of Psychiatry of the 1st Faculty of Medicine and General Teaching Hospital generally take place in the library of the Psychiatric Clinic on Wednesdays from 14:00 to 15:30; they are open to the professional public.
Schedule of seminars (in Czech): click here.
Page ranking: (by 1 user) it's not good minor deficiencies good very good excellent thanks for rating If you think the article is not up-to-date, click here.
|
import asyncio
from gi.repository import GLib
import threading
from typing import MutableMapping
import sys
import weakref
from ._loop import GLibEventLoop
__all__ = [
'GLibEventLoopPolicy',
]
class GLibEventLoopPolicy(asyncio.AbstractEventLoopPolicy):
    """asyncio policy handing out one GLibEventLoop per GLib main context."""

    class _ThreadLocalVariable(threading.local):
        # One slot per thread, holding the loop most recently touched by it.
        value = None

    def __init__(self) -> None:
        self._set_loops_lock = threading.Lock()
        # context -> loop; entries disappear once a loop is garbage collected.
        self._set_loops = weakref.WeakValueDictionary()  # type: MutableMapping[GLib.MainContext, GLibEventLoop]
        self._last_accessed_loop = __class__._ThreadLocalVariable()

    def get_event_loop(self) -> GLibEventLoop:
        """Return the loop registered for the current context, creating and
        registering a new one on demand."""
        context = self._get_current_context()
        with self._set_loops_lock:
            loop = self._set_loops.get(context)
            if loop is None:
                loop = GLibEventLoop(context)
                self._set_loops[context] = loop
            # Thread-local strong reference — presumably keeps the weakly
            # held loop alive for this thread; confirm against _loop module.
            self._last_accessed_loop.value = loop
        return loop

    def set_event_loop(self, loop: GLibEventLoop) -> None:
        """Register *loop* for the current context; the contexts must match."""
        context = self._get_current_context()
        if loop.context != context:
            raise ValueError("Loop has a different context")
        with self._set_loops_lock:
            self._set_loops[context] = loop
            self._last_accessed_loop.value = loop

    def new_event_loop(self) -> GLibEventLoop:
        """Create a fresh, unregistered loop for the current context."""
        return GLibEventLoop(self._get_current_context())

    def _get_current_context(self) -> GLib.MainContext:
        """Return the thread-default GLib context, or the global default."""
        context = GLib.MainContext.get_thread_default()
        if context is None:
            context = GLib.MainContext.default()
        return context

    if sys.platform != 'win32':
        def get_child_watcher(self) -> asyncio.AbstractChildWatcher:
            raise NotImplementedError

        def set_child_watcher(self, watcher: asyncio.AbstractChildWatcher) -> None:
            raise NotImplementedError
|
The Samsung Galaxy S4 is rumored to be heading to shelves 2013 to replace the Samsung Galaxy S3 – Samsung’s best-selling smartphone. As Samsung Galaxy S3 have already picked up the T3 Gadget Award for Best Smartphone of the Year 2012 and regularly tops lists of the best smartphones available on the market, will its successor – Samsung Galaxy S4 to be more fantastic? This much awaited new phone is being talked hotly these days. On considering this, we’ve prepared a brief summary about the latest news, rumors and speculations about the Samsung Galaxy S4.
Galaxy S4 Rumored launch date: Samsung Galaxy S4 launch date rumored to be once again announced at Mobile World Congress 2013 which is set to kick off in Barcelona at the end of February. As it is a perfect time to keep the Galaxy series fresh in consumer’s minds. Another rumor has suggested that Samsung might be announcing the Galaxy S4, in April. If true, it would mean that Samsung would likely be using its own event for the announcement.
Galaxy S4 Rumored display: Samsung Galaxy S4 is heavily rumored to be coming with a 1080p unbreakable display at a size rumored to be either 5-inches or 4.99-inches. And Samsung is predicted to showcase this brand new AMOLED display with 441 pixels-per-inch at CES 2013 in Las Vegas.
Galaxy S4 Rumored processor: Rumors about many of the Galaxy S4 upcoming specifications remain firmly in the darkness though there are a few that have seemingly emerged from the shadows. The Galaxy S4 is rumored to have a quad-core processor, possibly of the Exynos 5440 variety.
Galaxy S4 Rumored camera: the Galaxy S4 is rumored to have a 13MP rear camera, which would be up from the 8MP camera found in the Galaxy S3.
Galaxy S4 Rumored OS: one report suggests that the Samsung Galaxy S4 will arrive with Android 5.0 Key Lime Pie on board, which is Google’s rumored name for its latest piece of Android software.
Galaxy S4 Rumored price: It is rumored that smartphone will cost just like its predecessor, while we can expect a slash on the prices of Samsung Galaxy S3.
Samsung will look to keep pace with the Galaxy S4 and will do battle with not only the iPhone 5 but the iPhone 5S as well. Though there is no official comment from the South Korea’s tech giant, we believe the launch of Samsung Galaxy S4 would surely send its rival scurrying away in fear. Will it become another myth in Samsung history? Let’s just stay tuned for its coming!
|
# coding: utf-8
from flask import Blueprint, g, request, flash
from flask import render_template, redirect, abort, jsonify
from flask import url_for
from flask.ext.babel import gettext as _
from ..helpers import require_user, force_int, limit_request
from ..models import Node, Topic, Reply, Account
from ..models import fill_topics, fill_with_users
from ..forms import TopicForm, ReplyForm
__all__ = ['bp']
bp = Blueprint('topic', __name__)
@bp.route('/')
def topics():
    """Show all topics, most recently updated first."""
    page = force_int(request.args.get('page', 1), 0)
    if not page:
        return abort(404)
    query = Topic.query.order_by(Topic.updated.desc())
    paginator = query.paginate(page)
    paginator.items = fill_topics(paginator.items)
    return render_template('topic/topics.html',
                           paginator=paginator,
                           endpoint='topic.topics')
@bp.route('/latest')
def latest():
    """Show topics ordered by creation time (newest id first)."""
    page = force_int(request.args.get('page', 1), 0)
    if not page:
        return abort(404)
    query = Topic.query.order_by(Topic.id.desc())
    paginator = query.paginate(page)
    paginator.items = fill_topics(paginator.items)
    return render_template('topic/topics.html',
                           paginator=paginator,
                           endpoint='topic.latest')
@bp.route('/desert')
def desert():
    """Show topics that have not received any replies yet."""
    page = force_int(request.args.get('page', 1), 0)
    if not page:
        return abort(404)
    query = Topic.query.filter_by(reply_count=0).order_by(Topic.id.desc())
    paginator = query.paginate(page)
    paginator.items = fill_topics(paginator.items)
    return render_template('topic/topics.html',
                           paginator=paginator,
                           endpoint='topic.desert')
@bp.route('/create/<urlname>', methods=['GET', 'POST'])
@require_user
def create(urlname):
    """
    Create a topic in the node by an activated user.

    :param urlname: the urlname of the Node model
    """
    node = Node.query.filter_by(urlname=urlname).first_or_404()
    # Restricted nodes: staff-only and admin-only nodes reject other users.
    forbidden = ((node.role == 'staff' and not g.user.is_staff) or
                 (node.role == 'admin' and not g.user.is_admin))
    if forbidden:
        flash(_('You have no permission in this node.'), 'warn')
        return redirect(url_for('node.view', urlname=urlname))
    form = TopicForm()
    if form.validate_on_submit():
        topic = form.save(g.user, node)
        return redirect(url_for('.view', uid=topic.id))
    return render_template('topic/create.html', node=node, form=form)
@bp.route('/<int:uid>', methods=['GET', 'POST'])
def view(uid):
    """
    View a topic with the given id.

    A POST request only increments the hit counter and returns it as JSON.

    :param uid: the id of a topic.
    """
    if request.method == 'POST':
        # record hits
        topic = Topic.query.get_or_404(uid)
        topic.hits += 1
        topic.save()
        return jsonify(hits=topic.hits)

    page = force_int(request.args.get('page', 1), 0)
    if not page:
        return abort(404)
    topic = Topic.query.get_or_404(uid)
    node = Node.query.get_or_404(topic.node_id)
    author = Account.query.get_or_404(topic.account_id)
    paginator = Reply.query.filter_by(topic_id=uid).paginate(page)
    paginator.items = fill_with_users(paginator.items)
    # Only logged-in users get a reply form.
    form = ReplyForm() if g.user else None
    return render_template(
        'topic/view.html', topic=topic, node=node, author=author,
        form=form, paginator=paginator
    )
@bp.route('/<int:uid>/edit', methods=['GET', 'POST'])
@require_user
def edit(uid):
    """
    Edit a topic by the topic author (or a staff member).

    :param uid: the id of the topic
    """
    topic = Topic.query.get_or_404(uid)
    # Authorization was missing: any logged-in user could edit any topic.
    # Mirror the author-or-staff check used in move().
    if g.user.id != topic.account_id and not g.user.is_staff:
        return abort(403)
    form = TopicForm(obj=topic)
    if form.validate_on_submit():
        form.populate_obj(topic)
        topic.save()
        return redirect(url_for('.view', uid=uid))
    return render_template('topic/edit.html', topic=topic, form=form)
@bp.route('/<int:uid>/delete', methods=['POST'])
@require_user
def delete(uid):
    """
    Delete a topic by the topic author (or a staff member), after
    re-confirming the user's password.
    """
    #TODO: should we delete the replies of the topic?
    topic = Topic.query.get_or_404(uid)
    # Authorization was missing: any logged-in user who knew their own
    # password could delete any topic.  Mirror the check used in move().
    if g.user.id != topic.account_id and not g.user.is_staff:
        return abort(403)
    password = request.form.get('password')
    if not password:
        flash(_('Password is required to delete a topic'), 'info')
        return redirect(url_for('.view', uid=uid))
    if not g.user.check_password(password):
        flash(_('Password is wrong'), 'error')
        return redirect(url_for('.view', uid=uid))
    topic.delete()
    return redirect(url_for('.topics'))
@bp.route('/<int:uid>/move', methods=['GET', 'POST'])
@require_user
def move(uid):
    """
    Move a topic to another node.

    :param uid: the id of the topic
    """
    topic = Topic.query.get_or_404(uid)
    # Only the author or a staff member may move a topic.
    if g.user.id != topic.account_id and not g.user.is_staff:
        return abort(403)
    if request.method == 'GET':
        return render_template('topic/move.html', topic=topic)

    urlname = request.form.get('node', None)
    if not urlname:
        return redirect(url_for('.view', uid=uid))
    node = Node.query.filter_by(urlname=urlname).first()
    if node is None:
        flash(_('Node not found.'), 'error')
    else:
        topic.move(node)
        flash(_('Move topic success.'), 'success')
    return redirect(url_for('.view', uid=uid))
@bp.route('/<int:uid>/reply', methods=['POST', 'DELETE'])
@limit_request(5, redirect_url=lambda uid: url_for('.view', uid=uid))
@require_user
def reply(uid):
    """
    Reply of the given topic.

    * POST: it will create a reply
    * DELETE: it will delete a reply

    Delete should pass an arg of the reply id, and it can be only deleted
    by the reply author or the staff members.

    :param uid: the id of the topic
    """
    if request.method == 'DELETE':
        reply_id = force_int(request.args.get('reply', 0), 0)
        if not reply_id:
            return abort(404)
        reply = Reply.query.get_or_404(reply_id)
        # Reject replies that do not belong to this topic.
        if not reply or reply.topic_id != uid:
            return abort(404)
        if not (g.user.is_staff or g.user.id == reply.account_id):
            return abort(403)
        reply.delete()
        return jsonify(status='success')

    # POST: create a reply on the topic.
    topic = Topic.query.get_or_404(uid)
    form = ReplyForm()
    if form.validate_on_submit():
        form.save(g.user, topic)
    else:
        flash(_('Missing content'), 'error')
    return redirect(url_for('.view', uid=uid))
|
Go out. Get drunk. Dodgy stripper in the pub. Fall over in the street. Who doesn’t love a good Stag Do?!
White high quality ladies tee shirt (we don't do cheap t-shirts!) made from 100% pre-shrunk ring-spun cotton, with Red and Black vinyl printed design. Medium fit.
|
# Postr, a Flickr Uploader
#
# Copyright (C) 2006-2008 Ross Burton <ross@burtonini.com>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# St, Fifth Floor, Boston, MA 02110-1301 USA
import os
from gi.repository import Gtk, GdkPixbuf
import bsddb3
from twisted.web.client import getPage
from twisted.internet import defer
from twisted.python import log
def greek(size):
    """Take a quantity (like 1873627) and display it in a human-readable rounded
    form (like 1.8M)."""
    # Plain ints instead of the Python-2-only "L" long literals; the values
    # are identical and this keeps the function importable on Python 3.
    _abbrevs = [
        (1 << 50, 'P'),
        (1 << 40, 'T'),
        (1 << 30, 'G'),
        (1 << 20, 'M'),
        (1 << 10, 'k'),
        (1, '')
    ]
    for factor, suffix in _abbrevs:
        # ">=" so that exact powers (e.g. 1024) display as "1.0k" instead of
        # falling through to the unscaled "1024.0".
        if size >= factor:
            break
    return "%.1f%s" % (float(size) / factor, suffix)
def get_widget_checked(glade, name):
    """Get widget name from glade, and if it doesn't exist raise an exception
    instead of returning None."""
    widget = glade.get_object(name)
    if widget is None:
        # Raising a bare string is invalid in modern Python (TypeError since
        # 2.6); raise a real exception type instead.
        raise KeyError("Cannot find widget %s" % name)
    return widget
def get_glade_widgets (glade, object, widget_names):
    """Get the widgets in the list widget_names from the GladeXML object glade
    and set them as attributes on object.

    NOTE(review): the second parameter shadows the builtin "object"; the name
    is kept for interface compatibility with existing callers.
    """
    for name in widget_names:
        widget = get_widget_checked(glade, name)
        setattr(object, name, widget)
def get_thumb_size(srcw, srch, dstw, dsth):
    """Scale srcw x srch to dimensions with the same aspect ratio that fit as
    closely as possible inside dstw x dsth."""
    # The limiting axis determines the single uniform scale factor.
    ratio = min(dstw / float(srcw), dsth / float(srch))
    return (int(srcw * ratio), int(srch * ratio))
def align_labels(glade, names):
    """Add the list of widgets identified by names in glade to a horizontal
    sizegroup."""
    group = Gtk.SizeGroup()
    group.set_mode(Gtk.SizeGroupMode.HORIZONTAL)
    # add_widget is called purely for its side effect; the old code collected
    # its return values in a throwaway list comprehension.
    for name in names:
        group.add_widget(get_widget_checked(glade, name))
# Module-level cache of raw buddy-icon image bytes, keyed by URL.  Lazily
# opened as a bsddb hash file on first use (None until then).
__buddy_cache = None

def get_buddyicon(flickr, data, size=48):
    """Lookup the buddyicon from the data in @data using @flickr and resize it
    to @size pixels.

    Returns a twisted Deferred that fires with a GdkPixbuf.Pixbuf.
    NOTE(review): the flickr argument is not referenced in this function.
    """
    global __buddy_cache
    if __buddy_cache is None:
        folder = os.path.join (get_cache_path(), "postr")
        if not os.path.exists(folder):
            os.makedirs(folder)
        path = os.path.join (folder, "buddyicons")
        try:
            __buddy_cache = bsddb3.hashopen(path, "c")
        except bsddb3.db.DBInvalidArgError:
            # The database needs upgrading, so delete it
            os.remove(path)
            __buddy_cache = bsddb3.hashopen(path, "c")

    def load_thumb(page, size):
        # Decode the raw image bytes into a pixbuf scaled to size x size.
        loader = GdkPixbuf.PixbufLoader()
        loader.set_size (size, size)
        loader.write(page)
        loader.close()
        return loader.get_pixbuf()

    def got_data(page, url, size):
        # Store the downloaded bytes in the cache before decoding them.
        __buddy_cache[url] = page
        return load_thumb(page, size)

    if int(data.get("iconfarm")) > 0:
        url = "http://farm%s.static.flickr.com/%s/buddyicons/%s.jpg" % (data.get("iconfarm"), data.get("iconserver"), data.get("nsid"))
    else:
        # No custom icon uploaded; fall back to Flickr's default buddy icon.
        url = "http://www.flickr.com/images/buddyicon.jpg"

    if __buddy_cache.has_key(url):
        # Cache hit: decode synchronously, wrapped in a Deferred.
        return defer.execute(load_thumb, __buddy_cache[url], size)
    else:
        deferred = getPage(url)
        deferred.addCallback(got_data, url, size)
        deferred.addErrback(log.err)
        return deferred
def get_cache_path():
    """Return the location of the XDG cache directory."""
    default = os.path.expanduser("~/.cache/")
    return os.environ.get("XDG_CACHE_HOME", default)
|
This cosmetic pigmentation treatment is delicate and gentle to scalp and hair however produces a decisive and stable color, rich in splendid, intense, long lasting shades and tones.
1. Mix product with oxidizing agent according to the required degree. The basic ratio for extremely damaged hair is 1:1.
2. Apply the mixture to hair. Standard development time 30-40 minutes.
Caution: This is a chemical product; keep out of the reach of children. Use only as directed. For external use only. Avoid contact with eyes; if contact occurs, rinse with water. This product is for professional use only.
|
'''
Created on 15 Apr 2018
@author: si
'''
from datetime import datetime
from flask import Flask, render_template, current_app, abort, request, make_response, redirect
from flask_sqlalchemy import SQLAlchemy
from pi_fly.actional.abstract import CommsMessage
from pi_fly.devices.abstract import AbstractSensor
from pi_fly.model import Sensor
from pi_fly.web_sessions import valid_session, session_token_create, SESSION_COOKIE_NAME
db = SQLAlchemy()
def create_app(profiles_class, scoreboard):
    """Build and return the pi_fly Flask application.

    :param profiles_class (str or class) to Flask settings
    :param scoreboard instance of :class:`pi_fly.scoreboard.ScoreBoard`
    """
    app = Flask(__name__)
    app.config.from_object(profiles_class)
    db.init_app(app)
    # Expose the live scoreboard on the app so request handlers can read it.
    app.sensor_scoreboard = scoreboard

    @app.route('/')
    def dashboard():
        # Hard-coded 1-wire sensor id for the hot water tank —
        # TODO confirm / consider moving to config.
        hot_water_sensor_id = "28-0015231007ee"
        last_reading = db.session.query(Sensor)\
            .order_by(Sensor.last_updated.desc())\
            .filter(Sensor.sensor_id == hot_water_sensor_id)\
            .first()
        if last_reading is None:
            return render_template("user_message.html", **{'msg': 'No sensor readings in DB.'})

        # Flag a collection failure when the newest reading is stale (>10 min).
        d = datetime.utcnow() - last_reading.last_updated
        minutes_since_reading = d.total_seconds() / 60.
        page_vars = {'collection_failure': minutes_since_reading > 10.,
                     'water_temp': last_reading.value_float,
                     # 45 C is treated as hot enough for a bath.
                     'bath_possible': last_reading.value_float > 45.,
                     'last_read_at': last_reading.last_updated,
                     }
        return render_template("dashboard.html", **page_vars)

    @app.route('/sensor_scoreboard/')
    def sensor_scoreboard():
        """
        Show the current values for all input devices in the profile.
        """
        # consolidate all sensors on the scoreboard with all input devices listed in
        # the profile. Give a warning message when these don't tally.
        sensor_values = {k: v for k, v in current_app.sensor_scoreboard.get_all_current_values()}

        p = {}  # sensor name => {'display_value': '', 'display_class': ''}
        for input_device in current_app.config['INPUT_DEVICES']:
            assert isinstance(input_device, AbstractSensor)
            if input_device.name in sensor_values:
                v = sensor_values[input_device.name]
                dv = v['value_type'] + ':' + str(v['value_float'])
                display = {'display_value': dv,
                           'display_class': '',
                           }
                p[input_device.name] = display

        # Actionals also publish to the scoreboard; don't flag those as unknown.
        actional_names = [a.name for a in current_app.config['ACTIONALS']]
        for name, values in sensor_values.items():
            if name not in p and name not in actional_names:
                # in scoreboard but not in config??
                p[name] = {'display_value': str(values),
                           'display_class': 'WARNING',
                           }

        page_vars = dict(sensors=p)
        return render_template("sensor_scoreboard.html", **page_vars)

    @app.route('/run_command/', methods=['GET', 'POST'])
    @valid_session()
    def run_actional_command():
        """
        GET lists available commands
        POST Sends a user selected command to an actional
        """
        # Map each actional name to its (command, description) pairs.
        ac_command = {}
        for ac in current_app.config['ACTIONALS']:
            ac_command[ac.name] = []
            for command_template in ac.available_commands:
                cmd_summary = (command_template.command, command_template.description)
                ac_command[ac.name].append(cmd_summary)

        page_vars = {'actionals_with_commands': ac_command}

        if request.method == 'POST':
            target_actional = request.values.get('actional_name', None)
            target_command = request.values.get('command', None)
            if target_actional not in ac_command:
                abort(400, "Unknown actional {}".format(target_actional))
            try:
                # NOTE: uses the scoreboard captured from create_app's closure.
                actional_comms = scoreboard.get_current_value(target_actional)['comms']
            except KeyError:
                abort(500, "Actional not found in the scoreboard")
            actional_comms.send(CommsMessage(action="command", message=target_command))
            msg = "Running....{} .. {}".format(target_actional, target_command)
            page_vars['message'] = msg

        return render_template("run_command.html", **page_vars)

    @app.route('/login/', methods=['GET', 'POST'])
    def login():
        if request.method == 'POST':
            secret_key = current_app.config['SESSION_PASSWORD']
            if secret_key is None:
                return render_template("user_message.html",
                                       **{'msg': "secret_key hasn't been set."}
                                       ), 500
            if request.values.get('password', '') != secret_key:
                return "Incorrect password", 401

            # Crude open-redirect guard on the post-login destination —
            # NOTE(review): '@' check only; confirm this is sufficient.
            next_hop = request.values.get('next', '')
            assert '@' not in next_hop
            location = request.host_url + next_hop
            response = make_response(redirect(location, code=302))
            response.set_cookie(SESSION_COOKIE_NAME,
                                value=session_token_create(secret_key),
                                max_age=60 * 60 * 24 * 100,  # 100 days
                                httponly=True
                                )
            return response

        return render_template("login.html")

    return app
|
Flying the Friendly Skies | Are You Kidding Me?
Mark and I talked my brother David and his wife Marietta into joining us in Bermuda to celebrate their 10th anniversary. They agreed and rushed to get their passports renewed. With less than a week to go, their passports arrived.
Early morning on the day of departure, we headed to the airport–tired, but excited. To “save time” we went to a cramped, kiosk-only check-in area. We swiped the first passport–no problem. Then the second… reader error. We had to type in the passport #, date of birth and expiration date. Done. We swiped the third… reader error… type, type, type. The fourth… reader error… ugh!
Check-in complete, Mark and I crowded into the two-foot gap between kiosks to check our bag while David and Marietta hung back. The attendant had obviously skipped her morning coffee or had gotten less sleep than we did (i.e. she seemed extremely cranky).
“You’re going to Bermuda?” she asked. Maybe she was just jealous!
“D. Wachowski?” she asked Mark.
“Okay.” She put the tag on Mark’s bag and grabbed the next bag tag from the printer.
Mark’s expression said all too clearly–Are you kidding me?–but he simply pointed to his now tagged bag.
Mark pointed to David’s bag behind us and motioned for David and Marietta to join us.
My heart sank. What could possibly be wrong with a government issued passport? After a stunned silence, we managed to drag the reason out of her.
Are you kidding me?? Couldn’t she have said that in the first place?
LOL. Great title and a well written story about an experience to which we can all relate. Airports ! You should try it deaf.
|
from tkinter import *
class App(Frame):
    """Membership registration form for the SMaSH Beer Club."""

    def __init__(self, master):
        super().__init__(master)
        self.pack(padx=20, pady=20)
        self.create_widgets()

    def _beer_check(self, text, row, column):
        """Create one beer-style Checkbutton at (row, column) and return the
        BooleanVar that tracks whether it is ticked."""
        var = BooleanVar()
        Checkbutton(self, text=text, variable=var).grid(row=row, column=column)
        return var

    def create_widgets(self):
        """Build the form: name/email entries, sex radio buttons,
        favourite-beer checkboxes, and the summary area."""
        Label(self, text="Name").grid(row=0, column=0, sticky=E)
        self.name = Entry(self, width=10)
        self.name.grid(row=0, column=1)

        Label(self, text="Email").grid(row=1, column=0, sticky=E)
        self.email = Entry(self, width=10)
        self.email.grid(row=1, column=1)
        Label(self, text="@smash.ac.kr").grid(row=1, column=2, sticky=W)

        self.sex = StringVar()
        self.sex.set(None)  # no radio button selected initially
        Label(self, text="Sex").grid(row=2, column=0, sticky=E)
        Radiobutton(self, text='male',
                    variable=self.sex, value='male'
                    ).grid(row=2, column=1)
        Radiobutton(self, text='female',
                    variable=self.sex, value='female'
                    ).grid(row=2, column=2, sticky=W)

        Label(self, text="Favorites").grid(row=3, column=1)
        # One BooleanVar per beer style (fixed "Wheet" -> "Wheat" typo).
        self.lagers = self._beer_check("Lager", 4, 0)
        self.wheetbeer = self._beer_check("Wheat Beer", 4, 1)
        self.pilsners = self._beer_check("Pilsner", 4, 2)
        self.paleales = self._beer_check("Pale Ale", 5, 0)
        self.indiapaleales = self._beer_check("India Pale Ale", 5, 1)
        self.stouts = self._beer_check("Stout", 5, 2)

        Button(self, text="Register",
               command=self.write_summary
               ).grid(row=6, column=0, columnspan=3, sticky=S)
        self.summary = Text(self, width=48, height=10, wrap=WORD)
        self.summary.grid(row=7, column=0, columnspan=3, sticky=S)
        Button(self, text="Quit", command=self.quit
               ).grid(row=8, column=0, columnspan=3)

    def write_summary(self):
        """Collect the form values and display them in the summary Text widget."""
        summary = "Name: " + self.name.get() + "\n"
        summary += "Email: " + self.email.get() + "@smash.ac.kr\n"
        summary += "Sex: " + self.sex.get() + "\n"
        summary += "Favorites are: "
        favorites = [
            (self.lagers, "Lagers, "),
            (self.wheetbeer, "Wheat Beers, "),
            (self.pilsners, "Pilsners, "),
            (self.paleales, "Pale Ales, "),
            (self.indiapaleales, "India Pale Ales, "),
            (self.stouts, "Stouts, "),
        ]
        for var, label in favorites:
            if var.get():
                summary += label
        summary += "..."
        # "1.0" (line 1, column 0) is the canonical start index for a Text
        # widget; the original float 0.0 only worked by string coercion.
        self.summary.delete("1.0", END)
        self.summary.insert("1.0", summary)
# main: build the root window, attach the form, and enter the Tk event loop.
root = Tk()
root.title("SMaSH Beer Club")
root.geometry("400x420")
App(root)
root.mainloop()
|
It is a common misconception that once we’ve designed and built a website, that it will work flawlessly forever. Unfortunately, it won’t, not unless it is maintained, updated, backed up and scanned for malware on a regular basis. Websites break all the time, usually because something about their environment has changed, and the website must be changed to adapt and continue to function normally.
Malicious software is becoming a bigger problem with each passing year, and it is growing at an increasingly faster pace than ever before. In fact, last year there were 74,000 new computer viruses created every single day on average. Your site needs regular maintenance, whether you know it or not. Our Web Maintenance Plans provide your site with everything it needs to remain healthy and safe.
FAQ - Got questions? We got answers!
It is a common misconception that once we’ve designed and built a website, that it will work flawlessly forever. It won’t, not unless it is maintained and managed on a regular basis. Websites break all the time, usually because something about their environment has changed, and the website must be changed to adapt and continue to function normally.
What does the FREE WEB DESIGN include?
The FREE WEB DESIGN included with our Web Management Plans gives you up to 5 pages, 1 home page slider/banner, blog integration, a contact form, and social media links. The site is built on the WordPress platform, which also gives you the ability to make changes to your site yourself. Additional pages and options are extra.
Yes. If you purchase your Web Design and then decide to add a Monthly Management Plan, your monthly fees will be $10 less than the regular monthly fees.
Can I keep my site if I cancel my Web Management Plan?
Our Web Management Plans include a free Web Design for the life of the plan. If your Web Management plan is cancelled and you would like to keep your web design, you will need to purchase the web design.
|
from itertools import zip_longest
import threading
import time
from ._vendor.Adafruit_CharLCD import Adafruit_CharLCDPlate, SELECT, RIGHT, DOWN, UP, LEFT
def noop():
    """Default callback used when no handler is supplied for a button."""
    return None
class LCDButtons:
    """Drives an Adafruit character-LCD plate and polls its buttons.

    Rendering writes only the characters that changed since the previous
    update, to minimise slow traffic to the plate.  A background thread
    polls the five plate buttons and fires the corresponding callback
    when a button is released.
    """

    # Shared across instances: one physical plate and one lock guarding
    # all traffic to it (cursor moves, character writes, button reads).
    _lcd = Adafruit_CharLCDPlate()
    _io_lock = threading.Lock()

    def __init__(self, *, on_up=noop, on_down=noop, on_left=noop, on_right=noop, on_select=noop):
        """Store the button callbacks and start the polling thread.

        Each ``on_*`` callback is invoked with no arguments when the
        corresponding button is released.
        """
        # Make the equality check fail for every character during the
        # initial update: object() compares unequal to any character,
        # which forces a full first paint of both rows.
        self._old_rows = [
            [object()] * 15,
            [object()] * 15,
        ]
        # Last known position of the LCD hardware cursor; -1 means unknown.
        self._cursor_col = -1
        self._cursor_row = -1
        self._on_up = on_up
        self._on_down = on_down
        self._on_left = on_left
        self._on_right = on_right
        self._on_select = on_select
        # NOTE(review): not a daemon thread, so the poller keeps the
        # process alive after the main thread exits -- confirm intended.
        self._thread = threading.Thread(target=self._button_thread_watcher,
                                        name='button poller')
        self._thread.start()

    def _render(self, *, pid):
        """Return the two text rows to display for the given PID state."""
        def fmt(temp):
            # Temperatures may be unknown (None) before the first reading.
            if temp is not None:
                return '{0:.1f}'.format(temp)
            else:
                return '-'
        return [
            '{current} / {target} C'.format(current=fmt(pid.temperature_current), target=fmt(pid.temperature_target)),
            '{} %'.format(int(pid.duty_cycle * 100))
        ]

    def _set_char(self, col_idx, row_idx, char):
        """Write one character, repositioning the cursor only if needed."""
        if (self._cursor_col, self._cursor_row) != (col_idx, row_idx):
            with self._io_lock:
                self._lcd.set_cursor(col_idx, row_idx)
            self._cursor_col = col_idx
            self._cursor_row = row_idx
        with self._io_lock:
            self._lcd.message(char)
        # Writing advances the hardware cursor one column to the right.
        self._cursor_col += 1

    def _update_row(self, row_idx, old_row, new_row):
        """Write only the characters of ``new_row`` that differ from ``old_row``.

        ``fillvalue=' '`` blanks out trailing characters when the new row
        is shorter than the old one.
        """
        for col_idx, (old_char, new_char) in enumerate(zip_longest(old_row, new_row, fillvalue=' ')):
            if old_char != new_char:
                self._set_char(col_idx, row_idx, new_char)

    def set_temperature_current(self, temperature):
        """Record the most recent temperature reading on this instance."""
        self.temperature_current = temperature

    def update_screen(self, **context):
        """Re-render the display from ``context`` (see ``_render``)."""
        new_rows = self._render(**context)
        for row_idx, (old_row, new_row) in enumerate(zip(self._old_rows, new_rows)):
            self._update_row(row_idx, old_row, new_row)
        self._old_rows = new_rows

    def _button_thread_watcher(self):
        """Poll the plate buttons forever, firing callbacks on release."""
        buttons = [
            (SELECT, self._on_select),
            (RIGHT, self._on_right),
            (DOWN, self._on_down),
            (UP, self._on_up),
            (LEFT, self._on_left),
        ]
        pressed_buttons = set()
        while True:
            for button, func in buttons:
                with self._io_lock:
                    is_pressed = self._lcd.is_pressed(button)
                if is_pressed:
                    pressed_buttons.add(button)
                elif button in pressed_buttons:
                    # Fix: the previous "not is_pressed and ..." test was
                    # redundant -- is_pressed is already False in this
                    # branch.  The button was down last poll and is up
                    # now, i.e. it was released: fire its callback.
                    pressed_buttons.remove(button)
                    func()
            time.sleep(0.1)
|
Allett Liberty 30 Cordless Cylinder Lawnmower This new Allett Liberty 30 push mower is the ideal model for small to medium gardens. It has a five blade carbon steel cylinder and a 32 litre grass box capacity. Featuring a steel rear roller to truly give the perfect stripe.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import namedtuple
import logging
import re
from django.core.urlresolvers import reverse
from regulations.generator.toc import fetch_toc
from regulations.views import utils
logger = logging.getLogger(__name__)
class Title(namedtuple('Title', ['full', 'short', 'subtitle'])):
    """Navigation title: full text plus optional short form and subtitle."""
    def __new__(cls, full, short=None, subtitle=None):
        """Allow ``short`` and ``subtitle`` to be omitted (default None)."""
        return super(Title, cls).__new__(cls, full, short, subtitle)
class NavItem(namedtuple(
        'NavItem',
        ['url', 'title', 'markup_id', 'children', 'category', 'section_id'])):
    """Shared data structure representing an entry in the table of contents
    and in the page-footer navigation. We may be able to expand this
    standardization more broadly than fr_notices, but let's move one step
    at a time.

    :type title: Title
    :type markup_id: str
    :type children: potentially empty list
    :type category: str or None
    :param str section_id: markup id associated with AJAX and other JS;
        temporary shim so that we can turn off AJAX per NavItem. Defaults
        to copy the markup_id
    """
    def __new__(cls, url, title, markup_id, children=None, category=None,
                section_id=None):
        """Fill in defaults: no children; section_id mirrors markup_id."""
        defaulted_children = [] if children is None else children
        defaulted_section = markup_id if section_id is None else section_id
        return super(NavItem, cls).__new__(
            cls, url, title, markup_id, defaulted_children, category,
            defaulted_section)

    # Properties/fns kept for backwards compatibility
    @property
    def markup_prefix(self):
        return self.title.short

    @property
    def sub_label(self):
        return self.title.subtitle
def _preamble_titles(node):
    """Hack: Split out navigation title and subtitle from a preamble node.
    TODO: Emit these fields in a ToC layer in -parser instead
    :param node: a preamble Node (i.e. dict)
    :return: pair of (title, sub_title) strings"""
    full_title = node['title']
    marker = node['label'][-1]
    expected_prefix = '{}. '.format(marker.lower())
    # If the title begins with its own marker ("a. Some title"), split the
    # marker off into the short form and keep the remainder as subtitle.
    if full_title.lower().startswith(expected_prefix):
        short, subtitle = full_title.split('. ', 1)
        return Title(full_title, short, subtitle)
    return Title(full_title, marker, full_title)
def make_preamble_nav(nodes, depth=1, max_depth=3):
    """Generate NavItems specific to a notice's preamble.
    :type nodes: iterable of Node (a dict)
    :param int depth: Current nesting depth of navigation elements
    :param int max_depth: We'll stop processing once we reach a certain depth
    """
    nav_items = []
    for node in nodes:
        # Untitled nodes don't appear in the navigation at all.
        if not node.get('title'):
            continue
        label = node['label']
        url = reverse('chrome_preamble',
                      kwargs={'paragraphs': '/'.join(label[:2])})
        # Add a hash to a specific section if we're not linking to the
        # top-level entry
        if len(label) > 2:
            url += '#' + '-'.join(label)
        markup_id = '{}-preamble-{}'.format(label[0], '-'.join(label))
        if 'intro' in label or depth == max_depth:
            children = []
        else:
            children = make_preamble_nav(node.get('children', []),
                                         depth=depth + 1,
                                         max_depth=max_depth)
        nav_items.append(NavItem(url=url,
                                 title=_preamble_titles(node),
                                 markup_id=markup_id,
                                 children=children))
    return nav_items
class CFRChangeBuilder(object):
    """Builds the ToC specific to CFR changes from amendment data. As there is
    some valuable state shared between amendment processing, we store it all
    in an object"""
    def __init__(self):
        """version_info structure: {cfr_part -> {"left": str, "right": str}}
        e.g. {"111": {"left": "v1", "right": "v2"},
              "222": {"left": "vold", "right": "vnew"}}"""
        # CFR title number, part and section currently being processed
        self.cfr_title = self.cfr_part = self.section = None
        # {section marker -> section title} for the current CFR part
        self.section_titles = {}
        # accumulated NavItem entries, in processing order
        self.toc = []
    def add_cfr_part(self, doc_number, version_info, amendment):
        """While processing an amendment, if it refers to a CFR part which
        hasn't been seen before, we need to perform some accounting, fetching
        related meta data, etc."""
        part = amendment['cfr_part']
        if part not in version_info:
            # can't build an entry without knowing which version to fetch
            logger.error("No version info for %s", part)
        elif self.cfr_part is None or self.cfr_part != amendment['cfr_part']:
            # first time we see this part: cache its metadata and the
            # titles of its sections from the flattened ToC
            meta = utils.regulation_meta(part, version_info[part]['right'])
            flat_toc = fetch_toc(part, version_info[part]['right'],
                                 flatten=True)
            self.section_titles = {
                elt['index'][1]: elt['title']
                for elt in flat_toc if len(elt['index']) == 2}
            self.cfr_part = part
            self.cfr_title = meta.get('cfr_title_number')
            self.section = None
            title = '{} CFR {}'.format(self.cfr_title, part)
            markup_id = '{}-cfr-{}'.format(doc_number, part)
            self.toc.append(NavItem(
                url=reverse('cfr_changes', kwargs={
                    'doc_number': doc_number, 'section': part}),
                title=Title('Authority', title, 'Authority'),
                markup_id=markup_id,
                category=title,
                section_id=''))  # disable AJAX
    # matches e.g. "§ 478.11 Definitions." -> ("§ 478.11", "Definitions.")
    _cfr_re = re.compile(r'(§ [\d.]+) (.*)')
    def _change_title(self, section):
        """Build a Title for a section, splitting its stored title string
        into a "§ N.N" prefix and the remaining text when possible."""
        if section not in self.section_titles:
            logger.error("Could not find section title for %s", section)
        title_str = self.section_titles.get(section, '')
        # Hack: Reconstitute node prefix and title
        # TODO: Emit these fields in a ToC layer in -parser instead
        match = self._cfr_re.search(title_str)
        if match:
            return Title(title_str, *match.groups())
        else:
            return Title(title_str, title_str)
    def add_change(self, doc_number, label_parts):
        """While processing an amendment, we will encounter sections we
        haven't seen before -- these will ultimately be ToC entries"""
        change_section = label_parts[1]
        # Subpart/subject-group labels don't get their own ToC entries
        is_subpart = 'Subpart' in label_parts or 'Subjgrp' in label_parts
        if not is_subpart and (self.section is None or
                               self.section != change_section):
            self.section = change_section
            section = '-'.join(label_parts[:2])
            self.toc.append(NavItem(
                url=reverse('cfr_changes', kwargs={
                    'doc_number': doc_number,
                    'section': section}),
                title=self._change_title(change_section),
                markup_id='{}-cfr-{}'.format(doc_number, section),
                category='{} CFR {}'.format(self.cfr_title, self.cfr_part)
            ))
def make_cfr_change_nav(doc_number, version_info, amendments):
    """Soup to nuts conversion from a document number to a table of contents
    list"""
    builder = CFRChangeBuilder()
    for amendment in amendments:
        # Amendment dicts look like
        # {'cfr_part': 111, 'instruction': 'text1', 'authority': 'text2'} or
        # {'cfr_part': 111, 'instruction': 'text3',
        #  'changes': [['111-22-c', [data1]], ['other', [data2]]}
        builder.add_cfr_part(doc_number, version_info, amendment)
        for change_label, _unused in amendment.get('changes', []):
            builder.add_change(doc_number, change_label.split('-'))
    return builder.toc
def footer(preamble_toc, cfr_toc, full_id):
    """Generate "navigation" context which allows the user to move between
    sections in the footer"""
    items = preamble_toc + cfr_toc
    nav = {'previous': None, 'next': None, 'page_type': 'preamble-section'}
    for position, entry in enumerate(items):
        if entry.markup_id != full_id:
            continue
        # Found the current section; link to its neighbours when present.
        if position:
            nav['previous'] = items[position - 1]
        if position + 1 < len(items):
            nav['next'] = items[position + 1]
    return nav
|
This week's review concerns round creatures with tiny arms and legs, big eyes and a great deal of momentum. Many of them are blue, but some are red or even green. I am mystified as to the plot behind this game yet, like so many game plots, I scarcely think it matters (or that many people bother to read them). On with the game!
Funny Creatures is a puzzle game in keeping with the format of similar puzzlers such as Rotadim or Stone Collector. Select a creature by clicking on it with the mouse, then click roughly in the direction you want it to go. Get all the creatures onto the corresponding markers to complete the puzzle. The usual catch applies - once they move, they won't stop until they hit a wall or another creature.
Graphically, Funny Creatures is okay - but there is nothing exceptional. The creatures themselves look more bizarre than cute (which appears to be the intention) and the colours seem remarkably invariant - I have seen red, dark blue and light blue creatures. Later on there are green ones. Why not yellow, purple, brown, black or white creatures? The backgrounds vary nicely though, and the creatures always stand out.
The music is described as "beautiful" by the game website. It is a pleasant little ditty. It is also never-ending. I started playing on mute after a few games. It's not a bad tune, just repetitive. Funny Creatures would benefit from a range of background music.
I have to admit that this is one of the easiest games to operate that Bytten has reviewed. Controls are through mouse or keyboard. Less experienced mouse users will probably find the keyboard a better bet, for when clicking on creatures it is easy to miss and send your current creature zipping along the board (which can ruin your hard work as there is no undo feature!). The move counter ticks upwards, not down, so you can solve a puzzle in eight moves or eight hundred moves as far as the game is concerned.
Funny Creatures doesn't have many obvious flaws to it, though it shares one with the earlier version of Rotadim. If you attempt to move a creature in a direction it cannot go, nothing will happen to the creature; however, your move counter will go up. Thankfully (unlike Rotadim) there are no move restrictions - indeed, no restrictions at all. This is ideal for children, perhaps, but the lack of restriction meant I found little purpose in forward planning.
Overall, this is a nice game and an excellent early effort by Astatix, but lacks the depth found in similar games by more experienced developers.
Keywords: funny creatures review, astatix software reviews, astatix software games, funny creatures scores, pc game reviews, indie game reviews, independent gaming.
|
# Audio Tools, a module and set of tools for manipulating audio data
# Copyright (C) 2007-2016 Brian Langenberger
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from audiotools import (AudioFile, InvalidFile, ChannelMask)
class InvalidVorbis(InvalidFile):
    """raised when an Ogg Vorbis file's headers cannot be parsed"""
    pass
class VorbisAudio(AudioFile):
    """an Ogg Vorbis file

    sample rate, channel count and stream serial number are read
    eagerly from the identification header at construction time;
    comments and total frames are read on demand"""

    # imported at class scope so the COMP_VORBIS_* constants are
    # available while the class body builds COMPRESSION_DESCRIPTIONS
    from audiotools.text import (COMP_VORBIS_0,
                                 COMP_VORBIS_10)

    SUFFIX = "ogg"
    NAME = SUFFIX
    DESCRIPTION = u"Ogg Vorbis"
    DEFAULT_COMPRESSION = "3"
    # quality levels "0" through "10"
    COMPRESSION_MODES = tuple([str(i) for i in range(0, 11)])
    COMPRESSION_DESCRIPTIONS = {"0": COMP_VORBIS_0,
                                "10": COMP_VORBIS_10}

    def __init__(self, filename):
        """filename is a plain string

        raises InvalidVorbis if the identification header
        cannot be read or is invalid"""
        AudioFile.__init__(self, filename)
        self.__sample_rate__ = 0
        self.__channels__ = 0
        try:
            self.__read_identification__()
        except IOError as msg:
            raise InvalidVorbis(str(msg))

    def __read_identification__(self):
        """reads the first Ogg page and the Vorbis identification packet,
        populating __serial_number__, __channels__ and __sample_rate__

        raises InvalidVorbis if a magic number or version is invalid"""
        from audiotools.bitstream import BitstreamReader

        with BitstreamReader(open(self.filename, "rb"), True) as ogg_reader:
            # Ogg page header fields
            (magic_number,
             version,
             header_type,
             granule_position,
             self.__serial_number__,
             page_sequence_number,
             checksum,
             segment_count) = ogg_reader.parse("4b 8u 8u 64S 32u 32u 32u 8u")

            if magic_number != b'OggS':
                from audiotools.text import ERR_OGG_INVALID_MAGIC_NUMBER
                raise InvalidVorbis(ERR_OGG_INVALID_MAGIC_NUMBER)
            if version != 0:
                from audiotools.text import ERR_OGG_INVALID_VERSION
                raise InvalidVorbis(ERR_OGG_INVALID_VERSION)

            # first entry of the page's segment table
            segment_length = ogg_reader.read(8)

            # Vorbis identification header packet
            (vorbis_type,
             header,
             version,
             self.__channels__,
             self.__sample_rate__,
             maximum_bitrate,
             nominal_bitrate,
             minimum_bitrate,
             blocksize0,
             blocksize1,
             framing) = ogg_reader.parse(
                "8u 6b 32u 8u 32u 32u 32u 32u 4u 4u 1u")

            if vorbis_type != 1:
                from audiotools.text import ERR_VORBIS_INVALID_TYPE
                raise InvalidVorbis(ERR_VORBIS_INVALID_TYPE)
            if header != b'vorbis':
                from audiotools.text import ERR_VORBIS_INVALID_HEADER
                raise InvalidVorbis(ERR_VORBIS_INVALID_HEADER)
            if version != 0:
                from audiotools.text import ERR_VORBIS_INVALID_VERSION
                raise InvalidVorbis(ERR_VORBIS_INVALID_VERSION)
            if framing != 1:
                from audiotools.text import ERR_VORBIS_INVALID_FRAMING_BIT
                raise InvalidVorbis(ERR_VORBIS_INVALID_FRAMING_BIT)

    def lossless(self):
        """returns False"""
        return False

    def bits_per_sample(self):
        """returns an integer number of bits-per-sample this track contains"""
        # fixed value for Vorbis output in this implementation
        return 16

    def channels(self):
        """returns an integer number of channels this track contains"""
        return self.__channels__

    def channel_mask(self):
        """returns a ChannelMask object of this track's channel layout"""
        # each channel count maps to one fixed speaker assignment
        if self.channels() == 1:
            return ChannelMask.from_fields(
                front_center=True)
        elif self.channels() == 2:
            return ChannelMask.from_fields(
                front_left=True, front_right=True)
        elif self.channels() == 3:
            return ChannelMask.from_fields(
                front_left=True, front_right=True,
                front_center=True)
        elif self.channels() == 4:
            return ChannelMask.from_fields(
                front_left=True, front_right=True,
                back_left=True, back_right=True)
        elif self.channels() == 5:
            return ChannelMask.from_fields(
                front_left=True, front_right=True,
                front_center=True,
                back_left=True, back_right=True)
        elif self.channels() == 6:
            return ChannelMask.from_fields(
                front_left=True, front_right=True,
                front_center=True,
                back_left=True, back_right=True,
                low_frequency=True)
        elif self.channels() == 7:
            return ChannelMask.from_fields(
                front_left=True, front_right=True,
                front_center=True,
                side_left=True, side_right=True,
                back_center=True, low_frequency=True)
        elif self.channels() == 8:
            return ChannelMask.from_fields(
                front_left=True, front_right=True,
                side_left=True, side_right=True,
                back_left=True, back_right=True,
                front_center=True, low_frequency=True)
        else:
            # undefined layout
            return ChannelMask(0)

    def total_frames(self):
        """returns the total PCM frames of the track as an integer"""
        from audiotools._ogg import PageReader

        try:
            with PageReader(open(self.filename, "rb")) as reader:
                # the largest granule position across all pages
                # is the stream's total PCM frame count
                page = reader.read()
                pcm_samples = page.granule_position
                while not page.stream_end:
                    page = reader.read()
                    pcm_samples = max(pcm_samples, page.granule_position)
                return pcm_samples
        except (IOError, ValueError):
            # unreadable or malformed stream
            return 0

    def sample_rate(self):
        """returns the rate of the track's audio as an integer number of Hz"""
        return self.__sample_rate__

    @classmethod
    def supports_to_pcm(cls):
        """returns True if all necessary components are available
        to support the .to_pcm() method"""
        try:
            from audiotools.decoders import VorbisDecoder
            return True
        except ImportError:
            return False

    def to_pcm(self):
        """returns a PCMReader object containing the track's PCM data"""
        from audiotools.decoders import VorbisDecoder

        try:
            return VorbisDecoder(self.filename)
        except ValueError as err:
            # return an error-carrying reader rather than raising,
            # preserving this track's stream parameters
            from audiotools import PCMReaderError
            return PCMReaderError(str(err),
                                  self.sample_rate(),
                                  self.channels(),
                                  int(self.channel_mask()),
                                  self.bits_per_sample())

    @classmethod
    def supports_from_pcm(cls):
        """returns True if all necessary components are available
        to support the .from_pcm() classmethod"""
        try:
            from audiotools.encoders import encode_vorbis
            return True
        except ImportError:
            return False

    @classmethod
    def from_pcm(cls, filename, pcmreader,
                 compression=None, total_pcm_frames=None):
        """encodes a new file from PCM data

        takes a filename string, PCMReader object,
        optional compression level string and
        optional total_pcm_frames integer

        encodes a new audio file from pcmreader's data
        at the given filename with the specified compression level
        and returns a new VorbisAudio object

        raises UnsupportedBitsPerSample, UnsupportedChannelMask
        or EncodingError on failure"""
        from audiotools import (BufferedPCMReader,
                                __default_quality__,
                                EncodingError)
        from audiotools.encoders import encode_vorbis

        # fall back to the configured default quality level
        if (((compression is None) or
             (compression not in cls.COMPRESSION_MODES))):
            compression = __default_quality__(cls.NAME)

        if pcmreader.bits_per_sample not in {8, 16, 24}:
            from audiotools import UnsupportedBitsPerSample
            pcmreader.close()
            raise UnsupportedBitsPerSample(filename, pcmreader.bits_per_sample)

        if (pcmreader.channels > 2) and (pcmreader.channels <= 8):
            # multichannel input must use one of the supported
            # channel assignments, or an undefined mask of 0
            channel_mask = int(pcmreader.channel_mask)
            if ((channel_mask != 0) and
                (channel_mask not in
                 (0x7,      # FR, FC, FL
                  0x33,     # FR, FL, BR, BL
                  0x37,     # FR, FC, FL, BL, BR
                  0x3f,     # FR, FC, FL, BL, BR, LFE
                  0x70f,    # FL, FC, FR, SL, SR, BC, LFE
                  0x63f))): # FL, FC, FR, SL, SR, BL, BR, LFE
                from audiotools import UnsupportedChannelMask
                pcmreader.close()
                raise UnsupportedChannelMask(filename, channel_mask)

        if total_pcm_frames is not None:
            # wrap the reader so frames_written can be verified afterward
            from audiotools import CounterPCMReader
            pcmreader = CounterPCMReader(pcmreader)

        try:
            encode_vorbis(filename,
                          pcmreader,
                          float(compression) / 10)

            if ((total_pcm_frames is not None) and
                (total_pcm_frames != pcmreader.frames_written)):
                # the stated frame count didn't match what was read,
                # so discard the partial output file
                from audiotools.text import ERR_TOTAL_PCM_FRAMES_MISMATCH
                cls.__unlink__(filename)
                raise EncodingError(ERR_TOTAL_PCM_FRAMES_MISMATCH)

            return VorbisAudio(filename)
        except (ValueError, IOError) as err:
            cls.__unlink__(filename)
            raise EncodingError(str(err))
        finally:
            pcmreader.close()

    def update_metadata(self, metadata):
        """takes this track's current MetaData object
        as returned by get_metadata() and sets this track's metadata
        with any fields updated in that object

        raises IOError if unable to write the file
        """
        import os
        from audiotools import TemporaryFile
        from audiotools.ogg import (PageReader,
                                    PacketReader,
                                    PageWriter,
                                    packet_to_pages,
                                    packets_to_pages)
        from audiotools.vorbiscomment import VorbisComment
        from audiotools.bitstream import BitstreamRecorder

        if metadata is None:
            return
        elif not isinstance(metadata, VorbisComment):
            from audiotools.text import ERR_FOREIGN_METADATA
            raise ValueError(ERR_FOREIGN_METADATA)
        elif not os.access(self.filename, os.W_OK):
            raise IOError(self.filename)

        # rewrite the whole stream to a temporary file, replacing only
        # the comment packet; the original serial number is reused so
        # the rewritten pages still belong to the same logical stream
        original_ogg = PacketReader(PageReader(open(self.filename, "rb")))
        new_ogg = PageWriter(TemporaryFile(self.filename))

        sequence_number = 0

        # transfer current file's identification packet in its own page
        identification_packet = original_ogg.read_packet()
        for (i, page) in enumerate(packet_to_pages(
                identification_packet,
                self.__serial_number__,
                starting_sequence_number=sequence_number)):
            page.stream_beginning = (i == 0)
            new_ogg.write(page)
            sequence_number += 1

        # discard the current file's comment packet
        comment_packet = original_ogg.read_packet()

        # generate new comment packet
        comment_writer = BitstreamRecorder(True)
        comment_writer.build("8u 6b", (3, b"vorbis"))
        vendor_string = metadata.vendor_string.encode('utf-8')
        comment_writer.build("32u {:d}b".format(len(vendor_string)),
                             (len(vendor_string), vendor_string))
        comment_writer.write(32, len(metadata.comment_strings))
        for comment_string in metadata.comment_strings:
            comment_string = comment_string.encode('utf-8')
            comment_writer.build("32u {:d}b".format(len(comment_string)),
                                 (len(comment_string), comment_string))

        comment_writer.build("1u a", (1,))  # framing bit

        # transfer codebooks packet from original file to new file
        codebooks_packet = original_ogg.read_packet()

        for page in packets_to_pages(
                [comment_writer.data(), codebooks_packet],
                self.__serial_number__,
                starting_sequence_number=sequence_number):
            new_ogg.write(page)
            sequence_number += 1

        # transfer remaining pages after re-sequencing
        page = original_ogg.read_page()
        page.sequence_number = sequence_number
        page.bitstream_serial_number = self.__serial_number__
        sequence_number += 1
        new_ogg.write(page)
        while not page.stream_end:
            page = original_ogg.read_page()
            page.sequence_number = sequence_number
            page.bitstream_serial_number = self.__serial_number__
            sequence_number += 1
            new_ogg.write(page)

        original_ogg.close()
        new_ogg.close()

    def set_metadata(self, metadata):
        """takes a MetaData object and sets this track's metadata

        this metadata includes track name, album name, and so on
        raises IOError if unable to write the file"""
        from audiotools.vorbiscomment import VorbisComment

        if metadata is None:
            return self.delete_metadata()

        metadata = VorbisComment.converted(metadata)

        old_metadata = self.get_metadata()

        metadata.vendor_string = old_metadata.vendor_string

        # preserve this file's existing REPLAYGAIN_* tags (if any),
        # overriding whatever the incoming metadata carried for them
        for key in [u"REPLAYGAIN_TRACK_GAIN",
                    u"REPLAYGAIN_TRACK_PEAK",
                    u"REPLAYGAIN_ALBUM_GAIN",
                    u"REPLAYGAIN_ALBUM_PEAK",
                    u"REPLAYGAIN_REFERENCE_LOUDNESS"]:
            try:
                metadata[key] = old_metadata[key]
            except KeyError:
                metadata[key] = []

        self.update_metadata(metadata)

    @classmethod
    def supports_metadata(cls):
        """returns True if this audio type supports MetaData"""
        return True

    def get_metadata(self):
        """returns a MetaData object, or None

        raises IOError if unable to read the file"""
        from io import BytesIO
        from audiotools.bitstream import BitstreamReader
        from audiotools.ogg import PacketReader, PageReader
        from audiotools.vorbiscomment import VorbisComment

        with PacketReader(PageReader(open(self.filename, "rb"))) as reader:
            # skip the identification packet; the comment packet follows
            identification = reader.read_packet()
            comment = BitstreamReader(BytesIO(reader.read_packet()), True)

            (packet_type, packet_header) = comment.parse("8u 6b")
            if (packet_type == 3) and (packet_header == b'vorbis'):
                vendor_string = \
                    comment.read_bytes(comment.read(32)).decode('utf-8')
                comment_strings = [
                    comment.read_bytes(comment.read(32)).decode('utf-8')
                    for i in range(comment.read(32))]
                if comment.read(1) == 1:   # framing bit
                    return VorbisComment(comment_strings, vendor_string)
                else:
                    return None
            else:
                return None

    def delete_metadata(self):
        """deletes the track's MetaData

        this removes or unsets tags as necessary in order to remove all data
        raises IOError if unable to write the file"""
        from audiotools import MetaData

        # the vorbis comment packet is required,
        # so simply zero out its contents
        self.set_metadata(MetaData())

    @classmethod
    def supports_replay_gain(cls):
        """returns True if this class supports ReplayGain"""
        return True

    def get_replay_gain(self):
        """returns a ReplayGain object of our ReplayGain values

        returns None if we have no values"""
        from audiotools import ReplayGain

        vorbis_metadata = self.get_metadata()

        if ((vorbis_metadata is not None) and
            ({u'REPLAYGAIN_TRACK_PEAK',
              u'REPLAYGAIN_TRACK_GAIN',
              u'REPLAYGAIN_ALBUM_PEAK',
              u'REPLAYGAIN_ALBUM_GAIN'}.issubset(vorbis_metadata.keys()))):
            # we have ReplayGain data
            try:
                # gain values carry a trailing " dB" suffix which is
                # sliced off before conversion
                return ReplayGain(
                    vorbis_metadata[u'REPLAYGAIN_TRACK_GAIN'][0][0:-len(u" dB")],
                    vorbis_metadata[u'REPLAYGAIN_TRACK_PEAK'][0],
                    vorbis_metadata[u'REPLAYGAIN_ALBUM_GAIN'][0][0:-len(u" dB")],
                    vorbis_metadata[u'REPLAYGAIN_ALBUM_PEAK'][0])
            except (IndexError, ValueError):
                # missing or malformed tag values
                return None
        else:
            return None

    def set_replay_gain(self, replaygain):
        """given a ReplayGain object, sets the track's gain to those values

        may raise IOError if unable to modify the file"""
        if replaygain is None:
            return self.delete_replay_gain()

        vorbis_comment = self.get_metadata()
        if vorbis_comment is None:
            # no existing comment block; start a fresh one
            from audiotools.vorbiscomment import VorbisComment
            from audiotools import VERSION

            vorbis_comment = VorbisComment(
                [], u"Python Audio Tools {}".format(VERSION))

        vorbis_comment[u"REPLAYGAIN_TRACK_GAIN"] = [
            u"{:.2f} dB".format(replaygain.track_gain)]
        vorbis_comment[u"REPLAYGAIN_TRACK_PEAK"] = [
            u"{:.8f}".format(replaygain.track_peak)]
        vorbis_comment[u"REPLAYGAIN_ALBUM_GAIN"] = [
            u"{:.2f} dB".format(replaygain.album_gain)]
        vorbis_comment[u"REPLAYGAIN_ALBUM_PEAK"] = [
            u"{:.8f}".format(replaygain.album_peak)]
        vorbis_comment[u"REPLAYGAIN_REFERENCE_LOUDNESS"] = [u"89.0 dB"]

        self.update_metadata(vorbis_comment)

    def delete_replay_gain(self):
        """removes ReplayGain values from file, if any

        may raise IOError if unable to modify the file"""
        vorbis_comment = self.get_metadata()
        if vorbis_comment is not None:
            for field in [u"REPLAYGAIN_TRACK_GAIN",
                          u"REPLAYGAIN_TRACK_PEAK",
                          u"REPLAYGAIN_ALBUM_GAIN",
                          u"REPLAYGAIN_ALBUM_PEAK",
                          u"REPLAYGAIN_REFERENCE_LOUDNESS"]:
                try:
                    del(vorbis_comment[field])
                except KeyError:
                    # field wasn't present; nothing to remove
                    pass

            self.update_metadata(vorbis_comment)
class VorbisChannelMask(ChannelMask):
    """the Vorbis-specific channel mapping"""

    def __repr__(self):
        set_fields = ["{}={}".format(speaker, getattr(self, speaker))
                      for speaker in self.SPEAKER_TO_MASK.keys()
                      if getattr(self, speaker)]
        return "VorbisChannelMask({})".format(",".join(set_fields))

    def channels(self):
        """returns a list of speaker strings this mask contains

        returned in the order in which they should appear
        in the PCM stream
        """
        # fixed speaker ordering per channel count; unknown counts
        # yield an empty list
        orderings = {
            1: ["front_center"],
            2: ["front_left", "front_right"],
            3: ["front_left", "front_center", "front_right"],
            4: ["front_left", "front_right",
                "back_left", "back_right"],
            5: ["front_left", "front_center", "front_right",
                "back_left", "back_right"],
            6: ["front_left", "front_center", "front_right",
                "back_left", "back_right", "low_frequency"],
            7: ["front_left", "front_center", "front_right",
                "side_left", "side_right", "back_center",
                "low_frequency"],
            8: ["front_left", "front_center", "front_right",
                "side_left", "side_right",
                "back_left", "back_right", "low_frequency"],
        }
        return orderings.get(len(self), [])
|
Local authority building control surveyors provide the building control services for major shopping centres, supermarkets, mixed use developments, national roll-out programmes and small scale fit-outs. Our surveyors have been chosen to provide building control services on the most technically complex projects and developments and our early involvement and availability adds real value to the design process. Here is a small selection of the projects we've worked on.
A single storey pod building, constructed to house three new restaurants.
The building control team were able to provide lots of flexibility in relation to carrying out early inspections.
An enormous mixed use commercial building with a four storey basement near Victoria Station.
The building control team were involved from pre-commencement stage.
For more information on any of the above please contact our Director of Regulatory Policy Martin Taylor by phone on 07766 493885 or by email at martin.taylor@labc.co.uk.
Or speak to your local council's building control team about building regulations.
The Manado Town shop is part of the Islands scheme at Chester Zoo.
The timber-framed building and internal fit-out provides a completely modern EPOS system housed in handmade units.
Europe's first boutique Asian shopping centre.
Problems were identified and solutions decided on at an early stage thanks to the cooperation between the designers, contractors and Newham Council Building Control.
|
"""
Functions to decode messages
"""
# Copyright (C) 2015 by
# Himanshu Mishra <himanshu2014iit@gmail.com>
# All rights reserved.
# GNU license.
from . import encoding
__all__ = ['decode']
def decode(code, encoding_type='default'):
    """Converts a string of morse code into English message

    The encoded message can also be decoded using the same morse chart
    backwards.

    In the default encoding, letters are separated by three spaces and
    words by seven spaces (standard morse 3:7 gap spacing).
    """
    reversed_morsetab = {symbol: character for character,
                         symbol in list(getattr(encoding, 'morsetab').items())}

    if encoding_type == 'default':
        # For spacing the words: scan for runs of spaces.  A seven-space
        # run separates words, a three-space run separates letters.
        # (These literals had been collapsed to single spaces, which made
        # the word-gap branch unreachable for real input.)
        letters = 0
        words = 0
        index = {}
        for i in range(len(code)):
            if code[i: i + 3] == ' ' * 3:
                if code[i: i + 7] == ' ' * 7:
                    words += 1
                    letters += 1
                    index[words] = letters
                # Count a letter gap only at the start of a three-space
                # run (previous char not a space).  The bounds check
                # replaces the original truthiness test on code[i+4],
                # which raised IndexError on trailing spaces.
                elif i + 4 < len(code) and code[i - 1] != ' ':
                    letters += 1
        message = [reversed_morsetab[i] for i in code.split()]
        # Re-insert one literal space at each recorded word boundary;
        # the enumerate offset accounts for spaces already inserted.
        for i, (word, letter) in enumerate(list(index.items())):
            message.insert(letter + i, ' ')
        return ''.join(message)

    if encoding_type == 'binary':
        return ('Sorry, but it seems that binary encodings can have multiple'
                ' messages. So for now, we couldn\'t show even one of them.')
|
Another Side Records, 2015 (ASR14-114).
Great T-shirt!!!!! I love it!!!!!!!
|
import sys
from django.db import models
from django.core.signals import request_started
from django.db.models import signals
from django.utils.importlib import import_module
from .utils import register, unregister, isregister, countregister, get_users
def import_class(module, class_name):
    """Import dotted module path ``module`` and return its attribute
    named ``class_name``."""
    imported = import_module(module)
    return getattr(imported, class_name)
def monkey(model, info):
    """Monkey-patch register/unregister helper methods onto ``model``.

    ``info`` keys (all optional except ``names``):
      code          -- numeric registration code (default 1)
      unique        -- whether a user may register only once (default True)
      names         -- dict mapping 'set'/'uset'/'is'/'count'/'users' to
                       method names, or a single string suffix from which
                       those five names are derived
      positive_only -- if True, patched methods always pass positive=True
      counter_field -- field name (or list of names) for a denormalized
                       counter; created on the model if missing
    """
    # Init base params
    code = info.get('code', 1)
    unique = info.get('unique', True)
    methods = info.get('names')
    positive_only = info.get('positive_only', False)
    counter_field = info.get('counter_field', None)
    if isinstance(methods,basestring):
        # a bare suffix expands to set_<suffix>, uset_<suffix>, etc.
        methods = dict((k, '%s_%s' % (k, methods)) for k in ['set', 'uset', 'is', 'count', 'users'])
    # Patch methods
    # NOTE(review): the lambdas below close over ``counter_field``, which
    # is rebound to a list further down when truthy (late binding) -- by
    # the time the lambdas run they see the list form; confirm that
    # register/unregister accept a list for counter_field.
    for k in methods:
        if k == 'set':
            if positive_only:
                setattr(model,methods[k],lambda self, user, **kwargs: register(code,user,self,unique,positive=True,counter_field=counter_field))
            else:
                setattr(model,methods[k],lambda self, user, positive=True: register(code,user,self,unique,positive=positive,counter_field=counter_field))
        elif k == 'uset':
            setattr(model,methods[k],lambda self, user: unregister(code,user,self,unique,counter_field=counter_field))
        elif k == 'is':
            setattr(model,methods[k],lambda self, user: isregister(code,user,self,unique))
        elif k == 'count':
            if positive_only:
                setattr(model,methods[k],lambda self, **kwargs: countregister(code,self,positive=True))
            else:
                setattr(model,methods[k],lambda self, positive=True: countregister(code,self,positive=positive))
        elif k == 'users':
            setattr(model, methods[k], lambda self, positive=True: get_users(code, self, positive=positive))
        else:
            raise Exception, 'Undefined method: %s' % methods[k]
    if counter_field:
        fields = model._meta.get_all_field_names()
        if not isinstance(counter_field,(list,tuple)):
            counter_field = [counter_field]
        # Patch model fields
        for f in counter_field:
            if f not in fields:
                # Add counter field as usigned int
                model.add_to_class(f, models.PositiveIntegerField(default=0,editable=True,blank=True))
def __init_patch__(**kwargs):
    """Apply the ATOMREGISTER monkey-patches exactly once.

    Accepts **kwargs so it can double as a request_started signal handler.
    """
    if getattr(__init_patch__, 'inited', False):
        return
    from .settings import ATOMREGISTER
    for dotted_name in ATOMREGISTER:
        app_label, model_name = dotted_name.split('.')
        target_model = import_class('%s.models' % app_label, model_name)
        monkey(target_model, ATOMREGISTER[dotted_name])
    setattr(__init_patch__, 'inited', True)
if len(sys.argv)>1 and ('run' in sys.argv[1] or 'server' in sys.argv[1] or sys.argv[1] in ['supervisor']):
request_started.connect(__init_patch__)
else:
__init_patch__()
|
Monday evening we ventured into the Genetic Archives Act 2 to have another go at Phage Maw. We managed to get him down, giving us a server 7th and world 32nd position on him, which we are quite proud of.
Last reset we ended our Monday raid early because of Phage Maw bugging and us not having time to both clear trash again and get more solid tries on him. This time we had managed to clear everything up to Phage Maw on our Thursday raid, giving us plenty of time to practice on him. We did bug once towards the second half of the raids and we were forced to exit, reset the instance AND clear trash again. All this is of course happily forgotten now that we got the kill, but still, pretty annoying bug.
Below is the kill video with stalker dps point of view, with a couple of easter eggs and lol at the end, enjoy!
This brings us to 3 out of 6 down in the Genetic Archives which we feel is quite an achievement with just two raids per week, good job guys, good job!
Many of our members were fairly quick at getting the leveling part done seeing quite a few hitting level 50 even during the head start/before the actual launch on the 3rd of June. Now we’ve finished with the attunement and have commenced raiding, the joy!
After three resets post-launch we had 20+ people ready and attuned for the Genetic Archives 20 man raiding dungeon – off we went. We had prepared quite a lot for these raids, but never tried them during the beta, so our expectations were mixed. After some of the standard wipes on trash we got the first couple of minibosses down, and ended up wiping an hour or so on X-89 the first evening. On our second evening we downed X-89 after a couple of hours, adding up to around 3 hours total spent on him and a world kill position of 32, very well played guys! Videos of the kills below, stalker and warrior DPS pov.
As always we are still looking for more raiders for our roster so we can get a second 20man team on the road and get the necessary people ready and attuned for 40man raiding, see you on Nexus!
WildStar – it is time!
Character names have been reserved, the guild name has been reserved, the server has been picked and the hoverboards are fully charged. The wait is over, WildStar launches in 24 hours, let’s get this show on the road!
We have been preparing for this for quite some time – never underestimating the task it would be to assemble a Danish 40-man raiding guild. We started recruiting a while back and the last 3 months have been quite exciting, with so many people rallying under the Lokes List banner. With our current 200 Danish WildStar members, we feel we’ve laid the foundations for a solid and successful launch while maximizing our chances of getting started on raids fairly quickly post-launch. The goal is of course to do 40-man raiding as soon as possible.
As for now we will be playing on the European PvP server “Hazak”, the reason why we’re phrasing it like this is the fact that Carbine has only made this single PvP server for English speaking guilds on the European realms. Quite frankly we are worried about the login queues and stability seeing that 80% of all European guilds are picking this server as their initial server. We hope that Carbine are on top of this and the server can take it, because this has the potential to be a major fail in regards to ensuring a smooth launch of WildStar. With this said Hazak definitely looks like the place to be when it comes to top raiding guilds and we’re very excited to see so many familiar faces from especially WoW on the server guild list.
We will still be recruiting through launch as we’re always looking for good players for our raid group, although there will be a brief period where we won’t be able to process all of the applications coming in. We like to be thorough when processing applications which takes time, time we won’t have during launch, as we will be playing WildStar (oh yeah!). For more info on recruitment check out the forum.
To our members and anyone else reading along and planning to play WildStar we wish everyone a good launch – may you never encounter the need to wait for respawns!
|
from functools import wraps
import inspect
import collections
import warnings
import types
import sys
from ._dummy_key import _CmpDummyKey as _CmpDummyKey
# Tree-algorithm selector constants, passed as the ``alg`` argument to the
# container constructors elsewhere in this package.
RED_BLACK_TREE = 0
"""
Red-black tree algorithm indicator; good for general use.
"""
SPLAY_TREE = 1
"""
Splay tree algorithm indicator; good for temporal locality cases.
"""
SORTED_LIST = 2
"""
Sorted list algorithm indicator; good for infrequent updates.
"""
def _updator_metadata(set_, init_info):
if init_info.updator is None:
return None
name_clss = [(name, cls) for \
(name, cls) in inspect.getmembers(init_info.updator, predicate = inspect.isclass) if name == 'Metadata']
assert len(name_clss) == 1
cls = name_clss[0][1]
compare = init_info.compare if init_info.compare is not None else \
lambda x, y: -1 if x < y else (0 if x == y else 1)
def update(m, key, l, r):
m.update(
key,
init_info.key if init_info.key is not None else lambda k: _CmpDummyKey(compare, k),
l,
r)
return (cls, update)
def _adopt_updator_methods(self, updator):
def method_wrapper(f):
def wrapper(*args, **kwargs):
return f(self, *args, **kwargs)
return wraps(f)(wrapper)
if updator is None:
return
for name, method in inspect.getmembers(updator()):
if name.find('_') == 0 or name in self.__dict__ or name == 'Metadata':
continue
try:
method_ = method.__func__ if sys.version_info >= (3, 0) else method.im_func
self.__dict__[name] = method_wrapper(method_)
except AttributeError:
warnings.warn(name, RuntimeWarning)
_CommonInitInfo = collections.namedtuple(
'_CommonInitInfo',
['key_type', 'alg', 'key', 'compare', 'updator'],
verbose = False)
|
The five-day Enabling & Managing Microsoft Office 365 training course targets the needs of IT professionals who take part in evaluating, planning, deploying, and operating Office 365 services, including its identities, dependencies, requirements, and supporting technologies.
This Enabling & Managing Microsoft Office 365 training course focuses on skills required to set up an Office 365 tenant, including federation with existing user identities, and skills required to sustain an Office 365 tenant and its users.
This Enabling & Managing Microsoft Office 365 training course will provide you with all the necessary information and teach you skills to successfully operate and manage Microsoft Office 365 features such as evaluating, planning, deploying, and operating Office 365 services including its identities, dependencies, requirements, and supporting technologies.
IT professionals, Students including IT professionals who are looking to take the 70-346 examination.
There is no prereading associated with the Enabling & Managing Microsoft Office 365 training session.
The examination 70-346 is not included in the cost of the training course and is not part of the Enabling & Managing Microsoft Office 365 training course.
Ready to attend examination 70-346.
The Enabling & Managing Microsoft Office 365 training course is designed for, and will prepare, anyone looking to take the 70-346 exam.
The Enabling & Managing Microsoft Office 365 training course will prepare you for the 70-346 examination.
|
""" Schema for unimodel objects.
This allow us to do several things:
- Encode the schema of the message along with the message itself
- Build ASTs from generators which take eg. jsonschema as input
- Create classes at runtime based on a schema (jsonschema or thrift)
etc.
"""
from unimodel.model import Unimodel, UnimodelUnion, Field, FieldFactory
from unimodel import types
from unimodel.metadata import Metadata
from unimodel.backends.json.type_data import MDK_TYPE_STRUCT_UNBOXED
import inspect
class SchemaObjectMetadata(Unimodel):
    """Free-form, per-backend metadata attached to schema objects."""
    # TODO: validators
    backend_data = Field(
        types.Map(
            types.UTF8,  # Key is the name of the backend, eg: 'thrift'
            # data for each backend should be represented as a simple dict
            types.Map(types.UTF8, types.UTF8)))
class SchemaObject(Unimodel):
    """Common naming/identity fields shared by all schema definitions."""
    name = Field(types.UTF8, required=True)
    namespace = Field(types.List(types.UTF8))
    uri = Field(types.UTF8)
    metadata = Field(types.Struct(SchemaObjectMetadata))
# Reusable field embedding the common SchemaObject data; the unboxed-struct
# metadata flattens it directly into the containing object's JSON.
schema_object_field = Field(
    types.Struct(SchemaObject),
    required=True,
    metadata=Metadata(
        backend_data={'json': {MDK_TYPE_STRUCT_UNBOXED: True}}))
# Enum of all known type ids, keyed by their numeric id.
type_id_enum = types.Enum(types.type_id_to_name_dict())
# TypeDef is recursive because of ParametricType
class TypeDef(Unimodel):
    """Placeholder; its fields are installed later via field_factory.add_fields."""
    pass
# List, Set, Map, Tuple
class ParametricType(Unimodel):
    """A container type together with its element type parameters."""
    type_id = Field(type_id_enum, required=True)
    type_parameters = Field(types.List(types.Struct(TypeDef)), required=True)
class TypeClass(UnimodelUnion):
    """Union: a type is exactly one of primitive, enum, struct reference, or parametric."""
    primitive_type_id = Field(type_id_enum)
    enum = Field(types.Map(types.Int, types.UTF8))
    struct_name = Field(types.UTF8)
    parametric_type = Field(types.Struct(ParametricType))
# TypeDef's fields are added after TypeClass exists, breaking the definition
# cycle TypeDef -> TypeClass -> ParametricType -> TypeDef.
field_factory = FieldFactory()
field_factory.add_fields(TypeDef, {
    'metadata': Field(types.Struct(SchemaObjectMetadata)),
    'type_class': Field(types.Struct(TypeClass), required=True)})
class LiteralValue(UnimodelUnion):
    """Union of the literal value kinds usable as field defaults."""
    integer = Field(types.Int)
    double = Field(types.Double)
    string = Field(types.UTF8)
class Literal(Unimodel):
    """A literal value (e.g. a field default) plus optional metadata."""
    # Fix for consistency: pass the union class itself, not an instance --
    # every other types.Struct(...) call in this module takes the class.
    literal_value = Field(types.Struct(LiteralValue))
    metadata = Field(
        types.Struct(SchemaObjectMetadata),
        metadata=Metadata(
            backend_data={'json': {MDK_TYPE_STRUCT_UNBOXED: True}}))
class FieldDef(Unimodel):
    """Definition of a single struct field."""
    common = schema_object_field
    field_id = Field(types.Int)
    field_type = Field(types.Struct(TypeDef), required=True)
    required = Field(types.Bool, default=False)
    default = Field(types.Struct(Literal))
class StructDef(Unimodel):
    """Definition of a struct (or union, when is_union is set)."""
    common = schema_object_field
    is_union = Field(types.Bool, default=False)
    fields = Field(types.List(types.Struct(FieldDef)), required=True)
class SchemaAST(Unimodel):
    """Root of a parsed schema: all struct definitions plus the entry struct."""
    common = schema_object_field
    description = Field(types.UTF8)
    structs = Field(types.List(types.Struct(StructDef)))
    root_struct_name = Field(types.UTF8)
|
Our STIN Hans J. Wegner The Chair is a faithful reproduction of this famous iconic furniture design. Hans J. Wegner's The Chair was originally named 'The Round Chair'. But after being used to seat John F. Kennedy and Richard Nixon in the first televised election debate in 1960, it became known as 'The Chair'. Our Hans J. Wegner The Chair has all the functional elegance and comfort of the original. The wooden frame is made from a choice of quality oak or ash and has a gracefully arched backrest that supports the body. The seat is lightly padded with high quality classic leather, making it perfect for everyday use.
Very high quality. Cannot tell the difference between the original and this one.
While I was waiting on the chair I was a little worried about the wood grain would be like proper uniform, and whether there would be likely to be color-shades of the wood. Concerns were happily proved wrong - the chair is fantastic - and looks like this looked better than it is in the picture. the wood grain is very nice, but I've been in a genuine (not replica) edition of the chair, and there are veins slightly more similar on both arms and straighter, which is probably because the replica is machine-sanded. the special and difficult assembly of the armrest three parts are made very flat. There is something that seems like a small cosmetic defects on one arm, but it may actually be that it's just a natural shift in the tree structure. I see no reason not to give the chair 6 stars. all in all I am very happy and it's not my last purchase from OSKAR.
I received the chairs almost as promised, they were perfectly wrapped, it was actually difficult to extract them. The quality of the chairs is obviously not quite on a par with the original, nor can it be expected-. Sorting of the wood used could be better color differences-. The abrasion of the surface is made evident by machine, since there are waves in accordance with the tree structure. stronger than the original, but it looks really very pretty.
The chairs were received in a very careful and sturdy packaging, which had protected the chairs optimally. The chairs fully correspond to our expectations, and fit perfectly with our round oak table. They are beautiful and great to sit in.
While I was waiting on the chair I was a little worried about the wood grain would be like proper uniform, and whether there would be likely to be color-shades of the wood. Concerns were happily proved wrong - the chair is fantastic - and looks like this looked better than it is in the picture. the wood grain is very nice, but I've been in a genuine (not replica) edition of the chair, and there are veins slightly more similar on both arms and straighter, which is probably because the replica is machine-sanded. the special and difficult assembly of the armrest three parts are made very flot.Der is something that seems like a small cosmetic defects on one arm, but it may actually be that it's just a natural shift in the tree structure. I see no reason not to give the chair 6 stars. all in all I am very happy and it's not my last purchase from Stin.
It was the first time we dealt with Stin, but it's certainly not the last time. 4 pcs. The Chair was received in solid packaging. Nice processing - perfect for our round oak table. A pleasure to watch.
|
#!/usr/bin/env python2.7
#
# Print properties from the pom.xml file as BASH variable settings.
# Note that the '.' characters in property names are re-written as '_'
#
import os
import re
import sys
from pom_handlers import DependencyInfo
from pom_utils import PomUtils
class PomProperties(object):
  """Emits pom.xml properties as bash variable assignments."""

  def safe_property_name(self, property_name):
    """Replace characters that aren't safe for bash variables with an underscore."""
    return re.sub(r'\W', '_', property_name)

  def write_properties(self, pom_file_path, output_stream, rootdir=None):
    """Write each pom property, plus project coordinates, to output_stream.

    :param pom_file_path: path to the pom.xml to read
    :param output_stream: file-like object receiving name="value" lines
    :param rootdir: optional repo root passed through to DependencyInfo
    """
    di = DependencyInfo(pom_file_path, rootdir)
    # Fix: .items() works on both Python 2 and 3 (.iteritems() is Py2-only).
    for property_name, value in di.properties.items():
      output_stream.write('{0}="{1}"\n'.format(self.safe_property_name(property_name), value))
    # Print out some other things. These are useful for script/pants_kochiku_build_wrapper
    output_stream.write('project_artifactId="{0}"\n'.format(di.artifactId))
    output_stream.write('project_groupId="{0}"\n'.format(di.groupId))
def usage():
  # Print usage help and exit with status 1 (Python 2 print statements --
  # this script's shebang pins python2.7).
  print "usage: {0} [args] ".format(sys.argv[0])
  print "Prints all the properties defined in a pom.xml in bash variable syntax."
  print ""
  print "-?,-h Show this message"
  PomUtils.common_usage()
  sys.exit(1)
def main():
  # Parse common flags, locate the project's pom.xml and print its properties.
  arguments = PomUtils.parse_common_args(sys.argv[1:])
  flags = set(arg for arg in arguments if arg.startswith('-'))
  for f in flags:
    if f == '-h' or f == '-?':
      usage()
      # usage() calls sys.exit(); the return is defensive only.
      return
    else:
      print ("Unknown flag {0}".format(f))
      usage()
      return
  # Whatever is left after removing flags must be exactly one project path.
  path_args = list(set(arguments) - flags)
  if len(path_args) != 1 :
    print("Expected a single project path that contains a pom.xml file.")
    usage()
  pom_file_path = os.path.join(os.path.realpath(path_args[0]), 'pom.xml')
  if not os.path.exists(pom_file_path):
    print ("Couldn't find {0}".format(pom_file_path))
    usage()
  PomProperties().write_properties(pom_file_path, sys.stdout)
if __name__ == '__main__':
  main()
|
Product Lead time: 4 to 7 working days C4 CROXLEY WHITE GUMMED UNBANDED (CARTRIDGE) ENVELOPE ..
Product Lead time: 4 to 7 working days C4 CROXLEY WHITE SEAL EASI UNBANDED (PRESTIC CARTRIDGE) ENVELOPE ..
Product Lead time: 4 to 7 working days C4 CROXLEY WHITE WINDOW UNBANDED (CARTRIDGE) ENVELOPE ..
Product Lead time: 4 to 7 working days C5 CROXLEY WHITE GUMMED UNBANDED (CARTRIDGE) ENVELOPE ..
Product Lead time: 4 to 7 working days C5 CROXLEY WHITE SEAL EASI UNBANDED (PRESTIC CARTRIDGE)ENVELOPE ..
Product Lead time: 4 to 7 working days Marlin Envelopes C4 Pocket White Gum 250's ..
Product Lead time: 4 to 7 working days Marlin Envelopes C4 Pocket White Self Seal 250's ..
Product Lead time: 4 to 7 working days Marlin Envelopes C5 Pocket White Self Seal 500's ..
Product Lead time: 4 to 7 working days Marlin Envelopes C6 White Gum 25's ..
Product Lead time: 4 to 7 working days Marlin Envelopes DL White Opaque Gum 500's ..
Product Lead time: 4 to 7 working days Marlin Envelopes DL White Self Seal 500's ..
Product Lead time: 4 to 7 working days Marlin Envelopes DL White with Window Self Seal 500's ..
Product Lead time: 4 to 7 working days Marlin Envelopes White Gum 25's ..
Product Lead time: 4 to 7 working days Marlin Envelopes White with Window Gum 25's ..
|
from pymongo import MongoClient
import pymongo
from datetime import datetime
mongodb_url = 'mongodb://192.168.0.30:27017/'
# NOTE(review): the assignments below override the ones above (both for
# ``mongodb_url`` and ``db``) -- looks like a local/test toggle left in the
# code; confirm which environment is intended before deploying.
mongodb_url = 'mongodb://127.0.0.1:27017/'
client = MongoClient(mongodb_url)
db = client['web_jobs_server']
db = client['test_web_jobs_server']
# Python 2 print statement (this module targets Python 2).
print "** DB Collections: ", db.collection_names()
#collection = db[job_target]
#print collection
def make_job(job_id, job_url, job_file_path, client_id, create_date, update_date, job_status, http_status):
    """Assemble a job document (plain dict) ready for storage in MongoDB."""
    return {
        "job_id": job_id,
        "job_url": job_url,
        "job_file_path": job_file_path,
        "client_id": client_id,
        "create_date": create_date,
        "update_date": update_date,
        "job_status": job_status,
        "http_status": http_status,
    }
## insert: only be used for fresh insert, as existing _id would cause duplicate insert and then error
## save: same as _update method, but would create collection if it is not exist
## consider with ejdb does not support custom _id, so I have to use upsert
def job_upsert(job, collection):
    # Update-or-insert ``job`` into ``collection``, matched on job_id.
    # upsert=True creates the document when no match exists; multi=True
    # updates every matching document.
    j = db[collection].update({'job_id': job['job_id']}, {'$set':job}, upsert=True, multi=True)
    print j
def job_insert(job, collection):
    # Best-effort insert: duplicate-key and all other errors are deliberately
    # swallowed (the commented-out prints suggest they were once logged).
    # NOTE(review): callers receive no signal on failure -- confirm intended.
    try:
        j = db[collection].insert(job)
    except pymongo.errors.DuplicateKeyError as e:
        #print e
        pass
    except Exception as e:
        #print e
        pass
|
We are the Celtic cross-quarter of Lammas.
I can always feel the change in the earth at cross-quarters. I don’t know why really but I do. Maybe something about that pagan witch stuff.
Lammas is a celebration of the wheat and other harvests. I read that a loaf of wheat bread was a traditional offering. Who knows? sounds right though.
So, raise a glass to the God, Lugh!!
|
import os, re, sys
from kodi_six import xbmc, xbmcvfs, xbmcaddon
# Python 3 renamed cookielib to http.cookiejar; alias it so the rest of the
# module can keep using the Python 2 name.
if sys.version_info[0] > 2:
    # Fix: ``import http`` alone does not load the http.cookiejar submodule;
    # it must be imported explicitly before http.cookiejar can be accessed.
    import http.cookiejar
    cookielib = http.cookiejar
else:
    import cookielib
# Kodi 19+ moved translatePath from xbmc to xbmcvfs; fall back silently on
# older versions where xbmcvfs has no translatePath attribute.
try:
    xbmc.translatePath = xbmcvfs.translatePath
except AttributeError:
    pass
class Util:
    """Helpers: substring extraction, natural sorting and cookie persistence."""

    # Kodi profile directory for this add-on; cookies.lwp lives here.
    addon_path = xbmc.translatePath(xbmcaddon.Addon().getAddonInfo('profile'))
    # Compiled once instead of on every natural_sort_key() call.
    _nsre = re.compile('([0-9]+)')

    def find(self, source, start_str, end_str):
        """Return the text between start_str and end_str, or '' if start_str is absent."""
        start = source.find(start_str)
        end = source.find(end_str, start + len(start_str))
        if start != -1:
            # Fix: when end_str is not found, end == -1 used to silently drop
            # the last character of the result; take the rest of the string.
            if end == -1:
                end = len(source)
            return source[start + len(start_str):end]
        else:
            return ''

    def natural_sort_key(self, s):
        """Sort key ordering embedded numbers numerically ('x2' before 'x10')."""
        return [int(text) if text.isdigit() else text.lower()
                for text in re.split(self._nsre, s)]

    def save_cookies(self, cookiejar):
        """Merge ``cookiejar`` into cookies.lwp, preserving cookies saved earlier."""
        cookie_file = os.path.join(self.addon_path, 'cookies.lwp')
        cj = cookielib.LWPCookieJar()
        try:
            cj.load(cookie_file, ignore_discard=True)
        except:
            # Best effort: a missing/corrupt cookie file means we start fresh.
            pass
        for c in cookiejar:
            # LWPCookieJar stores extra attributes under '_rest', but the
            # Cookie constructor expects the keyword 'rest'.
            args = dict(vars(c).items())
            args['rest'] = args['_rest']
            del args['_rest']
            cj.set_cookie(cookielib.Cookie(**args))
        cj.save(cookie_file, ignore_discard=True)

    def load_cookies(self):
        """Return the persisted cookie jar (empty jar when nothing was saved)."""
        cookie_file = os.path.join(self.addon_path, 'cookies.lwp')
        cj = cookielib.LWPCookieJar()
        try:
            cj.load(cookie_file, ignore_discard=True)
        except:
            pass
        return cj

    def check_cookies(self):
        """Return True when a fresh login is needed (no unexpired fprt+ipid cookies)."""
        perform_login = True
        cookie_file = os.path.join(self.addon_path, 'cookies.lwp')
        if os.path.isfile(cookie_file):
            fingerprint_valid = False
            ipid_valid = False
            cj = cookielib.LWPCookieJar(cookie_file)
            cj.load(cookie_file, ignore_discard=True)
            for cookie in cj:
                if cookie.name == "fprt" and not cookie.is_expired():
                    fingerprint_valid = True
                elif cookie.name == "ipid" and not cookie.is_expired():
                    ipid_valid = True
            if fingerprint_valid and ipid_valid:
                perform_login = False
        return perform_login

    def delete_cookies(self):
        """Remove the persisted cookie file, if any."""
        cookie_file = os.path.join(self.addon_path, 'cookies.lwp')
        if os.path.isfile(cookie_file):
            os.remove(cookie_file)
|
'Rare photos from the set of the zombie masterpiece!' Fangoria, Issue 337: 4 page excerpt.
My favourite part was when they were talking about Romero s first vision of Day of the Dead. I was reading this part and was thinking, This is not the movie I remember . So I backtracked a bit a saw that this was his first vision and not what actually made it to the screen. This alone made it worth reading this book. It gave such a perspective on how much an original vision can change from conception to what really hits the screen. Stephanie Drum, Bea s Book Nook blog"
Released in 1985, Day of the Dead was the final film of George A. Romero's classic zombie trilogy, which forever changed the face of horror filmmaking. Set in an apocalyptic world where the living-dead epidemic has wiped out most of humanity, the movie quickly acquired cult status, and — with one remake released in 2008 and another planned for 2014 — its influence on popular culture can still be felt today. Now, for the first time, the full history of the making of the iconic original film is revealed. Drawing on a wealth of exclusive interviews with the cast and crew, author Lee Karr leaves no stone unturned in detailing the movie's preproduction, shoot, release, and legacy. Filled with behind-the-scenes gossip and previously unpublished stories from the set, as well as over 100 full-color photos, this book gives Day of the Dead the resurrection it deserves.
|
# Copyright 2010-2012 Institut Mines-Telecom
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jun 21, 2012
@author: Bilel Msekni
@contact: bilel.msekni@telecom-sudparis.eu
@author: Houssem Medhioub
@contact: houssem.medhioub@it-sudparis.eu
@organization: Institut Mines-Telecom - Telecom SudParis
@license: Apache License, Version 2.0
"""
try:
import simplejson as json
except ImportError:
import json
def extract_term_from_category(json_object):
    """
    returns the term from a json representation, or None if absent
    Args:
        @param json_object: JSON representation
    """
    # dict.get() replaces the Python-2-only has_key() and works on Python 3.
    return json_object.get('term')
def extract_scheme_from_category(json_object):
    """
    returns the scheme from a json representation, or None if absent
    Args:
        @param json_object: JSON representation
    """
    # dict.get() replaces the Python-2-only has_key() and works on Python 3.
    return json_object.get('scheme')
def extract_location_from_category(json_object):
    """
    returns the location from a json representation, or None if absent
    Args:
        @param json_object: JSON representation
    """
    # dict.get() replaces the Python-2-only has_key() and works on Python 3.
    return json_object.get('location')
def extract_title_from_category(json_object):
    """
    returns the title from a json representation, or None if absent
    Args:
        @param json_object: JSON representation
    """
    # dict.get() replaces the Python-2-only has_key() and works on Python 3.
    return json_object.get('title')
def extract_related_from_category(json_object):
    """
    returns the related entries joined by commas, or None if absent
    Args:
        @param json_object: JSON representation
    """
    # 'in' replaces the Python-2-only has_key(); str.join replaces the manual
    # trailing-comma build-and-strip loop (identical output, incl. [] -> "").
    if 'related' in json_object:
        return ",".join(json_object['related'])
    return None
def extract_actions_from_category(json_object):
    """
    returns the actions joined by commas, or None if absent
    Args:
        @param json_object: JSON representation
    """
    # 'in' replaces the Python-2-only has_key(); str.join replaces the manual
    # trailing-comma build-and-strip loop (identical output, incl. [] -> "").
    if 'actions' in json_object:
        return ",".join(json_object['actions'])
    return None
def extract_attributes_from_category(json_object):
    """
    returns the flattened attribute names joined by commas, or None if absent
    Args:
        @param json_object: JSON representation
    """
    # 'in' replaces the Python-2-only has_key(); str.join replaces the manual
    # trailing-comma build-and-strip loop (identical output, incl. [] -> "").
    if 'attributes' in json_object:
        return ",".join(recursive_for_attribute(json_object['attributes']))
    return None
def extract_kind_from_entity(json_object):
    """
    returns the HTTP kind description extracted from a json entity
    representation, or None if absent
    Args:
        @param json_object: JSON representation
    """
    # 'in' replaces the Python-2-only has_key().
    if 'kind' not in json_object:
        return None
    # Kind ids look like "<scheme>#<term>".
    kind_scheme, kind_term = json_object['kind'].split('#')
    return kind_term + "; scheme=\"" + kind_scheme + "\"; class=\"kind\";"
def extract_mixin_from_entity(json_object):
    """
    returns the HTTP renderings of the entity's mixins, or None if absent
    Args:
        @param json_object: JSON representation
    """
    # 'in' replaces the Python-2-only has_key().
    if 'mixin' not in json_object:
        return None
    mix_http = list()
    for item in json_object['mixin']:
        # Mixin ids look like "<scheme>#<term>".
        mix_scheme, mix_term = item.split('#')
        mix_http.append(mix_term + "; scheme=\"" + mix_scheme + "\"; class=\"mixin\";")
    return mix_http
def extract_id_from_entity(json_object):
    """
    returns the id of the entity, or None if absent
    Args:
        @param json_object: JSON representation
    """
    # dict.get() replaces the Python-2-only has_key() and works on Python 3.
    return json_object.get('id')
def extract_title_from_entity(json_object):
    """
    returns the title of the entity, or None if absent
    Args:
        @param json_object: JSON representation
    """
    # dict.get() replaces the Python-2-only has_key() and works on Python 3.
    return json_object.get('title')
def extract_actions_from_entity(json_object):
    """
    returns HTTP Link renderings of the entity's actions, or None if absent
    Args:
        @param json_object: JSON representation
    """
    # 'in' replaces the Python-2-only has_key(); list comprehension replaces
    # the manual append loop.
    if 'actions' not in json_object:
        return None
    return ["<" + item['href'] + ">; rel=\"" + item['category'] + "\""
            for item in json_object['actions']]
def extract_internal_link_from_entity(json_object):
    """
    returns HTTP Link renderings of the entity's internal links, or None

    NOTE(review): uri/rel/self are placeholder strings ("|zizi|" etc.) in the
    original code -- presumably substituted later by the caller; confirm.
    Args:
        @param json_object: JSON representation
    """
    # 'in' replaces the Python-2-only has_key().
    if 'links' not in json_object:
        return None
    links = list()
    for item in json_object['links']:
        uri = "|zizi|"
        rel = "|zala|"
        category = item['kind']
        self = "|zolo|"
        link = "<" + uri + ">; rel=\"" + rel + "\"; self=\"" + self + "\"; category=\"" + category + "\";"
        if 'attributes' in item:
            for att in recursive_for_attribute_v2(item['attributes']):
                # Strip the trailing '"'-adjacent char exactly as before and
                # re-terminate with ';'.
                link += att[:-1] + ";"
        links.append(link)
    return links
def extract_attributes_from_entity(json_object):
    """
    returns the flattened attribute renderings, or None if absent
    Args:
        @param json_object: JSON representation
    """
    # 'in' replaces the Python-2-only has_key().
    if 'attributes' in json_object:
        return recursive_for_attribute_v2(json_object['attributes'])
    return None
def treat_attribute_members(members):
    """Render the mutability/required flags of one attribute description.

    Returns a single-element list such as ["{immutable}{required}"]. Keys
    other than 'mutable'/'required' (type, pattern, ...) are ignored, exactly
    as before. Note the strict ``is True`` checks are preserved: any non-True
    value (including truthy strings) counts as False.
    """
    to_return = ""
    for key, value in members.items():
        if key == "mutable" and value is not True:
            to_return += "{immutable}"
        elif key == "required" and value is True:
            to_return += "{required}"
    return [to_return]
def recursive_for_attribute(attributes):
    """Flatten a nested attribute-description dict into dotted names.

    Leaf dicts (containing 'mutable'/'required'/...) are rendered by
    treat_attribute_members(); intermediate keys are joined with '.'.
    """
    att_http = list()
    for key in attributes.keys():
        if type(attributes[key]) is dict:
            items = recursive_for_attribute(attributes[key])
            for item in items:
                # item.find('{') == 0 (i.e. the rendering starts with a flag
                # such as "{required}") makes the condition truthy, so the
                # flag is appended to the key without a dot separator.
                if not (item.find('{')):
                    att_http.append(key + item)
                else:
                    att_http.append(key + "." + item)
        else:
            # A non-dict value means ``attributes`` is a leaf description:
            # render it and return immediately (early exit skips siblings).
            attributes = treat_attribute_members(attributes)
            return attributes
    final_att = list()
    for item in att_http:
        # Drop the trailing '.' left when a nested rendering was empty.
        if item.endswith('.'):
            final_att.append(item[:-1])
        else:
            final_att.append(item)
    return final_att
def treat_attribute_members_v2(attributes):
    """Render each leaf attribute as a key="value" string."""
    # Comprehension over .items() replaces the manual keys()/append loop;
    # output and ordering are identical.
    return [key + "=\"" + str(value) + "\"" for key, value in attributes.items()]
def recursive_for_attribute_v2(attributes):
    """Flatten nested attribute values into dotted key="value" strings.

    Leaf dicts are rendered by treat_attribute_members_v2(); nested keys are
    joined with '.'.
    """
    att_http = list()
    for key in attributes.keys():
        if type(attributes[key]) is dict:
            items = recursive_for_attribute_v2(attributes[key])
            for item in items:
                att_http.append(key + "." + item)
        else:
            # A non-dict value makes the whole (sub)dict a leaf: render it and
            # return immediately (early exit skips any sibling keys).
            attributes = treat_attribute_members_v2(attributes)
            return attributes
    return att_http
if __name__ == '__main__':
    # Ad-hoc manual test (Python 2 print statements -- this module is Py2).
    print '====== Test ======'
    # Nested attribute *values*, as found in an entity representation.
    att = """
    {
        "occi": {
            "compute": {
                "speed": 2,
                "memory": 4,
                "cores": 2
            }
        },
        "org": {
            "other": {
                "occi": {
                    "my_mixin": {
                        "my_attribute": "my_value"
                    }
                }
            }
        }
    }
    """
    # Attribute *descriptions* (mutable/required/...), as found in a category.
    attold = """
    {"occi": {
        "compute": {
            "hostname": {
                "mutable": true,
                "required": false,
                "type": "string",
                "pattern": "(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\\\-]*[a-zA-Z0-9])\\\\.)*",
                "minimum": "1",
                "maximum": "255"
            },
            "state": {
                "mutable": false,
                "required": false,
                "type": "string",
                "pattern": "inactive|active|suspended|failed",
                "default": "inactive"
            }
        }
    }}
    """
    att_obj = json.loads(att)
    res = recursive_for_attribute_v2(att_obj)
    # json_mixin = json.loads(mixin)
    # res = convert_json_action_to_http_action(json_mixin)
    print res
|
Hysucat at the Louis Vuitton America’s Cup World Series in New York, May 7-8, 2016.
Following the New York regatta, Hysucat will also attend the Louis Vuitton America’s Cup World Series in Chicago 10-12 June with four 8.5 RIB chase-boats to follow the foiling, wing-sailed AC45 catamarans on the course. During the New York event five Hysucat support vessels were used to provide the ‘Guest Chaser Experience’ for the AC VIPs, and chase behind the America’s Cup teams as they raced around the course, as well as to support ACEA (America’s Cup Event Authority) team members, including 5 times America’s Cup champion & CEO of the America’s Cup, Russell Coutts. Race officials chose the Hysucat over other RIB’s because the Hysucat is able to keep up with the catamarans in heavy chop while still giving a comfortable, safe ride that leaves little wake.
What Separates Hysucat From Other Hydrofoil Companies?
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Author: AsherYang
Email: 1181830457@qq.com
Date: 2017/7/24
Desc: get weidian token
@see https://wiki.open.weidian.com/guide#145
https://oauth.open.weidian.com/token?grant_type=client_credential&appkey=xxx&secret=xxx
必须为get 请求
"""
import json
import time
import DbUtil
import OpenRequest
import TokenConstant
from Token import Token
# import ssl
# get_token_url = "https://oauth.open.weidian.com/token?grant_type=client_credential&appkey=" + TokenConstant.appkey + "&secret=" + TokenConstant.secret
# ssl._create_default_https_context = ssl._create_unverified_context
# 服务型URL
# get_token_url = '%s/oauth2/access_token' % TokenConstant.domain
# 自用型
get_token_url = '%s/token' % TokenConstant.domain
# ====== get 方式 20170907 之前版本,由于微店更改规则,导致直接获取无效,以下面方式模仿浏览器行为 =========
# def getTokenFromNet():
# # request 封装
# request = urllib2.Request(url=get_token_url)
# # 发起请求
# html = urllib2.urlopen(request)
# response_data = html.read()
# print response_data
# jsonToken = json.loads(response_data)
# access_token = jsonToken['result']['access_token']
# expire_in = jsonToken['result']['expire_in']
# token = Token()
# token.access_token = access_token
# token.expire_in = expire_in
# return token
def getTokenFromNet():
    # Fetch a fresh access token from the weidian OAuth endpoint.
    # A browser-like User-Agent is sent because weidian started rejecting the
    # default urllib agent (see the commented-out pre-20170907 version above).
    params = {"appkey": TokenConstant.appkey, "secret": TokenConstant.secret, "grant_type": "client_credential"}
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
    }
    body = OpenRequest.http_get(get_token_url, params=params, header=header)
    # Python 2 print statement (this module targets Python 2).
    print body
    jsonToken = json.loads(body)
    access_token = jsonToken['result']['access_token']
    expire_in = jsonToken['result']['expire_in']
    token = Token()
    token.access_token = access_token
    token.expire_in = expire_in
    return token
# ====== get token from db ========
def getTokenFromDb():
    """Load the most recently stored token from the sh_token table.

    Returns None when DbUtil.query itself returned None; otherwise a
    Token whose fields mirror the newest row (last row wins on ties).
    """
    sql = "select * from sh_token where update_time = (select max(update_time) from sh_token)"
    token = Token()
    rows = DbUtil.query(sql)
    if rows is None:
        return None
    for record in rows:
        # row layout: id, access_token, expire_in, update_time
        token.access_token = record[1]
        token.expire_in = record[2]
        token.update_time = record[3]
    return token
# ====== save token to db =======
def saveToDb(token=None, expire_in=None):
    """Persist a token row (access_token, expire_in, unix update_time).

    Skips the insert (and just logs) when either value is missing.
    NOTE(review): values are interpolated into the SQL string — fine for
    trusted API output, but confirm DbUtil offers parameterized inserts.
    """
    if token is None or expire_in is None:
        print("token is %s, expire_in is %s" % (token, expire_in))
        return
    # seconds-resolution unix timestamp recorded as update_time
    now = int(time.time())
    insert = 'insert into sh_token (access_token, expire_in, update_time) values("%s", "%s", "%s")' % (token, expire_in, now)
    DbUtil.insert(insert)
def doGetToken():
    """Return a valid access token, refreshing from the network when the
    cached DB token is missing or has expired."""
    cached = getTokenFromDb()
    now = int(time.time())
    if cached is None:
        fresh = getTokenFromNet()
        saveToDb(fresh.access_token, fresh.expire_in)
        print("ok , update token from net success, when dbToken is null. ")
        return fresh.access_token
    if now < int(cached.update_time) + int(cached.expire_in):
        # still within its validity window
        print("ok , token in date. ")
        return cached.access_token
    print("currentTime = %s , update_time = %s " % (now, cached.update_time))
    # expired — fetch and persist a replacement
    fresh = getTokenFromNet()
    saveToDb(fresh.access_token, fresh.expire_in)
    print("ok , update token from net success. ")
    return fresh.access_token


if __name__ == '__main__':
    doGetToken()
|
Baker Donelson announced that Robert C. Divine has been recognized in EB-5 Investors Magazine's list of "Top Attorneys in Specialized Fields." This list recognizes distinguished attorneys in specialized EB-5 practice groups, which focus on EB-5 policy and lobbying matters, complex cases and immigration compliance.
This is Mr. Divine's sixth consecutive year included on the "Top Attorneys" lists, previously recognized among the "Top 25 Immigration Attorneys" and "Top EB-5 Attorneys."
Mr. Divine is the leader of Baker Donelson's Immigration Group and a shareholder in the firm's Washington, D.C. and Chattanooga offices. He has experience serving clients throughout the world in the arrangement of all types of business-based temporary and permanent immigration status. Mr. Divine represents EB-5 developers, regional centers, and individual foreign investors, balancing immigration and securities considerations, and litigating when necessary. Mr. Divine is an active member of Invest in the USA (IIUSA), a national non-profit industry trade association for the EB-5 Regional Center Program, and served as the vice president of the IIUSA from 2010-2017. In 2018, Mr. Divine was recognized at the IIUSA's EB-5 Industry Achievement Awards with the "EB-5 Hero" award and the "Industry Thought Leader" award for his service and contributions to the industry.
From 2004 through 2006, Mr. Divine served as chief counsel of United States Citizenship and Immigration Services and also served as acting director and acting deputy director of USCIS between 2005 and 2006, spearheading the USCIS Transformation Program, testifying in Congress about the E-Verify system, enhancing operational security, and increasing transparency of rules and procedures. He is a frequent speaker on U.S. and international immigration rules, policies and procedures and has authored Immigration Practice since 1994.
EB-5 Investors Magazine provides a platform for skilled EB-5 professionals to discuss pressing matters and keep readers up to date on the constantly changing law and legislation pertaining to EB-5. Honorees in the EB-5 "Top Attorney" lists are selected through an EB-5 industry-wide vote and input and analysis from the magazine's editorial board and in-house team. Candidates are evaluated based on their experience in the EB-5 industry as well as their track record and reputation within the field.
|
"""Spyse Tkinter UI agent module"""
from spyse.core.agents.ui import UIAgent
from spyse.core.content.content import ACLMessage
from Tkinter import *
class TkinterAgent(UIAgent):
    """A Tkinter UI agent"""
    # http://www.python.org/topics/tkinter/
    # the next two lines of code need to be commented out for epydoc to work
    # NOTE: class-body side effect — a single hidden Tk root window is created
    # at import time and shared by every TkinterAgent instance.  Double
    # underscores name-mangle this to _TkinterAgent__root.
    __root = Tk()
    __root.withdraw()

    def __init__(self, name, mts, **namedargs):
        # Each agent shows its UI in its own Toplevel hung off the shared root.
        super(TkinterAgent, self).__init__(name, mts, **namedargs)
        #self.__root = Tk()
        # if not hasattr(self, '__root'):
        # self.__root = Tk()
        # self.__root.title("Root")
        #self.__root.withdraw() # won't need this
        #self.__root.mainloop()
        # setattr(self, '__root', Tk())
        self.top = Toplevel(self.__root)
        self.top.title(name)
        #self.top.protocol("WM_DELETE_WINDOW", self.top.destroy)
        self.create_widgets(self.top)

    def make_Tk(root):
        # NOTE(review): declared without `self`; if invoked on an instance the
        # instance itself arrives as `root`.  Looks unused/broken — confirm.
        root.title("Spyse Agent Management System")
        title_label = Label(root)
        title_label["text"]="Spyse - Agent Management System"
        title_label.pack()
        quit_button = Button(root, command=quit) # doesn't work
        quit_button["text"]="Quit"
        quit_button.pack()
        #root.withdraw()

    def create_widgets(self, frame):
        # Override in subclasses to populate the agent's Toplevel window.
        pass

    def take_down(self):
        print "destroying toplevel"
        #self.top.destroy()
        # TODO: is there a better way to kill windows ???

    @classmethod
    def run_GUI(cls):
        # Blocks the caller: enters the shared Tk event loop.
        cls.__root.mainloop()
        # run_GUI = classmethod(run_GUI)
|
OK, this is totally being bookmarked for my next NY trip. The only thing NY doesn't have, honestly, is a really, really good burrito. If you have that craving, grab a flight to San Francisco, and go to any place within 2 blocks of anywhere. Mmmmmm.
I've only heard how good California burritos are. Never had the opportunity to try one. I've spent such a small amount of time on the West Coast US.
But I can say, The Hat, also called El Sombero in the East Village has amazing margaritas. You definitely have to go for their standard, tequila and lime, not frozen. It comes by the pitcher, and after drinking a few glasses, I do seem to remember the burritos being really good.
Nice !! I love getting the run down of where people like to eat. Thanks!
|
# -*- coding: utf-8 -*-
"""
jbmst_search_java.py
Created on 2013/06/28
Copyright (C) 2011-2013 Nippon Telegraph and Telephone Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Java search process.
Extension to search for the file processing of java.
Do not search for the comment text.
If only Search Keyword1, and returns the results of the search in Search Keyword1.
If the Search Keyword2 is also present, and returns the results to find the search file again by the Search Keyword2.
[Environment] Python 2.7
"""
import re, sys,os
# Line-classification states returned by isSingleComment / isMultiCommentEnd.
SINGLE_COMMENT = "SINGLE_COMMENT"  # the whole line is a comment
MULTI_COMMENT = "MULTI_COMMENT"  # the line opens a /* ... */ block
MULTI_COMMENT_END = "MULTI_COMMENT_END"  # the line closes a /* ... */ block
JAVA_SOURCE = "JAVA_SOURCE"  # ordinary (searchable) source line
"""
Classify one line as single comment, multi-line comment opener, or plain
source, and return the matching status constant.
@param pLine: one record (line) of the file being searched
@return (status, code-part-without-trailing-comment-or-None) tuple
"""
def isSingleComment(pLine,LINE_HEAD_COMMENT_STR = "//"):
    # Holds the code portion of the line when a trailing end-of-line comment
    # is detected; stays None otherwise.
    JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT =None
    m = re.search("^\s*"+LINE_HEAD_COMMENT_STR,pLine)
    if m:
        # Line starts (after optional whitespace) with // — wholly a comment.
        return SINGLE_COMMENT,JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT
    else:
        #support end of line comment
        m = re.search("(\s*\w*)"+LINE_HEAD_COMMENT_STR,pLine)
        if m:
            # NOTE(review): "[^" + "//" + "]*" builds the char class [^/]*, so
            # the kept "code" part stops at the FIRST '/', comment or not —
            # confirm that truncating at e.g. a division operator is intended.
            m = re.search("[^"+LINE_HEAD_COMMENT_STR+"]*",pLine)
            if m != None:
                JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT = m.group()
    m = re.search("^\s*/\*",pLine)
    if m:
        m = re.search("\*/\s*$",pLine)
        if m:
            # /* ... */ opens and closes on this same line.
            return SINGLE_COMMENT,JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT
        else:
            return MULTI_COMMENT,JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT
    else:
        #support end of line comment
        m = re.search("(\s*\w*)/\*.*\*/$",pLine)
        if m:
            result = m.group()
            if result != None:
                index = len(result)
                # Strip the trailing /* ... */ block from the line.
                JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT =pLine[:-index]
    return JAVA_SOURCE,JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT
"""
Decide whether a line terminates a multi-line comment.
@param pLine: one record (line) of the file being searched
@return MULTI_COMMENT_END when the line ends with '*/', else MULTI_COMMENT
"""
def isMultiCommentEnd(pLine):
    if re.search("\*/\s*$", pLine):
        return MULTI_COMMENT_END
    return MULTI_COMMENT
"""
(Unused helper) Report whether a regex key occurs in a single line.
@param pSeachKey: regex to look for
@param pLine: line to scan
@return "FOUND" when the pattern matches, otherwise "NOT_FOUND"
"""
def searchByLine(pSeachKey,pLine):
    return "FOUND" if re.search(pSeachKey, pLine) else "NOT_FOUND"
"""
Scan a file, skipping comment text, and collect the line numbers whose
source text matches the search keyword.
@param pSearchFile: file to be searched
@param pSearchStr: Search Keyword1 or Search Keyword2 (regex)
@return list of matching line numbers
"""
def search_open_file(pSearchFile,pSearchStr,isFirstMatchExit=False,LINE_HEAD_COMMENT_STR = "//",isSemicolonParser=False,pSearchStr2="",pFlag=0):
    # line-type of the PREVIOUS line; drives multi-line comment skipping
    current_line_status = "NONE"
    line_count = 0
    line_count_list = []
    searchTargetBody = ""  # comment-stripped text accumulated up to a ';'
    searchTargetBodyIncludedComment= ""  # same span, comments included
    # Open the search files
    f = open(pSearchFile, "r")
    for line in f:
        searchTargetBodyIncludedComment += line
        line_count += 1
        # Determine the type of sentence
        line_status ,JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT= isSingleComment(line,LINE_HEAD_COMMENT_STR)
        # Distributes the processing according to the type of sentence
        if ( current_line_status == MULTI_COMMENT):
            # If multi-sentence comment
            if (isMultiCommentEnd(line) == MULTI_COMMENT_END):
                # If the multi-comment statement is completed
                current_line_status = JAVA_SOURCE
        else:
            if (line_status == JAVA_SOURCE):
                # If this is not the comment text
                # support end-of-line comments: search only the code part
                if JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT != None:
                    line = JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT
                # Semicolon-parser mode: treat everything up to the next ';'
                # as one token and delegate hit-mapping to findByKeywords.
                if isSemicolonParser == True:
                    searchTargetBody += line
                    if hasEndSemicolon(searchTargetBody) == True:
                        find_result = findByKeywords(pSearchStr,pSearchStr2,LINE_HEAD_COMMENT_STR,searchTargetBody,searchTargetBodyIncludedComment.rstrip(),line_count,pFlag)
                        line_count_list += find_result
                        searchTargetBodyIncludedComment = ""
                        searchTargetBody = ""
                else:
                    m = findAll(pSearchStr,line,pFlag)
                    if m:
                        # one list entry per hit on the line
                        for hit in m:
                            line_count_list.append(line_count)
                        if isFirstMatchExit == True:
                            f.close()
                            return line_count_list
            current_line_status = line_status
    f.close()
    return line_count_list
def findAll(pSearchStr,pLine,pFlag=0):
    """Thin wrapper over re.findall so all callers share one entry point."""
    hits = re.findall(pSearchStr, pLine, pFlag)
    return hits
def hasEndSemicolon(pTarget):
    """True when the accumulated text ends with ';' plus optional whitespace."""
    return re.search(".*;\s*$", pTarget) is not None
def hasEndBackSlash(pTarget):
    """Return True when the text ends with a backslash line continuation.

    Bug fix: the old pattern ".*\\s*$" collapsed to the regex .*\s*$
    (the doubled backslash only escaped the Python string, not the
    regex), so it matched EVERY input.  A raw string with a doubled
    backslash matches a literal '\' before optional trailing whitespace.
    """
    if re.search(r".*\\\s*$", pTarget):
        return True
    return False
# def getIndexBaseEndofLine(body,match):
#     print 'body:',body
#     tokens = body.split(';')
#     if len(tokens) != 0:
#         if not match.end() +1 > len(body):
#             match_after_line = body[match.end()+1:]
#             print 'match_after_line' ,match_after_line
#             m = match_after_line.split(';')
#             if m:
#                 return m[0].count('\n')
#     else:
#         return 0
def getMatch(pSearchStr2,append_line):
    """Return (hit count, match list) for a regex over accumulated text.

    Bug fix: re.finditer returns an iterator, so the original
    len(match) raised TypeError on every call.  The matches are now
    materialized into a list before counting.
    """
    matches = list(re.finditer(pSearchStr2, append_line))
    return len(matches), matches
def findByKeywords(pSearchStr1,pSearchStr2,LINE_HEAD_COMMENT_STR,pSearchTarget,pSearchTargetIncludedComment,pline_count,pFlag=0):
    """Locate keyword hits inside one semicolon-delimited token.

    pSearchTarget is the comment-stripped token; pSearchTargetIncludedComment
    is the same span with comments kept, used to map hits back to absolute
    line numbers (pline_count is the number of the token's LAST line).
    Returns a list of absolute line numbers, one per hit.
    """
    result_list = []
    #print pSearchTarget
    #print pSearchStr1
    # Bail out early when keyword 1 never occurs in the comment-stripped token.
    m= re.findall(pSearchStr1,pSearchTarget.replace('\n',''),pFlag)
    if len(m) == 0:
        return result_list
    # Keyword 2, when present, is the one whose positions are reported.
    if pSearchStr2 == "":
        searchKey =pSearchStr1
    else:
        searchKey =pSearchStr2
    lines = pSearchTargetIncludedComment.split('\n')
    line_length = len(lines)
    line_count = 0
    current_line_status = "NONE"
    firstMatch = False
    append_line = ""  # comment-free text accumulated so far
    match_len = 0  # number of hits already reported for this token
    for line in lines:
        line_count += 1
        line_status ,JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT= isSingleComment(line,LINE_HEAD_COMMENT_STR)
        if current_line_status == MULTI_COMMENT:
            # If multi-sentence comment
            if isMultiCommentEnd(line) == MULTI_COMMENT_END:
                # If the multi-comment statement is completed
                current_line_status = JAVA_SOURCE
        else:
            if line_status == JAVA_SOURCE:
                # If this is not the comment text
                # support end of line comment
                if JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT != None:
                    line = JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT
                append_line += line
                if firstMatch == False:
                    match = re.finditer(searchKey,append_line,pFlag)
                    i = 0
                    for m in match:
                        # absolute line = token end line - lines still to come
                        result_list.append(pline_count - (line_length -line_count))
                        i += 1
                        firstMatch = True
                    if i !=0:
                        match_len = i
                else:
                    # Re-scan the grown buffer; report only matches beyond
                    # those already counted (index >= match_len).
                    match = re.finditer(searchKey,append_line,pFlag)
                    i = 0
                    for m in match:
                        if i >= match_len:
                            result_list.append(pline_count - (line_length -line_count))
                        i = i + 1
                    if i > 0:
                        match_len = i
            current_line_status = line_status
    return result_list
def searchInterfaceMethod(pSearchFile,LINE_HEAD_COMMENT_STR="//"):
    """Collect method names declared in a Java interface-style source file.

    Skips comment text, ignores lines containing ' static ' or ' new ',
    then takes the identifier in front of '(' as the method name.
    Returns the list of method names in file order.
    """
    current_line_status = "NONE"
    line_count = 0
    methodname_list = []
    # Open the search files
    f = open(pSearchFile, "r")
    for line in f:
        line_count += 1
        # Determine the type of sentence
        line_status,JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT= isSingleComment(line,LINE_HEAD_COMMENT_STR)
        # Distributes the processing according to the type of sentence
        if ( current_line_status == MULTI_COMMENT):
            # If multi-sentence comment
            if (isMultiCommentEnd(line) == MULTI_COMMENT_END):
                # If the multi-comment statement is completed
                current_line_status = JAVA_SOURCE
        else:
            if (line_status == JAVA_SOURCE):
                # If this is not the comment text
                # support end-of-line comments: inspect only the code part
                if JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT != None:
                    line = JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT
                # negative lookahead: drop lines mentioning static/new
                m = re.search("^(?!.*\s+(static|new)\s+).*$",line)
                if m != None:
                    # "<type> <name>(" — capture the method name
                    m =re.search("\w+\s+(\w+)\s*\(.*",line)
                    if m:
                        method_name=m.group(1)
                        methodname_list.append(method_name)
    f.close()
    return methodname_list
"""
Search a file by Keyword1; when Keyword2 is also given, re-search the
file by Keyword2 (only when Keyword1 hit at least once).
@param pSearchFile: file to be searched
@param pSearchStr1: Search Keyword1
@param pSearchStr2: Search Keyword2 ("" to skip)
@return list of matching line numbers
"""
def searchByFile(pSearchFile,pSearchStr1,pSearchStr2,isFirstMatchExit=False,LINE_HEAD_COMMENT_STR = "//",IS_SEMICOLON_PARSER=False,FLAG=0):
    # Semicolon-parser mode hands both keywords to a single combined pass.
    if IS_SEMICOLON_PARSER == True and pSearchStr2 != "":
        return search_open_file(pSearchFile,pSearchStr1,True,LINE_HEAD_COMMENT_STR,IS_SEMICOLON_PARSER,pSearchStr2,FLAG)
    hits = search_open_file(pSearchFile,pSearchStr1,False,LINE_HEAD_COMMENT_STR,IS_SEMICOLON_PARSER,"",FLAG)
    # A second keyword triggers a fresh scan only when the first one matched.
    if pSearchStr2 != "" and len(hits) != 0:
        hits = search_open_file(pSearchFile,pSearchStr2,isFirstMatchExit,LINE_HEAD_COMMENT_STR,IS_SEMICOLON_PARSER,"",FLAG)
    return hits
def wrapSearchByFile(param):
    """Pool-friendly adapter: unpack the arg tuple and pair the result
    with the searched file name; failures are re-raised with the file name
    attached."""
    try:
        return (searchByFile(*param), param[0])
    except Exception as e:
        raise Exception('%s , searchTargetFile = %s' % (e, param[0]))
def wrapSearchOpenFile(param):
    """Pool-friendly adapter around search_open_file; mirrors
    wrapSearchByFile and re-raises failures with the file name attached."""
    try:
        return (search_open_file(*param), param[0])
    except Exception as e:
        raise Exception('%s , searchTargetFile = %s' % (e, param[0]))
|
Note: If the watch is turned over and the band is bent in the opposite direction, towards the face, it can crack. The band should only be bent down.
Black resin case with polished top ring and printed graphics and Chinese movement.
|
# Copyright 2001 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
Maximum Entropy code.
Uses Improved Iterative Scaling:
XXX ref
# XXX need to define terminology
"""
import math
from Numeric import *
from Bio import listfns
# XXX typecodes for Numeric
# XXX multiprocessor

# Tunables for the two nested iterative solvers (outer IIS, inner Newton).
MAX_IIS_ITERATIONS = 10000 # Maximum iterations for IIS.
IIS_CONVERGE = 1E-5 # Convergence criteria for IIS.
MAX_NEWTON_ITERATIONS = 100 # Maximum iterations on Newton's method.
NEWTON_CONVERGE = 1E-10 # Convergence criteria for Newton's method.
class MaxEntropy:
    """Holds information for a Maximum Entropy classifier.

    Members:
    classes      List of the possible classes of data.
    alphas       List of the weights for each feature.
    feature_fns  List of the feature functions.
    """
    def __init__(self):
        # Parallel lists: alphas[i] weighs feature_fns[i].
        self.classes, self.alphas, self.feature_fns = [], [], []
def calculate(me, observation):
    """calculate(me, observation) -> list of log probs

    Calculate the log of the probability for each class.  me is a
    MaxEntropy object that has been trained.  observation is a vector
    representing the observed data.  The return value is a list of
    unnormalized log probabilities for each class.
    """
    scores = []
    for klass in range(len(me.classes)):
        lprob = 0.0
        # Python 2 idiom: map(None, a, b) pairs the parallel feature/alpha
        # lists (equivalent to zip for equal-length lists).
        for fn, alpha in map(None, me.feature_fns, me.alphas):
            lprob += fn(observation, klass) * alpha
        scores.append(lprob)
    return scores
def classify(me, observation):
    """classify(me, observation) -> class

    Classify an observation into a class.
    """
    # Pick the class with the largest unnormalized log probability;
    # ties go to the earliest class, matching a strict ">" scan.
    scores = calculate(me, observation)
    best = 0
    for i in range(1, len(scores)):
        if scores[i] > scores[best]:
            best = i
    return me.classes[best]
def _eval_feature_fn(fn, xs, classes):
"""_eval_feature_fn(fn, xs, classes) -> dict of values
Evaluate a feature function on every instance of the training set
and class. fn is a callback function that takes two parameters: a
training instance and a class. Return a dictionary of (training
set index, class index) -> non-zero value. Values of 0 are not
stored in the dictionary.
"""
values = {}
for i in range(len(xs)):
for j in range(len(classes)):
f = fn(xs[i], classes[j])
if f != 0:
values[(i, j)] = f
return values
def _calc_empirical_expects(xs, ys, classes, features):
    """_calc_empirical_expects(xs, ys, classes, features) -> list of expectations

    Calculate the expectation of each feature function over the observed
    data; these are the constraints for the maximum entropy distribution.
    Returns a list of expectations, parallel to the list of features.
    """
    # E[f_i] = SUM_x,y P(x, y) f(x, y) = 1/N SUM_i f(x_i, y_i)
    class2index = listfns.itemindex(classes)
    ys_i = [class2index[y] for y in ys]
    N = len(xs)
    expect = []
    for feature in features:
        total = 0
        for i in range(N):
            total += feature.get((i, ys_i[i]), 0)
        expect.append(float(total) / N)
    return expect
def _calc_model_expects(xs, classes, features, alphas):
    """_calc_model_expects(xs, classes, features, alphas) -> list of expectations.

    Calculate the expectation of each feature under the current model.
    Not used during maximum entropy training, but handy for debugging.
    """
    # SUM_X P(x) SUM_Y P(Y|X) F(X, Y) = 1/N SUM_X SUM_Y P(Y|X) F(X, Y)
    p_yx = _calc_p_class_given_x(xs, classes, features, alphas)
    N = len(xs)
    expects = []
    for feature in features:
        total = 0.0  # renamed from `sum` to stop shadowing the builtin
        for (i, j), f in feature.items():
            total += p_yx[i][j] * f
        expects.append(total / N)
    return expects
def _calc_p_class_given_x(xs, classes, features, alphas):
    """_calc_p_class_given_x(xs, classes, features, alphas) -> matrix

    Calculate P(y|x), where y is the class and x is an instance from
    the training set.  Return a XSxCLASSES matrix of probabilities.
    """
    # 32-bit float array; zeros/exp/sum come from `from Numeric import *`
    # at module top (legacy Numeric, predecessor of NumPy).
    prob_yx = zeros((len(xs), len(classes)), Float32)
    # Calculate log P(y, x).
    # Python 2 idiom: map(None, a, b) pairs the parallel lists (== zip here).
    for feature, alpha in map(None, features, alphas):
        for (x, y), f in feature.items():
            prob_yx[x][y] += alpha * f
    # Take an exponent to get P(y, x)
    prob_yx = exp(prob_yx)
    # Divide out the probability over each class, so we get P(y|x).
    for i in range(len(xs)):
        z = sum(prob_yx[i])
        prob_yx[i] = prob_yx[i] / z
    # Pure-Python reference implementation, kept for documentation:
    #prob_yx = []
    #for i in range(len(xs)):
    #    z = 0.0 # Normalization factor for this x, over all classes.
    #    probs = [0.0] * len(classes)
    #    for j in range(len(classes)):
    #        log_p = 0.0 # log of the probability of f(x, y)
    #        for k in range(len(features)):
    #            log_p += alphas[k] * features[k].get((i, j), 0.0)
    #        probs[j] = math.exp(log_p)
    #        z += probs[j]
    #    # Normalize the probabilities for this x.
    #    probs = map(lambda x, z=z: x/z, probs)
    #    prob_yx.append(probs)
    return prob_yx
def _calc_f_sharp(N, nclasses, features):
"""_calc_f_sharp(N, nclasses, features) -> matrix of f sharp values."""
# f#(x, y) = SUM_i feature(x, y)
f_sharp = zeros((N, nclasses))
for feature in features:
for (i, j), f in feature.items():
f_sharp[i][j] += f
return f_sharp
def _iis_solve_delta(N, feature, f_sharp, empirical, prob_yx):
    """Solve for one feature's IIS update delta_i via Newton's method.

    Finds the root of:
        empirical - 1/N * SUM_x SUM_c P(c|x) f_i(x, c) e^[delta_i * f#(x, c)] = 0

    Raises RuntimeError when Newton's method does not converge within
    MAX_NEWTON_ITERATIONS.  (Bug fix: the original raised a bare string,
    which has been illegal since Python 2.6 and itself raised TypeError.)
    """
    delta = 0.0
    iters = 0
    while iters < MAX_NEWTON_ITERATIONS:  # iterate for Newton's method
        # Evaluate the objective and its derivative at the current delta.
        f_newton = df_newton = 0.0
        for (i, j), f in feature.items():
            prod = prob_yx[i][j] * f * math.exp(delta * f_sharp[i][j])
            f_newton += prod
            df_newton += prod * f_sharp[i][j]
        f_newton, df_newton = empirical - f_newton / N, -df_newton / N
        ratio = f_newton / df_newton
        delta -= ratio
        if math.fabs(ratio) < NEWTON_CONVERGE:  # converged
            break
        iters = iters + 1
    else:
        raise RuntimeError("Newton's method did not converge")
    return delta
def _train_iis(xs, classes, features, f_sharp, alphas, e_empirical):
    """One IIS hill-climbing step; returns an improved alpha vector.

    Each feature's delta is independent, so this is a good function to
    parallelize.
    """
    # Pre-calculate P(y|x) under the current alphas.
    p_yx = _calc_p_class_given_x(xs, classes, features, alphas)
    N = len(xs)
    return [alpha + _iis_solve_delta(N, features[k], f_sharp, e_empirical[k], p_yx)
            for k, alpha in enumerate(alphas)]
def train(training_set, results, feature_fns, update_fn=None):
    """train(training_set, results, feature_fns[, update_fn]) -> MaxEntropy object

    Train a maximum entropy classifier on a training set.
    training_set is a list of observations.  results is a list of the
    class assignments for each observation.  feature_fns is a list of
    the features.  These are callback functions that take an
    observation and class and return a 1 or 0.  update_fn is a
    callback function that's called at each training iteration.  It is
    passed a MaxEntropy object that encapsulates the current state of
    the training.

    Raises ValueError for empty or mismatched input, and RuntimeError
    when IIS fails to converge within MAX_IIS_ITERATIONS.
    """
    # Bug fixes versus the original:
    #   * py2-only `raise X, "msg"` syntax replaced with `raise X("msg")`
    #     (identical behavior, also valid Python 3);
    #   * the string exception at the end replaced with RuntimeError
    #     (raising a string has been illegal since Python 2.6);
    #   * `iters` is now actually incremented, so MAX_IIS_ITERATIONS is
    #     enforced instead of looping forever on non-convergent input.
    if not len(training_set):
        raise ValueError("No data in the training set.")
    if len(training_set) != len(results):
        raise ValueError("training_set and results should be parallel lists.")
    # Rename variables for convenience.
    xs, ys = training_set, results
    # Get a sorted list of all the classes that need to be trained.
    classes = listfns.items(results)
    classes.sort()
    # Cache values for all features.
    features = [_eval_feature_fn(fn, training_set, classes)
                for fn in feature_fns]
    # Cache values for f#.
    f_sharp = _calc_f_sharp(len(training_set), len(classes), features)
    # Pre-calculate the empirical expectations of the features.
    e_empirical = _calc_empirical_expects(xs, ys, classes, features)
    # Now train the alpha parameters to weigh each feature.
    alphas = [0.0] * len(features)
    iters = 0
    while iters < MAX_IIS_ITERATIONS:
        nalphas = _train_iis(xs, classes, features, f_sharp,
                             alphas, e_empirical)
        # Total absolute change of the alpha vector this iteration.
        diff = map(lambda x, y: math.fabs(x-y), alphas, nalphas)
        diff = reduce(lambda x, y: x+y, diff, 0)
        alphas = nalphas
        me = MaxEntropy()
        me.alphas, me.classes, me.feature_fns = alphas, classes, feature_fns
        if update_fn is not None:
            update_fn(me)
        if diff < IIS_CONVERGE:  # converged
            break
        iters = iters + 1
    else:
        raise RuntimeError("IIS did not converge")
    return me
|
Scientists have shown just how mind-bogglingly complex are the genetics underpinning the development of cancer.
For the second time a team from Washington University has decoded the complete DNA of a patient with a form of leukaemia.
But the suite of key genetic mutations they found were completely different from those uncovered following analysis of their first patient last year.
The latest study does reveal some potentially significant findings.
One of the new mutations found in the second patient was also found in samples taken from 15 other patients with the same disease, acute myeloid leukaemia (AML).
The same mutation is also thought to play a role in the development of a type of brain tumour called a glioma.
A second new mutation was also found in another AML patient.
By using a state-of-art gene sequencing technique, the Washington team became the first to decode the entire genome of a cancer patient last year.
Once they have the full menu of DNA from cancer cells, the researchers can compare it with DNA from healthy cells to pinpoint genetic mutations which probably play a key role in the development of the disease.
The hope is that armed with this information scientists will be able to develop new drugs to target cancer.
But lead researcher Dr Elaine Mardis said: "Only by sequencing thousands of cancer genomes are we going to find and make sense of the complex web of genetic mutations and the altered molecular pathways in this disease.
"What we find may lead us to completely restructure the way we define tumour types and subtypes."
Her colleague Dr Timothy Ley said: "Currently, we don't have great information about how patients with this particular subtype of AML will respond to treatment, so most of them are treated similarly up front.
"By defining the mutations that cause AML in different people, we hope to determine which patients need aggressive treatment, and which can be treated effectively with less intense therapies."
The patient in the latest study was a 38-year-old man who had been in remission for three years.
Analysis revealed 64 genetic mutations which were most likely to play a role in cancer development.
Of these 52 were found in long stretches of DNA that do not contain genes, but which potentially affect how and when neighbouring genes become active.
The researchers compared the results with samples from 187 other AML patients.
They found the same mutation linked to brain tumours in 15 samples, making it one of the most common mutations yet linked to AML.
None of the mutations uncovered from analysis of the first patient was subsequently found in any other AML patient.
Dr Jodie Moffat, Cancer Research UK's senior health information officer, said: "It's exciting that these detailed studies to understand the genetic basis of cancer are now possible due to advances in technology.
"The genetic factors involved in leukaemia are particularly complex, so anything new we can learn is very welcome.
"But further research will be needed before scientists can reveal which parts of the genetic puzzle can actually be used to improve the lives of cancer patients."
|
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Field, Div
from django import forms
from django.core.exceptions import ValidationError
from django.shortcuts import get_object_or_404
from django.utils.formats import date_format
from django.utils.translation import ugettext_lazy
from register.forms import SelectDateOfBirthWidget, MULTIPLE_REGISTRATION_ERROR
from register.models import Bicycle, Candidate
class CreateCandidateForm(forms.ModelForm):
    """Registration form for a new candidate (name and date of birth).

    clean() rejects duplicates: a person matching on all three fields may
    only be registered once.
    """
    class Meta:
        model = Candidate
        fields = ('first_name', 'last_name', 'date_of_birth')
        labels = {'first_name': ugettext_lazy('First name'),
                  'last_name': ugettext_lazy('Last name'),
                  'date_of_birth': ugettext_lazy('Date of birth')}
        widgets = {'date_of_birth': SelectDateOfBirthWidget}

    def __init__(self, *args, **kwargs):
        super(CreateCandidateForm, self).__init__(*args, **kwargs)
        # crispy-forms layout: one bootstrap row wrapping the three fields.
        self.helper = FormHelper()
        self.helper.layout = Layout(Div(Div(Field('first_name'),
                                            Field('last_name'),
                                            Field('date_of_birth'),
                                            css_class="col-xs-12 col-md-8"),
                                        css_class="form-group row"))
        self.helper.add_input(Submit('submit', 'Submit',
                                     css_class='col-xs-3 btn-info'))

    def clean(self):
        # Block a second registration of the exact same person.
        cleaned_data = super(CreateCandidateForm, self).clean()
        first_name = cleaned_data.get('first_name')
        last_name = cleaned_data.get('last_name')
        date_of_birth = cleaned_data.get('date_of_birth')
        if Candidate.get_matching(first_name=first_name,
                                  last_name=last_name,
                                  date_of_birth=date_of_birth):
            raise ValidationError(MULTIPLE_REGISTRATION_ERROR)
        return cleaned_data
def get_hidden_field(name, var):
    """Wrap *var* in a one-element list holding a hidden crispy Field.

    Falsy values (None, 0, "") are rendered as an empty string so the
    hidden input is always present.
    """
    value = var if var else ""
    return [Field(name, type='hidden', value=value)]
def get_hidden_fields(candidate_id, event_id, bicycle_id):
    """Hidden fields carrying the three ids every candidate-action form
    round-trips (order: candidate_id, event_id, bicycle_id)."""
    fields = []
    for name, value in (('candidate_id', candidate_id),
                        ('event_id', event_id),
                        ('bicycle_id', bicycle_id)):
        fields += get_hidden_field(name, value)
    return fields
class DeleteCandidateForm(forms.Form):
    """Hidden-fields-only confirmation form for deleting a candidate.

    event_id / bicycle_id are optional — presumably page context for the
    consuming view; confirm against the view code.
    """
    event_id = forms.IntegerField(min_value=0, required=False)
    bicycle_id = forms.IntegerField(min_value=0, required=False)
    candidate_id = forms.IntegerField(min_value=0)

    def __init__(self, candidate_id=None, event_id=None, bicycle_id=None,
                 *args, **kwargs):
        super(DeleteCandidateForm, self).__init__(*args, **kwargs)
        # All three ids travel as hidden inputs; only the button is visible.
        self.helper = FormHelper()
        self.helper.layout = Layout(*get_hidden_fields(
            candidate_id, event_id, bicycle_id))
        self.helper.add_input(Submit('submit', 'Delete Candidate',
                                     css_class='col-xs-3 btn-info'))
class InviteCandidateForm(forms.Form):
    """Invite a candidate to one of the events they are not yet invited to."""
    event_id = forms.IntegerField(min_value=0, required=False)
    candidate_id = forms.IntegerField(min_value=0)
    invitation_event_id = forms.IntegerField(min_value=0)

    def __init__(self, candidate_id=None, event_id=None,
                 bicycle_id=None,  # pylint: disable=unused-argument
                 *args, **kwargs):
        super(InviteCandidateForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        if candidate_id:
            # Replace the bare IntegerField with a dropdown listing only the
            # events this candidate has not been invited to yet.
            candidate = get_object_or_404(Candidate, id=candidate_id)
            event_choices = [(event.id, date_format(event.due_date,
                                                    'DATETIME_FORMAT'))
                             for event in candidate.events_not_invited_to]
            self.fields['invitation_event_id'] = forms.ChoiceField(
                choices=event_choices)
            layout = [Field('invitation_event_id')]
            layout += get_hidden_field('candidate_id', candidate_id)
            layout += get_hidden_field('event_id', event_id)
            self.helper.layout = Layout(*layout)
        self.helper.form_show_labels = False
        self.helper.add_input(Submit('submit', 'Submit',
                                     css_class='col-xs-3 btn-info'))
class ModifyCandidateForm(forms.ModelForm):
    """Edit an existing candidate's name / date of birth.

    The duplicate check in clean() excludes the candidate being edited.
    """
    event_id = forms.IntegerField(min_value=0, required=False)
    bicycle_id = forms.IntegerField(min_value=0, required=False)
    candidate_id = forms.IntegerField(min_value=0)

    class Meta:
        model = Candidate
        fields = ('first_name', 'last_name', 'date_of_birth',
                  'event_id', 'bicycle_id', 'candidate_id')
        labels = {'first_name': ugettext_lazy('First name'),
                  'last_name': ugettext_lazy('Last name'),
                  'date_of_birth': ugettext_lazy('Date of birth')}
        widgets = {'date_of_birth': SelectDateOfBirthWidget}

    def __init__(self, candidate_id=None, event_id=None, bicycle_id=None,
                 *args, **kwargs):
        super(ModifyCandidateForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        if candidate_id:
            # Pre-fill the visible fields from the stored candidate.
            candidate = get_object_or_404(Candidate, id=candidate_id)
            self.initial = {'first_name': candidate.first_name,
                            'last_name': candidate.last_name,
                            'date_of_birth': candidate.date_of_birth}
            layout = [Layout(Div(Div(Field('first_name'),
                                     Field('last_name'),
                                     Field('date_of_birth'),
                                     css_class="col-xs-12 col-md-8"),
                                 css_class="form-group row"))]
            layout += get_hidden_fields(candidate_id, event_id, bicycle_id)
            self.helper.layout = Layout(*layout)
        self.helper.add_input(Submit('submit', 'Submit',
                                     css_class='col-xs-3 btn-info'))

    def clean(self):
        cleaned_data = super(ModifyCandidateForm, self).clean()
        first_name = cleaned_data.get('first_name')
        last_name = cleaned_data.get('last_name')
        date_of_birth = cleaned_data.get('date_of_birth')
        # NOTE(review): direct [] access raises KeyError (HTTP 500) when
        # candidate_id fails field validation — confirm whether .get() plus
        # a ValidationError would be preferable.
        candidate_id = cleaned_data['candidate_id']
        if Candidate.get_matching(
                first_name=first_name,
                last_name=last_name,
                date_of_birth=date_of_birth).exclude(id=candidate_id):
            raise ValidationError(MULTIPLE_REGISTRATION_ERROR)
        return cleaned_data
class RefundForm(forms.Form):
    """Hidden-fields-only confirmation form for refunding a bicycle.

    Structurally identical to DeleteCandidateForm apart from the button label.
    """
    event_id = forms.IntegerField(min_value=0, required=False)
    bicycle_id = forms.IntegerField(min_value=0, required=False)
    candidate_id = forms.IntegerField(min_value=0)

    def __init__(self, candidate_id=None, event_id=None, bicycle_id=None,
                 *args, **kwargs):
        super(RefundForm, self).__init__(*args, **kwargs)
        # All three ids travel as hidden inputs; only the button is visible.
        self.helper = FormHelper()
        self.helper.layout = Layout(*get_hidden_fields(
            candidate_id, event_id, bicycle_id))
        self.helper.add_input(Submit('submit', 'Refund bicycle',
                                     css_class='col-xs-3 btn-info'))
class HandoverForm(forms.ModelForm):
    """Record the details of a bicycle being handed over.

    Collects the bicycle's identifying attributes plus optional remarks;
    event/bicycle/candidate ids travel as hidden fields.
    """
    event_id = forms.IntegerField(min_value=0, required=False)
    bicycle_id = forms.IntegerField(min_value=0, required=False)
    candidate_id = forms.IntegerField(min_value=0)

    class Meta:
        model = Bicycle
        fields = ['bicycle_number', 'general_remarks', 'lock_combination',
                  'color', 'brand', 'event_id', 'candidate_id', 'bicycle_id']

    def __init__(self, candidate_id=None, event_id=None, bicycle_id=None,
                 *args, **kwargs):
        super(HandoverForm, self).__init__(*args, **kwargs)
        # Remarks are optional even though the model field may require them.
        self.fields['general_remarks'].required = False
        self.helper = FormHelper()
        self.helper.form_method = 'post'
        # Visible inputs first, then the hidden ids, remarks at the bottom.
        rows = [Field(name) for name in ('bicycle_number', 'lock_combination',
                                         'color', 'brand')]
        rows += get_hidden_fields(candidate_id, event_id, bicycle_id)
        rows.append('general_remarks')
        self.helper.layout = Layout(*rows)
        self.helper.add_input(Submit('submit', 'Submit',
                                     css_class='col-xs-3 btn-info'))
class EventForm(forms.Form):
    """Form holding a single event due date.

    Accepts either 'DD.MM.YYYY HH:MM' (24-hour) or
    'MM/DD/YYYY HH:MM AM/PM' input.
    """
    due_date = forms.DateTimeField(input_formats=['%d.%m.%Y %H:%M',
                                                  '%m/%d/%Y %I:%M %p'])
class InviteForm(forms.Form):
    """Form selecting an event id plus four choice ids.

    All fields are required non-negative integers. The choices are
    presumably candidate/slot selections — confirm against the view
    that consumes this form.
    """
    event_id = forms.IntegerField(min_value=0)
    choice_1 = forms.IntegerField(min_value=0)
    choice_2 = forms.IntegerField(min_value=0)
    choice_3 = forms.IntegerField(min_value=0)
    choice_4 = forms.IntegerField(min_value=0)
|
Parsons Audio in Wellesley, Mass., announces that this year’s Parsons Audio Expo will take place on Thursday, November 8, 2012, at the Holiday Inn Boston Dedham. The annual event focuses on education and emerging trends within the professional audio industry. The Expo will run from 10 a.m. to 6 p.m. and is free and open to the public.
Scheduled speakers include Grammy Award–winning engineers Bob Ludwig, Frank Filipetti and Adam Ayan, as well as Kevin Killen (U2, Elvis Costello), Buford Jones (Eric Clapton, David Bowie, Pink Floyd), Susan Rogers (Prince, David Byrne) and others.
Find more information about Parsons Audio Expo 2012 at paudio.com/expo.
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test gspaths library."""
from __future__ import print_function
from chromite.lib import cros_test_lib
from chromite.lib.paygen import gspaths
class GsPathsDataTest(cros_test_lib.TestCase):
  """Tests for structs defined in GsPaths."""

  def testBuild(self):
    """Build() fills unset fields with None and has a stable str()."""
    attrs = {
        'channel': 'foo-channel',
        'board': 'board-name',
        'version': '1.2.3',
    }
    build = gspaths.Build(attrs)
    # Fields not supplied (bucket, uri) must default to None.
    self.assertEqual(build, dict(attrs, bucket=None, uri=None))
    self.assertEqual(
        "Build definition (board='board-name',"
        " version='1.2.3', channel='foo-channel')",
        str(build))
class GsPathsChromeosReleasesTest(cros_test_lib.TestCase):
  """Tests for gspaths.ChromeosReleases."""
  # Standard Chrome OS releases names.
  _CHROMEOS_RELEASES_BUCKET = 'chromeos-releases'
  # Google Storage path, image and payload name base templates.
  # These mirror the naming conventions that gspaths is expected to
  # produce/parse; every test populates them via _Populate() below.
  _GS_BUILD_PATH_TEMPLATE = 'gs://%(bucket)s/%(channel)s/%(board)s/%(version)s'
  _IMAGE_NAME_TEMPLATE = (
      'chromeos_%(image_version)s_%(board)s_%(signed_image_type)s_'
      '%(image_channel)s_%(key)s.bin')
  _UNSIGNED_IMAGE_ARCHIVE_NAME_TEMPLATE = (
      'ChromeOS-%(unsigned_image_type)s-%(milestone)s-%(image_version)s-'
      '%(board)s.tar.xz')
  _FULL_PAYLOAD_NAME_TEMPLATE = (
      'chromeos_%(image_version)s_%(board)s_%(image_channel)s_full_%(key)s.bin-'
      '%(random_str)s.signed')
  _DELTA_PAYLOAD_NAME_TEMPLATE = (
      'chromeos_%(src_version)s-%(image_version)s_%(board)s_%(image_channel)s_'
      'delta_%(key)s.bin-%(random_str)s.signed')
  _FULL_DLC_PAYLOAD_NAME_TEMPLATE = (
      'dlc_%(dlc_id)s_%(dlc_package)s_%(image_version)s_%(board)s_'
      '%(image_channel)s_full.bin-%(random_str)s.signed')
  _UNSIGNED_FULL_PAYLOAD_NAME_TEMPLATE = (
      'chromeos_%(image_version)s_%(board)s_%(image_channel)s_full_'
      '%(unsigned_image_type)s.bin-%(random_str)s')
  _UNSIGNED_DELTA_PAYLOAD_NAME_TEMPLATE = (
      'chromeos_%(src_version)s-%(image_version)s_%(board)s_%(image_channel)s_'
      'delta_%(unsigned_image_type)s.bin-%(random_str)s')
  # Compound templates.
  _GS_IMAGE_PATH_TEMPLATE = '/'.join(
      (_GS_BUILD_PATH_TEMPLATE, _IMAGE_NAME_TEMPLATE))
  _GS_UNSIGNED_IMAGE_ARCHIVE_PATH_TEMPLATE = '/'.join(
      (_GS_BUILD_PATH_TEMPLATE, _UNSIGNED_IMAGE_ARCHIVE_NAME_TEMPLATE))
  _GS_PAYLOADS_PATH_TEMPLATE = '/'.join((_GS_BUILD_PATH_TEMPLATE, 'payloads'))
  _GS_PAYLOADS_SIGNING_PATH_TEMPLATE = '/'.join((_GS_BUILD_PATH_TEMPLATE,
                                                 'payloads', 'signing'))
  _GS_FULL_PAYLOAD_PATH_TEMPLATE = '/'.join(
      (_GS_PAYLOADS_PATH_TEMPLATE, _FULL_PAYLOAD_NAME_TEMPLATE))
  _GS_DELTA_PAYLOAD_PATH_TEMPLATE = '/'.join(
      (_GS_PAYLOADS_PATH_TEMPLATE, _DELTA_PAYLOAD_NAME_TEMPLATE))
  _GS_FULL_DLC_PAYLOAD_PATH_TEMPLATE = '/'.join(
      (_GS_PAYLOADS_PATH_TEMPLATE, 'dlc', '%(dlc_id)s', '%(dlc_package)s',
       _FULL_DLC_PAYLOAD_NAME_TEMPLATE))
  def setUp(self):
    """Creates shared builds, image attributes and template dictionaries."""
    # Shared attributes (signed + unsigned images).
    self.bucket = 'crt'
    self.channel = 'foo-channel'
    self.board = 'board-name'
    self.version = '1.2.3'
    self.build = gspaths.Build(bucket=self.bucket, channel=self.channel,
                               board=self.board, version=self.version)
    self.release_build = gspaths.Build(bucket=self._CHROMEOS_RELEASES_BUCKET,
                                       channel=self.channel, board=self.board,
                                       version=self.version)
    # Attributes for DLC.
    self.dlc_id = 'dummy-dlc'
    self.dlc_package = 'dummy-package'
    # Signed image attributes.
    self.key = 'mp-v3'
    self.signed_image_type = 'base'
    # Unsigned (test) image attributes.
    self.milestone = 'R12'
    self.unsigned_image_type = 'test'
    # Attributes used for payload testing.
    self.src_version = '1.1.1'
    self.random_str = '1234567890'
    self.src_build = gspaths.Build(bucket=self.bucket, channel=self.channel,
                                   board=self.board, version=self.src_version)
    # Dictionaries for populating templates.
    self.image_attrs = dict(
        bucket=self.bucket,
        channel=self.channel,
        image_channel=self.channel,
        board=self.board,
        version=self.version,
        image_version=self.version,
        key=self.key,
        signed_image_type=self.signed_image_type)
    self.unsigned_image_archive_attrs = dict(
        bucket=self.bucket,
        channel=self.channel,
        image_channel=self.channel,
        board=self.board,
        version=self.version,
        image_version=self.version,
        milestone=self.milestone,
        unsigned_image_type=self.unsigned_image_type)
    self.all_attrs = dict(self.image_attrs,
                          src_version=self.src_version,
                          random_str=self.random_str,
                          **self.unsigned_image_archive_attrs)
  def _Populate(self, template, **kwargs):
    """Populates a template string with override attributes.
    This will use the default test attributes to populate a given string
    template. It will further override default field values with the values
    provided by the optional named arguments.
    Args:
      template: a string with named substitution fields
      kwargs: named attributes to override the defaults
    Returns:
      The template with all named fields substituted.
    """
    attrs = dict(self.all_attrs, **kwargs)
    return template % attrs
  def _PopulateGsPath(self, base_path, suffix=None, **kwargs):
    """Populates a Google Storage path template w/ optional suffix.
    Args:
      base_path: a path string template with named substitution fields
      suffix: a path suffix to append to the given base path
      kwargs: named attributes to override the defaults
    Returns:
      The populated path string.
    """
    template = base_path
    if suffix:
      template += '/' + suffix
    return self._Populate(template, **kwargs)
  def testBuildUri(self):
    """BuildUri() produces the canonical gs:// build path."""
    self.assertEqual(
        gspaths.ChromeosReleases.BuildUri(self.build),
        self._PopulateGsPath(self._GS_BUILD_PATH_TEMPLATE))
  def testBuildPayloadsUri(self):
    """BuildPayloadsUri() appends the 'payloads' directory to the build."""
    self.assertEqual(
        gspaths.ChromeosReleases.BuildPayloadsUri(self.build),
        self._PopulateGsPath(self._GS_PAYLOADS_PATH_TEMPLATE))
  def testBuildPayloadsSigningUri(self):
    """Checks the signing directory URI and the LOCK flag-file URI."""
    self.assertEqual(
        gspaths.ChromeosReleases.BuildPayloadsSigningUri(self.build),
        self._PopulateGsPath(self._GS_PAYLOADS_SIGNING_PATH_TEMPLATE))
    # Flag files live directly under payloads/, named '<FLAG>_flag'.
    self.assertEqual(
        gspaths.ChromeosReleases.BuildPayloadsFlagUri(
            self.build, gspaths.ChromeosReleases.LOCK),
        self._PopulateGsPath(self._GS_PAYLOADS_PATH_TEMPLATE,
                             suffix='LOCK_flag'))
  def testImageName(self):
    """ImageName() matches the signed-image naming template."""
    self.assertEqual(
        gspaths.ChromeosReleases.ImageName(self.channel,
                                           self.board,
                                           self.version,
                                           self.key,
                                           self.signed_image_type),
        self._Populate(self._IMAGE_NAME_TEMPLATE))
  def testDLCImageName(self):
    """DLC images always use the fixed file name 'dlc.img'."""
    self.assertEqual(gspaths.ChromeosReleases.DLCImageName(), 'dlc.img')
  def testUnsignedImageArchiveName(self):
    """UnsignedImageArchiveName() matches the unsigned archive template."""
    self.assertEqual(
        gspaths.ChromeosReleases.UnsignedImageArchiveName(
            self.board,
            self.version,
            self.milestone,
            self.unsigned_image_type),
        self._Populate(self._UNSIGNED_IMAGE_ARCHIVE_NAME_TEMPLATE))
  def testImageUri(self):
    """ImageUri() combines the build path with the signed-image name."""
    self.assertEqual(
        gspaths.ChromeosReleases.ImageUri(self.build, self.key,
                                          self.signed_image_type),
        self._Populate(self._GS_IMAGE_PATH_TEMPLATE))
  def testUnsignedImageUri(self):
    """UnsignedImageUri() combines the build path with the archive name."""
    self.assertEqual(
        gspaths.ChromeosReleases.UnsignedImageUri(self.build, self.milestone,
                                                  self.unsigned_image_type),
        self._Populate(self._GS_UNSIGNED_IMAGE_ARCHIVE_PATH_TEMPLATE))
  @staticmethod
  def _IncrementVersion(version, inc_amount=1):
    """Returns |version| with its last dotted component bumped by |inc_amount|."""
    version_part = version.rpartition('.')
    return '.'.join((version_part[0], str(int(version_part[2]) + inc_amount)))
  def testParseImageUri(self):
    """ParseImageUri() round-trips both plain and N-plus-one image URIs."""
    # 'npo' = N-plus-one: image channel/version differ from the build's.
    npo_version = self._IncrementVersion(self.version)
    npo_channel = 'nplusone-channel'
    basic_dict = dict(self.image_attrs)
    npo_dict = dict(self.image_attrs,
                    bucket=self._CHROMEOS_RELEASES_BUCKET,
                    image_version=npo_version,
                    image_channel=npo_channel)
    basic_dict['uri'] = uri_basic = self._GS_IMAGE_PATH_TEMPLATE % basic_dict
    npo_dict['uri'] = uri_npo = self._GS_IMAGE_PATH_TEMPLATE % npo_dict
    expected_basic = gspaths.Image(build=self.build,
                                   image_type=self.signed_image_type,
                                   key=self.key,
                                   uri=uri_basic)
    expected_basic_str = gspaths.ChromeosReleases.ImageName(
        expected_basic.build.channel, expected_basic.build.board,
        expected_basic.build.version, expected_basic.key,
        expected_basic.image_type)
    expected_npo = gspaths.Image(build=self.release_build,
                                 key=self.key,
                                 image_type=self.signed_image_type,
                                 image_channel=npo_channel,
                                 image_version=npo_version,
                                 uri=uri_npo)
    expected_npo_str = gspaths.ChromeosReleases.ImageName(
        expected_npo.image_channel, expected_npo.build.board,
        expected_npo.image_version, expected_npo.key, expected_npo.image_type)
    basic_image = gspaths.ChromeosReleases.ParseImageUri(uri_basic)
    self.assertEqual(basic_image, expected_basic)
    self.assertEqual(str(basic_image), expected_basic_str)
    npo_image = gspaths.ChromeosReleases.ParseImageUri(uri_npo)
    self.assertEqual(npo_image, expected_npo)
    self.assertEqual(str(npo_image), expected_npo_str)
    # Signer intermediate artifacts are not images and must not parse.
    signer_output = ('gs://chromeos-releases/dev-channel/link/4537.7.0/'
                    'chromeos_4537.7.1_link_recovery_nplusone-channel_'
                    'mp-v4.bin.1.payload.hash.update_signer.signed.bin')
    bad_image = gspaths.ChromeosReleases.ParseImageUri(signer_output)
    self.assertEqual(bad_image, None)
  def testParseDLCImageUri(self):
    """ParseDLCImageUri() extracts dlc id/package from a DLC image URI."""
    image_uri = ('gs://chromeos-releases/foo-channel/board-name/1.2.3/dlc/'
                 '%s/%s/%s') % (self.dlc_id, self.dlc_package,
                                gspaths.ChromeosReleases.DLCImageName())
    dlc_image = gspaths.ChromeosReleases.ParseDLCImageUri(image_uri)
    expected_dlc_image = gspaths.DLCImage(
        build=self.release_build, key=None, uri=image_uri,
        dlc_id=self.dlc_id, dlc_package=self.dlc_package,
        dlc_image=gspaths.ChromeosReleases.DLCImageName())
    self.assertEqual(dlc_image, expected_dlc_image)
  def testParseUnsignedImageUri(self):
    """ParseUnsignedImageUri() round-trips an unsigned archive URI."""
    attr_dict = dict(self.unsigned_image_archive_attrs)
    attr_dict['uri'] = uri = (
        self._GS_UNSIGNED_IMAGE_ARCHIVE_PATH_TEMPLATE % attr_dict)
    expected = gspaths.UnsignedImageArchive(build=self.build,
                                            milestone=self.milestone,
                                            image_type=self.unsigned_image_type,
                                            uri=uri)
    expected_str = gspaths.ChromeosReleases.UnsignedImageArchiveName(
        expected.build.board, expected.build.version, expected.milestone,
        expected.image_type)
    image = gspaths.ChromeosReleases.ParseUnsignedImageUri(uri)
    self.assertEqual(image, expected)
    self.assertEqual(str(image), expected_str)
  def testPayloadNamePreset(self):
    """PayloadName() with an explicit random_str matches each template."""
    full = gspaths.ChromeosReleases.PayloadName(channel=self.channel,
                                                board=self.board,
                                                version=self.version,
                                                key=self.key,
                                                random_str=self.random_str)
    delta = gspaths.ChromeosReleases.PayloadName(channel=self.channel,
                                                 board=self.board,
                                                 version=self.version,
                                                 key=self.key,
                                                 src_version=self.src_version,
                                                 random_str=self.random_str)
    full_unsigned = gspaths.ChromeosReleases.PayloadName(
        channel=self.channel,
        board=self.board,
        version=self.version,
        random_str=self.random_str,
        unsigned_image_type=self.unsigned_image_type)
    delta_unsigned = gspaths.ChromeosReleases.PayloadName(
        channel=self.channel,
        board=self.board,
        version=self.version,
        src_version=self.src_version,
        random_str=self.random_str,
        unsigned_image_type=self.unsigned_image_type)
    self.assertEqual(full, self._Populate(self._FULL_PAYLOAD_NAME_TEMPLATE))
    self.assertEqual(delta, self._Populate(self._DELTA_PAYLOAD_NAME_TEMPLATE))
    self.assertEqual(full_unsigned,
                     self._Populate(self._UNSIGNED_FULL_PAYLOAD_NAME_TEMPLATE))
    self.assertEqual(delta_unsigned,
                     self._Populate(self._UNSIGNED_DELTA_PAYLOAD_NAME_TEMPLATE))
  def testPayloadNameRandom(self):
    """PayloadName() without random_str generates one that fits the template."""
    full = gspaths.ChromeosReleases.PayloadName(channel=self.channel,
                                                board=self.board,
                                                version=self.version,
                                                key=self.key)
    delta = gspaths.ChromeosReleases.PayloadName(channel=self.channel,
                                                 board=self.board,
                                                 version=self.version,
                                                 key=self.key,
                                                 src_version=self.src_version)
    # Isolate the actual random string, transplant it in the reference template.
    full_random_str = full.split('-')[-1].partition('.')[0]
    self.assertEqual(
        full,
        self._Populate(self._FULL_PAYLOAD_NAME_TEMPLATE,
                       random_str=full_random_str))
    delta_random_str = delta.split('-')[-1].partition('.')[0]
    self.assertEqual(
        delta,
        self._Populate(self._DELTA_PAYLOAD_NAME_TEMPLATE,
                       random_str=delta_random_str))
  def testPayloadDLC(self):
    """DLCPayloadName() matches the full DLC payload template."""
    full = gspaths.ChromeosReleases.DLCPayloadName(
        channel=self.channel,
        board=self.board,
        version=self.version,
        random_str=self.random_str,
        dlc_id=self.dlc_id,
        dlc_package=self.dlc_package)
    self.assertEqual(full, self._Populate(self._FULL_DLC_PAYLOAD_NAME_TEMPLATE,
                                          dlc_id=self.dlc_id,
                                          dlc_package=self.dlc_package,
                                          random_str=self.random_str))
  def testPayloadUri(self):
    """PayloadUri()/DLCPayloadUri() with minimal and maximal argument sets."""
    test_random_channel = 'test_random_channel'
    test_max_version = '4.5.6'
    test_min_version = '0.12.1.0'
    min_full = gspaths.ChromeosReleases.PayloadUri(
        build=self.build, random_str=self.random_str, key=self.key)
    self.assertEqual(
        min_full,
        self._Populate(self._GS_FULL_PAYLOAD_PATH_TEMPLATE))
    max_full = gspaths.ChromeosReleases.PayloadUri(
        build=self.build, random_str=self.random_str, key=self.key,
        image_channel=test_random_channel, image_version=test_max_version)
    self.assertEqual(
        max_full,
        self._Populate(self._GS_FULL_PAYLOAD_PATH_TEMPLATE,
                       image_channel=test_random_channel,
                       image_version=test_max_version))
    min_delta = gspaths.ChromeosReleases.PayloadUri(
        build=self.build, random_str=self.random_str, key=self.key,
        src_version=test_min_version)
    self.assertEqual(
        min_delta,
        self._Populate(self._GS_DELTA_PAYLOAD_PATH_TEMPLATE,
                       src_version=test_min_version))
    max_delta = gspaths.ChromeosReleases.PayloadUri(
        build=self.build, random_str=self.random_str, key=self.key,
        image_channel=test_random_channel, image_version=test_max_version,
        src_version=test_min_version)
    self.assertEqual(
        max_delta,
        self._Populate(self._GS_DELTA_PAYLOAD_PATH_TEMPLATE,
                       src_version=test_min_version,
                       image_version=test_max_version,
                       image_channel=test_random_channel))
    dlc_full = gspaths.ChromeosReleases.DLCPayloadUri(
        build=self.build, random_str=self.random_str, dlc_id=self.dlc_id,
        dlc_package=self.dlc_package, image_channel=test_random_channel,
        image_version=test_max_version)
    self.assertEqual(
        dlc_full,
        self._Populate(self._GS_FULL_DLC_PAYLOAD_PATH_TEMPLATE,
                       src_version=test_min_version,
                       image_version=test_max_version,
                       image_channel=test_random_channel,
                       dlc_id=self.dlc_id,
                       dlc_package=self.dlc_package))
  def testParsePayloadUri(self):
    """Test gsutils.ChromeosReleases.ParsePayloadUri()."""
    image_version = '1.2.4'
    full_uri = self._Populate(self._GS_FULL_PAYLOAD_PATH_TEMPLATE)
    delta_uri = self._Populate(self._GS_DELTA_PAYLOAD_PATH_TEMPLATE)
    max_full_uri = self._Populate(self._GS_FULL_PAYLOAD_PATH_TEMPLATE,
                                  image_channel='image-channel',
                                  image_version=image_version)
    max_delta_uri = self._Populate(self._GS_DELTA_PAYLOAD_PATH_TEMPLATE,
                                   image_channel='image-channel',
                                   image_version=image_version)
    self.assertDictEqual(
        gspaths.ChromeosReleases.ParsePayloadUri(full_uri),
        {
            'tgt_image': gspaths.Image(build=self.build, key=self.key),
            'src_image': None,
            'build': self.build,
            'uri': full_uri,
            'exists': False
        })
    self.assertDictEqual(
        gspaths.ChromeosReleases.ParsePayloadUri(delta_uri),
        {
            'src_image': gspaths.Image(build=self.src_build),
            'tgt_image': gspaths.Image(build=self.build, key=self.key),
            'build': self.build,
            'uri': delta_uri,
            'exists': False
        })
    self.assertDictEqual(
        gspaths.ChromeosReleases.ParsePayloadUri(max_full_uri),
        {
            'tgt_image': gspaths.Image(build=self.build,
                                       key=self.key,
                                       image_version=image_version,
                                       image_channel='image-channel'),
            'src_image': None,
            'build': self.build,
            'uri': max_full_uri,
            'exists': False
        })
    self.assertDictEqual(
        gspaths.ChromeosReleases.ParsePayloadUri(max_delta_uri),
        {
            'src_image': gspaths.Image(build=self.src_build),
            'tgt_image': gspaths.Image(build=self.build,
                                       key=self.key,
                                       image_version=image_version,
                                       image_channel='image-channel'),
            'build': self.build,
            'uri': max_delta_uri,
            'exists': False
        })
  def testBuildValuesFromUri(self):
    """Tests BuildValuesFromUri"""
    exp = (r'^gs://(?P<bucket>.*)/(?P<channel>.*)/(?P<board>.*)/'
           r'(?P<version>.*)/chromeos_(?P<image_version>[^_]+)_'
           r'(?P=board)_(?P<image_type>[^_]+)_(?P<image_channel>[^_]+)_'
           '(?P<key>[^_]+).bin$')
    uri = ('gs://chromeos-releases/dev-channel/link/4537.7.0/'
           'chromeos_4537.7.1_link_recovery_nplusone-channel_mp-v4.bin')
    values = gspaths.Build.BuildValuesFromUri(exp, uri)
    self.assertEqual(values, {'build': gspaths.Build(bucket='chromeos-releases',
                                                     version='4537.7.0',
                                                     board='link',
                                                     channel='dev-channel'),
                              'image_version': '4537.7.1',
                              'image_type': 'recovery',
                              'image_channel': 'nplusone-channel',
                              'key': 'mp-v4'})
    # A URI that doesn't match the expression yields None.
    uri = 'foo-uri'
    self.assertIsNone(gspaths.Build.BuildValuesFromUri(exp, uri))
class GsPathsTest(cros_test_lib.TestCase):
  """Test general gspaths utilities."""

  def testVersionKey(self):
    """Test VersionKey, especially for new-style versus old-style."""
    unsorted = ['1.2.3', '1.2.2', '2.0.0', '1.1.4',
                '1.2.3.4', '1.2.3.3', '1.2.4.4', '1.2.4.5', '1.3.3.4',
                '0.1.2.3', '0.14.45.32']
    # New-style (4-part) versions sort below all old-style (3-part) ones.
    expected = ['0.1.2.3', '0.14.45.32',
                '1.2.3.3', '1.2.3.4', '1.2.4.4', '1.2.4.5', '1.3.3.4',
                '1.1.4', '1.2.2', '1.2.3', '2.0.0']
    self.assertEqual(sorted(unsorted, key=gspaths.VersionKey), expected)
    # The result must not depend on the initial ordering.
    self.assertEqual(sorted(reversed(unsorted), key=gspaths.VersionKey),
                     expected)

  def testVersionGreater(self):
    """Test VersionGreater, especially for new-style versus old-style."""
    greater_pairs = [
        ('1.2.3', '1.2.2'), ('1.2.3', '1.1.4'), ('2.0.0', '1.2.3'),
        ('1.2.3.4', '1.2.3.3'), ('1.2.4.4', '1.2.3.4'),
        ('1.3.3.4', '1.2.4.5'), ('2.0.0.0', '1.2.3.4'),
        # Any old-style version outranks any new-style version.
        ('1.2.3', '1.2.3.4'), ('1.2.3', '0.1.2.3'),
    ]
    for left, right in greater_pairs:
      self.assertTrue(gspaths.VersionGreater(left, right))
    not_greater_pairs = [
        ('1.2.3', '1.2.3'), ('1.2.2', '1.2.3'), ('1.1.4', '1.2.3'),
        ('1.2.3', '2.0.0'),
        ('1.2.3.4', '1.2.3.4'), ('1.2.3.3', '1.2.3.4'),
        ('1.2.3.4', '1.2.4.4'), ('1.2.4.5', '1.3.3.4'),
        ('1.2.3.4', '2.0.0.0'),
        ('1.2.3.4', '1.2.3'), ('0.1.2.3', '1.2.3'),
    ]
    for left, right in not_greater_pairs:
      self.assertFalse(gspaths.VersionGreater(left, right))

  def testIsImage(self):
    """IsImage() is True only for gspaths.Image instances."""
    self.assertFalse(gspaths.IsImage(3.14))
    self.assertTrue(gspaths.IsImage(gspaths.Image()))

  def testIsUnsignedImageArchive(self):
    """IsUnsignedImageArchive() is True only for UnsignedImageArchive."""
    self.assertFalse(gspaths.IsUnsignedImageArchive(3.14))
    self.assertTrue(
        gspaths.IsUnsignedImageArchive(gspaths.UnsignedImageArchive()))
class ImageTest(cros_test_lib.TestCase):
  """Test Image class implementation."""

  def setUp(self):
    """Creates the build shared by both image-type tests."""
    self.build = gspaths.Build(bucket='crt', channel='foo-channel',
                               board='board-name', version='1.2.3')

  def testImage_DefaultImageType(self):
    """An Image created without image_type falls back to 'recovery'."""
    image = gspaths.Image(build=self.build)
    self.assertEqual('recovery', image.image_type)

  def testImage_CustomImageType(self):
    """An explicitly supplied image_type is preserved as-is."""
    image = gspaths.Image(build=self.build, image_type='base')
    self.assertEqual('base', image.image_type)
|
Debossed with the silhouette of President Teddy Roosevelt and the reminder to “Keep Exploring,” the moleskin-type Rough Rider Journal features a pocket-envelope on the back cover, lined pages, elastic enclosure and ribbon bookmark.
SKU: JOUR-RR. Category: Americana. Tag: accessories.
|
"""Provides scheduling routines for stackless tasklets.
The scheduler itself runs as a tasklet. It blocks waiting
for input on the channel passed in. When new data is sent
on this channel, the scheduler wakes and begins processing
of the data.
"""
import stackless
from pype import Pype
from graph import get_pairlist, topsort
import sys
import traceback
def sched(ch, graph):
"""Sits in an infinite loop waiting on the channel to recieve data.
The procedure prolog takes care of sorting the
input graph into a dependency list and initializing
the filter tasklets used to construct the graph.
@param graph: The graph representing the work flow
@type graph: Python dict organized as a graph struct
@param ch: The stackless channel to listen on
@type ch: stackless.channel
@return: nothing
"""
edgeList = get_pairlist(graph)
nodes = topsort(edgeList)
tasks = []
inputEdge = Pype()
for n in nodes:
# start this microthread
tasks.append(stackless.tasklet(n.run)())
try:
# get this nodes outputs
edges = graph[n]
except:
pass
else:
# for each output
for e in edges:
e1 = Pype()
# does this port exist
if not n.has_port(edges[e][0]):
print 'Trying to connect undefined output port', n, edges[e][0]
sys.exit(1)
n.connect_output(edges[e][0], e1)
# does this port exist
if not e.has_port(edges[e][1]):
print 'Trying to connect undefined input port', e, edges[e][1]
sys.exit(1)
e.connect_input(edges[e][1], e1)
# Added so that incoming data is fed to every input adapter
# should check if in exists and create it if it doesn't
# because a user could remove the input port by accident
inputEdges = []
for n in nodes:
if n.get_type() == 'ADAPTER':
ie = Pype()
n.connect_input('in', ie)
inputEdges.append(ie)
#nodes[0].connect_input('in', inputEdge)
while True:
data = ch.receive()
for ie in inputEdges:
ie.send(data)
#inputEdge.send(data)
try:
tasks[0].run()
except:
traceback.print_exc()
|
Welcome to episode #663 of Six Pixels of Separation.
Here it is: Six Pixels of Separation – Episode #663 – Host: Mitch Joel. One of the most prolific and fascinating business professionals in the area of better brand storytelling and marketing is Bernadette Jiwa. She is a veritable force of nature who creates frequent and compelling prose that is sprinkled with optimism and wisdom. Last September she was on the show to discuss her book, Story Driven – You Don’t Need To Compete When You Know Who You Are, and she’s back with a brand new book called, The Right Story – A Brief Guide to Changing The World. I consider her to be a true business philosopher, because Bernadette delivers the goods. When she is not writing books and publishing articles, Bernadette helps companies of all sizes to build a brand to be proud of. Bernadette has a whole bunch of business books that you must read: Hunch, Meaningful, Marketing: A Love Story, Fortune Cookie Principle, Difference and Make Your Idea Matter. It is a thrill to have her back on the show to discuss her latest, The Right Story. Enjoy the conversation….
Here is my conversation with Bernadette Jiwa.
The Right Story – A Brief Guide to Changing The World.
Story Driven – You Don’t Need To Compete When You Know Who You Are.
Download the Podcast here: Six Pixels of Separation – Episode #663 – Host: Mitch Joel.
It’s hard for me to argue, as I don’t have much understanding of what Byron Sharp or Binet & Field have said. With that, I do not think it’s one-size-fits-all or that it’s binary. It’s a choice. People love stories. Why not build your brand around a story?
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.