repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
eesatfan/openpli-enigma2 | lib/python/Tools/StbHardware.py | 52 | 1840 | from fcntl import ioctl
from struct import pack, unpack
def getFPVersion():
ret = None
try:
ret = long(open("/proc/stb/fp/version", "r").read())
except IOError:
try:
fp = open("/dev/dbox/fp0")
ret = ioctl(fp.fileno(),0)
except IOError:
print "getFPVersion failed!"
return ret
def setFPWakeuptime(wutime):
try:
open("/proc/stb/fp/wakeup_time", "w").write(str(wutime))
except IOError:
try:
fp = open("/dev/dbox/fp0")
ioctl(fp.fileno(), 6, pack('L', wutime)) # set wake up
except IOError:
print "setFPWakeupTime failed!"
def setRTCtime(wutime):
try:
open("/proc/stb/fp/rtc", "w").write(str(wutime))
except IOError:
try:
fp = open("/dev/dbox/fp0")
ioctl(fp.fileno(), 0x101, pack('L', wutime)) # set wake up
except IOError:
print "setRTCtime failed!"
def getFPWakeuptime():
ret = 0
try:
ret = long(open("/proc/stb/fp/wakeup_time", "r").read())
except IOError:
try:
fp = open("/dev/dbox/fp0")
ret = unpack('L', ioctl(fp.fileno(), 5, ' '))[0] # get wakeuptime
except IOError:
print "getFPWakeupTime failed!"
return ret
wasTimerWakeup = None
def getFPWasTimerWakeup():
global wasTimerWakeup
if wasTimerWakeup is not None:
return wasTimerWakeup
wasTimerWakeup = False
try:
wasTimerWakeup = int(open("/proc/stb/fp/was_timer_wakeup", "r").read()) and True or False
except:
try:
fp = open("/dev/dbox/fp0")
wasTimerWakeup = unpack('B', ioctl(fp.fileno(), 9, ' '))[0] and True or False
except IOError:
print "wasTimerWakeup failed!"
if wasTimerWakeup:
# clear hardware status
clearFPWasTimerWakeup()
return wasTimerWakeup
def clearFPWasTimerWakeup():
try:
open("/proc/stb/fp/was_timer_wakeup", "w").write('0')
except:
try:
fp = open("/dev/dbox/fp0")
ioctl(fp.fileno(), 10)
except IOError:
print "clearFPWasTimerWakeup failed!"
| gpl-2.0 |
grap/OpenUpgrade | addons/website_certification/__openerp__.py | 320 | 1562 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Certified People',
'category': 'Website',
'website': 'https://www.odoo.com/page/website-builder',
'summary': 'Display your network of certified people on your website',
'version': '1.0',
'author': 'OpenERP S.A.',
'depends': ['marketing', 'website'],
'description': """
Display your network of certified people on your website
""",
'data': [
'security/ir.model.access.csv',
'views/website_certification_views.xml',
'views/website_certification_templates.xml',
],
'installable': True,
}
| agpl-3.0 |
PeterDaveHello/eden | modules/s3/s3gis.py | 3 | 392902 | # -*- coding: utf-8 -*-
""" GIS Module
@requires: U{B{I{gluon}} <http://web2py.com>}
@requires: U{B{I{shapely}} <http://trac.gispython.org/lab/wiki/Shapely>}
@copyright: (c) 2010-2015 Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("GIS",
"S3Map",
"S3ExportPOI",
"S3ImportPOI",
)
import datetime # Needed for Feed Refresh checks & web2py version check
import os
import re
import sys
#import logging
import urllib # Needed for urlencoding
import urllib2 # Needed for quoting & error handling on fetch
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
try:
from lxml import etree # Needed to follow NetworkLinks
except ImportError:
print >> sys.stderr, "ERROR: lxml module needed for XML handling"
raise
KML_NAMESPACE = "http://earth.google.com/kml/2.2"
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import *
# Here are dependencies listed for reference:
#from gluon import current
#from gluon.html import *
#from gluon.http import HTTP, redirect
from gluon.fileutils import parse_version
from gluon.languages import lazyT, regex_translate
from gluon.storage import Storage
from s3dal import Rows
from s3datetime import s3_format_datetime, s3_parse_datetime
from s3fields import s3_all_meta_field_names
from s3rest import S3Method
from s3track import S3Trackable
from s3utils import s3_include_ext, s3_unicode
DEBUG = False
if DEBUG:
    print >> sys.stderr, "S3GIS: DEBUG MODE"
    def _debug(m):
        print >> sys.stderr, m
else:
    # No-op when debugging is disabled
    _debug = lambda m: None
# Map WKT types to db types
GEOM_TYPES = {"point": 1,
              "linestring": 2,
              "polygon": 3,
              "multipoint": 4,
              "multilinestring": 5,
              "multipolygon": 6,
              "geometrycollection": 7,
              }
# km
RADIUS_EARTH = 6371.01
# Compact JSON encoding
SEPARATORS = (",", ":")
# Map Defaults
# Also in static/S3/s3.gis.js
# http://dev.openlayers.org/docs/files/OpenLayers/Strategy/Cluster-js.html
CLUSTER_ATTRIBUTE = "colour"
CLUSTER_DISTANCE = 20 # pixels
CLUSTER_THRESHOLD = 2 # minimum # of features to form a cluster
# Garmin GPS Symbols
# NOTE: a missing comma after "Amusement Park" previously concatenated it
# with "Ball Park" into a single bogus entry "Amusement ParkBall Park".
GPS_SYMBOLS = ("Airport", "Amusement Park", "Ball Park", "Bank",
               "Bar", "Beach", "Bell", "Boat Ramp",
               "Bowling", "Bridge", "Building", "Campground",
               "Car", "Car Rental", "Car Repair", "Cemetery",
               "Church", "Circle with X", "City (Capitol)", "City (Large)",
               "City (Medium)", "City (Small)", "Civil", "Contact, Dreadlocks",
               "Controlled Area", "Convenience Store", "Crossing", "Dam",
               "Danger Area", "Department Store", "Diver Down Flag 1", "Diver Down Flag 2",
               "Drinking Water", "Exit", "Fast Food", "Fishing Area",
               "Fitness Center", "Flag", "Forest", "Gas Station",
               "Geocache", "Geocache Found", "Ghost Town", "Glider Area",
               "Golf Course", "Green Diamond", "Green Square", "Heliport",
               "Horn", "Hunting Area", "Information", "Levee",
               "Light", "Live Theater", "Lodging", "Man Overboard",
               "Marina", "Medical Facility", "Mile Marker", "Military",
               "Mine", "Movie Theater", "Museum", "Navaid, Amber",
               "Navaid, Black", "Navaid, Blue", "Navaid, Green", "Navaid, Green/Red",
               "Navaid, Green/White", "Navaid, Orange", "Navaid, Red", "Navaid, Red/Green",
               "Navaid, Red/White", "Navaid, Violet", "Navaid, White", "Navaid, White/Green",
               "Navaid, White/Red", "Oil Field", "Parachute Area", "Park",
               "Parking Area", "Pharmacy", "Picnic Area", "Pizza",
               "Post Office", "Private Field", "Radio Beacon", "Red Diamond",
               "Red Square", "Residence", "Restaurant", "Restricted Area",
               "Restroom", "RV Park", "Scales", "Scenic Area",
               "School", "Seaplane Base", "Shipwreck", "Shopping Center",
               "Short Tower", "Shower", "Skiing Area", "Skull and Crossbones",
               "Soft Field", "Stadium", "Summit", "Swimming Area",
               "Tall Tower", "Telephone", "Toll Booth", "TracBack Point",
               "Trail Head", "Truck Stop", "Tunnel", "Ultralight Area",
               "Water Hydrant", "Waypoint", "White Buoy", "White Dot",
               "Zoo",
               )
# -----------------------------------------------------------------------------
class GIS(object):
"""
GeoSpatial functions
"""
# Used to disable location tree updates during prepopulate.
# It is not appropriate to use auth.override for this, as there are times
# (e.g. during tests) when auth.override is turned on, but location tree
# updates should still be enabled.
disable_update_location_tree = False
    def __init__(self):
        """
            Initialise the standard GIS error messages on
            current.messages and set per-instance hierarchy defaults.
        """
        messages = current.messages
        #messages.centroid_error = str(A("Shapely", _href="http://pypi.python.org/pypi/Shapely/", _target="_blank")) + " library not found, so can't find centroid!"
        messages.centroid_error = "Shapely library not functional, so can't find centroid! Install Geos & Shapely for Line/Polygon support"
        messages.unknown_type = "Unknown Type!"
        messages.invalid_wkt_point = "Invalid WKT: must be like POINT(3 4)"
        messages.invalid_wkt = "Invalid WKT: see http://en.wikipedia.org/wiki/Well-known_text"
        messages.lon_empty = "Invalid: Longitude can't be empty if Latitude specified!"
        messages.lat_empty = "Invalid: Latitude can't be empty if Longitude specified!"
        messages.unknown_parent = "Invalid: %(parent_id)s is not a known Location"
        # Fallback Garmin symbol used when a feature has none configured
        self.DEFAULT_SYMBOL = "White Dot"
        # Recognised location hierarchy levels (L0=country .. L5)
        self.hierarchy_level_keys = ("L0", "L1", "L2", "L3", "L4", "L5")
        # Cache of hierarchy level labels, filled lazily
        self.hierarchy_levels = {}
        self.max_allowed_level_num = 4
        # Cache of levels relevant to the active config, filled lazily
        self.relevant_hierarchy_levels = None
# -------------------------------------------------------------------------
    @staticmethod
    def gps_symbols():
        """ Return the tuple of supported Garmin GPS symbol names """
        return GPS_SYMBOLS
# -------------------------------------------------------------------------
    def download_kml(self, record_id, filename, session_id_name, session_id):
        """
        Download a KML file:
            - unzip it if-required
            - follow NetworkLinks recursively if-required

        Save the file to the /uploads folder

        Designed to be called asynchronously using:
            current.s3task.async("download_kml", [record_id, filename])

        @param record_id: id of the record in db.gis_layer_kml
        @param filename: name to save the file as
        @param session_id_name: name of the session
        @param session_id: id of the session

        @ToDo: Pass error messages to Result & have JavaScript listen for these
        """
        request = current.request

        # Look up the layer's source URL
        table = current.s3db.gis_layer_kml
        record = current.db(table.id == record_id).select(table.url,
                                                          limitby=(0, 1)
                                                          ).first()
        url = record.url

        # Target path inside the application's uploads/gis_cache folder
        filepath = os.path.join(request.global_settings.applications_parent,
                                request.folder,
                                "uploads",
                                "gis_cache",
                                filename)
        # fetch_kml writes the file & returns accumulated warning flags
        warning = self.fetch_kml(url, filepath, session_id_name, session_id)

        # @ToDo: Handle errors
        #query = (cachetable.name == name)
        if "URLError" in warning or "HTTPError" in warning:
            # URL inaccessible
            if os.access(filepath, os.R_OK):
                statinfo = os.stat(filepath)
                if statinfo.st_size:
                    # Use cached version
                    #date = db(query).select(cachetable.modified_on,
                    #                        limitby=(0, 1)).first().modified_on
                    #response.warning += "%s %s %s\n" % (url,
                    #                                    T("not accessible - using cached version from"),
                    #                                    str(date))
                    #url = URL(c="default", f="download",
                    #          args=[filename])
                    pass
                else:
                    # 0k file is all that is available
                    #response.warning += "%s %s\n" % (url,
                    #                                 T("not accessible - no cached version available!"))
                    # skip layer
                    return
            else:
                # No cached version available
                #response.warning += "%s %s\n" % (url,
                #                                 T("not accessible - no cached version available!"))
                # skip layer
                return
        else:
            # Download was successful
            #db(query).update(modified_on=request.utcnow)
            if "ParseError" in warning:
                # @ToDo Parse detail
                #response.warning += "%s: %s %s\n" % (T("Layer"),
                #                                     name,
                #                                     T("couldn't be parsed so NetworkLinks not followed."))
                pass
            if "GroundOverlay" in warning or "ScreenOverlay" in warning:
                #response.warning += "%s: %s %s\n" % (T("Layer"),
                #                                     name,
                #                                     T("includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly."))
                # Code to support GroundOverlay:
                # https://github.com/openlayers/openlayers/pull/759
                pass
# -------------------------------------------------------------------------
def fetch_kml(self, url, filepath, session_id_name, session_id):
"""
Fetch a KML file:
- unzip it if-required
- follow NetworkLinks recursively if-required
Returns a file object
Designed as a helper function for download_kml()
"""
from gluon.tools import fetch
response = current.response
public_url = current.deployment_settings.get_base_public_url()
warning = ""
local = False
if not url.startswith("http"):
local = True
url = "%s%s" % (public_url, url)
elif len(url) > len(public_url) and url[:len(public_url)] == public_url:
local = True
if local:
# Keep Session for local URLs
import Cookie
cookie = Cookie.SimpleCookie()
cookie[session_id_name] = session_id
# For sync connections
current.session._unlock(response)
try:
file = fetch(url, cookie=cookie)
except urllib2.URLError:
warning = "URLError"
return warning
except urllib2.HTTPError:
warning = "HTTPError"
return warning
else:
try:
file = fetch(url)
except urllib2.URLError:
warning = "URLError"
return warning
except urllib2.HTTPError:
warning = "HTTPError"
return warning
filenames = []
if file[:2] == "PK":
# Unzip
fp = StringIO(file)
import zipfile
myfile = zipfile.ZipFile(fp)
files = myfile.infolist()
main = None
candidates = []
for _file in files:
filename = _file.filename
if filename == "doc.kml":
main = filename
elif filename[-4:] == ".kml":
candidates.append(filename)
if not main:
if candidates:
# Any better way than this to guess which KML file is the main one?
main = candidates[0]
else:
response.error = "KMZ contains no KML Files!"
return ""
# Write files to cache (other than the main one)
request = current.request
path = os.path.join(request.folder, "static", "cache", "kml")
if not os.path.exists(path):
os.makedirs(path)
for _file in files:
filename = _file.filename
if filename != main:
if "/" in filename:
_filename = filename.split("/")
dir = os.path.join(path, _filename[0])
if not os.path.exists(dir):
os.mkdir(dir)
_filepath = os.path.join(path, *_filename)
else:
_filepath = os.path.join(path, filename)
try:
f = open(_filepath, "wb")
except:
# Trying to write the Folder
pass
else:
filenames.append(filename)
__file = myfile.read(filename)
f.write(__file)
f.close()
# Now read the main one (to parse)
file = myfile.read(main)
myfile.close()
# Check for NetworkLink
if "<NetworkLink>" in file:
try:
# Remove extraneous whitespace
parser = etree.XMLParser(recover=True, remove_blank_text=True)
tree = etree.XML(file, parser)
# Find contents of href tag (must be a better way?)
url = ""
for element in tree.iter():
if element.tag == "{%s}href" % KML_NAMESPACE:
url = element.text
if url:
# Follow NetworkLink (synchronously)
warning2 = self.fetch_kml(url, filepath)
warning += warning2
except (etree.XMLSyntaxError,):
e = sys.exc_info()[1]
warning += "<ParseError>%s %s</ParseError>" % (e.line, e.errormsg)
# Check for Overlays
if "<GroundOverlay>" in file:
warning += "GroundOverlay"
if "<ScreenOverlay>" in file:
warning += "ScreenOverlay"
for filename in filenames:
replace = "%s/%s" % (URL(c="static", f="cache", args=["kml"]),
filename)
# Rewrite all references to point to the correct place
# need to catch <Icon><href> (which could be done via lxml)
# & also <description><![CDATA[<img src=" (which can't)
file = file.replace(filename, replace)
# Write main file to cache
f = open(filepath, "w")
f.write(file)
f.close()
return warning
# -------------------------------------------------------------------------
    @staticmethod
    def geocode(address, postcode=None, Lx_ids=None, geocoder="google"):
        """
        Geocode an Address
            - used by S3LocationSelector
              settings.get_gis_geocode_imported_addresses

        @param address: street address
        @param postcode: postcode
        @param Lx_ids: list of ancestor IDs
        @param geocoder: which geocoder service to use

        @return: dict(lat=..., lon=...) on success, else an error string
        """
        from geopy import geocoders

        if geocoder == "google":
            g = geocoders.GoogleV3()
        elif geocoder == "yahoo":
            apikey = current.deployment_settings.get_gis_api_yahoo()
            g = geocoders.Yahoo(apikey)
        else:
            # @ToDo
            raise NotImplementedError

        # Append the postcode to refine the search string
        location = address
        if postcode:
            location = "%s,%s" % (location, postcode)

        Lx = L5 = L4 = L3 = L2 = L1 = L0 = None
        if Lx_ids:
            # Convert Lx IDs to Names
            table = current.s3db.gis_location
            limit = len(Lx_ids)
            if limit > 1:
                query = (table.id.belongs(Lx_ids))
            else:
                query = (table.id == Lx_ids[0])
            db = current.db
            Lx = db(query).select(table.id,
                                  table.name,
                                  table.level,
                                  table.gis_feature_type,
                                  # Better as separate query
                                  #table.lon_min,
                                  #table.lat_min,
                                  #table.lon_max,
                                  #table.lat_max,
                                  # Better as separate query
                                  #table.wkt,
                                  limitby=(0, limit),
                                  orderby=~table.level
                                  )
            if Lx:
                # Refine the search string with the ancestor names &
                # remember which id belongs to which level
                Lx_names = ",".join([l.name for l in Lx])
                location = "%s,%s" % (location, Lx_names)
                for l in Lx:
                    if l.level == "L0":
                        L0 = l.id
                        continue
                    elif l.level == "L1":
                        L1 = l.id
                        continue
                    elif l.level == "L2":
                        L2 = l.id
                        continue
                    elif l.level == "L3":
                        L3 = l.id
                        continue
                    elif l.level == "L4":
                        L4 = l.id
                        continue
                    elif l.level == "L5":
                        L5 = l.id
                Lx = Lx.as_dict()

        try:
            results = g.geocode(location, exactly_one=False)
            if len(results) == 1:
                place, (lat, lon) = results[0]
                if Lx:
                    output = None
                    # Check Results are for a specific address & not just that for the City
                    results = g.geocode(Lx_names, exactly_one=False)
                    if not results:
                        output = "Can't check that these results are specific enough"
                    for result in results:
                        place2, (lat2, lon2) = result
                        if place == place2:
                            output = "We can only geocode to the Lx"
                            break
                    if not output:
                        # Check Results are within relevant bounds
                        # Use the lowest ancestor which has a real polygon
                        # (gis_feature_type != 1, i.e. not just a Point)
                        L0_row = None
                        wkt = None
                        if L5 and Lx[L5]["gis_feature_type"] != 1:
                            wkt = db(table.id == L5).select(table.wkt,
                                                            limitby=(0, 1)
                                                            ).first().wkt
                            used_Lx = L5
                        elif L4 and Lx[L4]["gis_feature_type"] != 1:
                            wkt = db(table.id == L4).select(table.wkt,
                                                            limitby=(0, 1)
                                                            ).first().wkt
                            used_Lx = L4
                        elif L3 and Lx[L3]["gis_feature_type"] != 1:
                            wkt = db(table.id == L3).select(table.wkt,
                                                            limitby=(0, 1)
                                                            ).first().wkt
                            used_Lx = L3
                        elif L2 and Lx[L2]["gis_feature_type"] != 1:
                            wkt = db(table.id == L2).select(table.wkt,
                                                            limitby=(0, 1)
                                                            ).first().wkt
                            used_Lx = L2
                        elif L1 and Lx[L1]["gis_feature_type"] != 1:
                            wkt = db(table.id == L1).select(table.wkt,
                                                            limitby=(0, 1)
                                                            ).first().wkt
                            used_Lx = L1
                        elif L0:
                            L0_row = db(table.id == L0).select(table.wkt,
                                                               table.lon_min,
                                                               table.lat_min,
                                                               table.lon_max,
                                                               table.lat_max,
                                                               limitby=(0, 1)
                                                               ).first()
                            if not L0_row.wkt.startswith("POI"): # Point
                                wkt = L0_row.wkt
                                used_Lx = L0
                        if wkt:
                            # Check that the geocoded point intersects the polygon
                            from shapely.geometry import point
                            from shapely.wkt import loads as wkt_loads
                            try:
                                # Enable C-based speedups available from 1.2.10+
                                from shapely import speedups
                                speedups.enable()
                            except:
                                current.log.info("S3GIS",
                                                 "Upgrade Shapely for Performance enhancements")
                            test = point.Point(lon, lat)
                            shape = wkt_loads(wkt)
                            ok = test.intersects(shape)
                            if not ok:
                                output = "Returned value not within %s" % Lx[used_Lx]["name"]
                        elif L0:
                            # Check within country at least
                            # (bbox check only - no polygon available)
                            if not L0_row:
                                L0_row = db(table.id == L0).select(table.lon_min,
                                                                   table.lat_min,
                                                                   table.lon_max,
                                                                   table.lat_max,
                                                                   limitby=(0, 1)
                                                                   ).first()
                            if lat < L0_row["lat_max"] and \
                               lat > L0_row["lat_min"] and \
                               lon < L0_row["lon_max"] and \
                               lon > L0_row["lon_min"]:
                                ok = True
                            else:
                                ok = False
                                # NOTE(review): Lx is a dict keyed by id here,
                                # so Lx["name"] looks wrong - confirm intent
                                output = "Returned value not within %s" % Lx["name"]
                        else:
                            # We'll just have to trust it!
                            ok = True
                        if ok:
                            output = dict(lat=lat, lon=lon)
                else:
                    # We'll just have to trust it!
                    output = dict(lat=lat, lon=lon)
            elif len(results):
                output = "Multiple results found"
                # @ToDo: Iterate through the results to see if just 1 is within the right bounds
            else:
                output = "No results found"
        except:
            # Catch-all: return the geocoder/parsing error as a string
            error = sys.exc_info()[1]
            output = str(error)

        return output
# -------------------------------------------------------------------------
    @staticmethod
    def geocode_r(lat, lon):
        """
        Reverse-geocode a Lat/Lon: find which Lx locations contain
        the point
            - used by S3LocationSelector

        @param lat: latitude (degrees)
        @param lon: longitude (degrees)

        @return: an error message string, or a dict {level: location_id}
                 of the Lx locations whose geometry contains the point
        """
        # NOTE(review): "not lat" also rejects the legal coordinates 0.0
        # (equator / prime meridian) -- confirm whether intended
        if not lat or not lon:
            return "Need Lat & Lon"
        results = ""
        # Check vaguely valid
        try:
            lat = float(lat)
        except ValueError:
            results = "Latitude is Invalid!"
        try:
            lon = float(lon)
        except ValueError:
            results += "Longitude is Invalid!"
        if not results:
            if lon > 180 or lon < -180:
                results = "Longitude must be between -180 & 180!"
            elif lat > 90 or lat < -90:
                results = "Latitude must be between -90 & 90!"
            else:
                table = current.s3db.gis_location
                query = (table.level != None) & \
                        (table.deleted != True)
                if current.deployment_settings.get_gis_spatialdb():
                    # Let the spatial database do the point-in-polygon test
                    point = "POINT(%s %s)" % (lon, lat)
                    query &= (table.the_geom.st_intersects(point))
                    rows = current.db(query).select(table.id,
                                                    table.level,
                                                    )
                    results = {}
                    for row in rows:
                        results[row.level] = row.id
                else:
                    # Oh dear, this is going to be slow :/
                    # Filter to the BBOX initially
                    query &= (table.lat_min < lat) & \
                             (table.lat_max > lat) & \
                             (table.lon_min < lon) & \
                             (table.lon_max > lon)
                    rows = current.db(query).select(table.id,
                                                    table.level,
                                                    table.wkt,
                                                    )
                    # Exact point-in-polygon test via Shapely
                    from shapely.geometry import point
                    from shapely.wkt import loads as wkt_loads
                    test = point.Point(lon, lat)
                    results = {}
                    for row in rows:
                        shape = wkt_loads(row.wkt)
                        ok = test.intersects(shape)
                        if ok:
                            #print "Level: %s, id: %s" % (row.level, row.id)
                            results[row.level] = row.id
        return results
# -------------------------------------------------------------------------
@staticmethod
def get_bearing(lat_start, lon_start, lat_end, lon_end):
"""
Given a Start & End set of Coordinates, return a Bearing
Formula from: http://www.movable-type.co.uk/scripts/latlong.html
"""
import math
# shortcuts
cos = math.cos
sin = math.sin
delta_lon = lon_start - lon_end
bearing = math.atan2(sin(delta_lon) * cos(lat_end),
(cos(lat_start) * sin(lat_end)) - \
(sin(lat_start) * cos(lat_end) * cos(delta_lon))
)
# Convert to a compass bearing
bearing = (bearing + 360) % 360
return bearing
# -------------------------------------------------------------------------
def get_bounds(self, features=None, parent=None,
bbox_min_size = 0.05, bbox_inset = 0.007):
"""
Calculate the Bounds of a list of Point Features, suitable for
setting map bounds. If no features are supplied, the current map
configuration bounds will be returned.
e.g. When a map is displayed that focuses on a collection of points,
the map is zoomed to show just the region bounding the points.
e.g. To use in GPX export for correct zooming
`
Ensure a minimum size of bounding box, and that the points
are inset from the border.
@param features: A list of point features
@param bbox_min_size: Minimum bounding box - gives a minimum width
and height in degrees for the region shown.
Without this, a map showing a single point would not show any
extent around that point.
@param bbox_inset: Bounding box insets - adds a small amount of
distance outside the points.
Without this, the outermost points would be on the bounding
box, and might not be visible.
@return: An appropriate map bounding box, as a dict:
dict(lon_min=lon_min, lat_min=lat_min,
lon_max=lon_max, lat_max=lat_max)
@ToDo: Support Polygons (separate function?)
"""
if features:
lon_min = 180
lat_min = 90
lon_max = -180
lat_max = -90
# Is this a simple feature set or the result of a join?
try:
lon = features[0].lon
simple = True
except (AttributeError, KeyError):
simple = False
# @ToDo: Optimised Geospatial routines rather than this crude hack
for feature in features:
try:
if simple:
lon = feature.lon
lat = feature.lat
else:
# A Join
lon = feature.gis_location.lon
lat = feature.gis_location.lat
except AttributeError:
# Skip any rows without the necessary lat/lon fields
continue
# Also skip those set to None. Note must use explicit test,
# as zero is a legal value.
if lon is None or lat is None:
continue
lon_min = min(lon, lon_min)
lat_min = min(lat, lat_min)
lon_max = max(lon, lon_max)
lat_max = max(lat, lat_max)
# Assure a reasonable-sized box.
delta_lon = (bbox_min_size - (lon_max - lon_min)) / 2.0
if delta_lon > 0:
lon_min -= delta_lon
lon_max += delta_lon
delta_lat = (bbox_min_size - (lat_max - lat_min)) / 2.0
if delta_lat > 0:
lat_min -= delta_lat
lat_max += delta_lat
# Move bounds outward by specified inset.
lon_min -= bbox_inset
lon_max += bbox_inset
lat_min -= bbox_inset
lat_max += bbox_inset
else:
# no features
config = GIS.get_config()
if config.lat_min is not None:
lat_min = config.lat_min
else:
lat_min = -90
if config.lon_min is not None:
lon_min = config.lon_min
else:
lon_min = -180
if config.lat_max is not None:
lat_max = config.lat_max
else:
lat_max = 90
if config.lon_max is not None:
lon_max = config.lon_max
else:
lon_max = 180
return dict(lon_min=lon_min, lat_min=lat_min,
lon_max=lon_max, lat_max=lat_max)
# -------------------------------------------------------------------------
    def get_parent_bounds(self, parent=None):
        """
        Get bounds from the specified (parent) location and its ancestors.
        This is used to validate lat, lon, and bounds for child locations.

        Caution: This calls update_location_tree if the parent bounds are
        not set. During prepopulate, update_location_tree is disabled,
        so unless the parent contains its own bounds (i.e. they do not need
        to be propagated down from its ancestors), this will not provide a
        check on location nesting. Prepopulate data should be prepared to
        be correct. A set of candidate prepopulate data can be tested by
        importing after prepopulate is run.

        @param parent: A location_id to provide bounds suitable
                       for validating child locations

        @return: bounding box and parent location name, as a list:
                 [lat_min, lon_min, lat_max, lon_max, parent_name]

        @ToDo: Support Polygons (separate function?)
        """
        table = current.s3db.gis_location
        db = current.db
        parent = db(table.id == parent).select(table.id,
                                               table.level,
                                               table.name,
                                               table.parent,
                                               table.path,
                                               table.lon,
                                               table.lat,
                                               table.lon_min,
                                               table.lat_min,
                                               table.lon_max,
                                               table.lat_max).first()
        # Bounds are unusable if missing or degenerate (zero-area box)
        if parent.lon_min is None or \
           parent.lon_max is None or \
           parent.lat_min is None or \
           parent.lat_max is None or \
           parent.lon_min == parent.lon_max or \
           parent.lat_min == parent.lat_max:
            # This is unsuitable - try higher parent
            if parent.level == "L1":
                if parent.parent:
                    # We can trust that L0 should have the data from prepop
                    L0 = db(table.id == parent.parent).select(table.name,
                                                              table.lon_min,
                                                              table.lat_min,
                                                              table.lon_max,
                                                              table.lat_max).first()
                    return L0.lat_min, L0.lon_min, L0.lat_max, L0.lon_max, L0.name
            if parent.path:
                path = parent.path
            else:
                # This will return None during prepopulate.
                path = GIS.update_location_tree(dict(id=parent.id,
                                                     level=parent.level))
            if path:
                # Walk the ancestor chain from the bottom up, looking for
                # the first level with usable (non-degenerate) bounds
                path_list = map(int, path.split("/"))
                rows = db(table.id.belongs(path_list)).select(table.level,
                                                              table.name,
                                                              table.lat,
                                                              table.lon,
                                                              table.lon_min,
                                                              table.lat_min,
                                                              table.lon_max,
                                                              table.lat_max,
                                                              orderby=table.level)
                row_list = rows.as_list()
                row_list.reverse()
                ok = False
                for row in row_list:
                    if row["lon_min"] is not None and row["lon_max"] is not None and \
                       row["lat_min"] is not None and row["lat_max"] is not None and \
                       row["lon"] != row["lon_min"] != row["lon_max"] and \
                       row["lat"] != row["lat_min"] != row["lat_max"]:
                        ok = True
                        break
                if ok:
                    # This level is suitable
                    return row["lat_min"], row["lon_min"], row["lat_max"], row["lon_max"], row["name"]
        else:
            # This level is suitable
            return parent.lat_min, parent.lon_min, parent.lat_max, parent.lon_max, parent.name

        # No ancestor bounds available -- use the active gis_config.
        config = GIS.get_config()
        if config:
            return config.lat_min, config.lon_min, config.lat_max, config.lon_max, None

        # Last resort -- fall back to no restriction.
        return -90, -180, 90, 180, None
# -------------------------------------------------------------------------
@staticmethod
def _lookup_parent_path(feature_id):
"""
Helper that gets parent and path for a location.
"""
db = current.db
table = db.gis_location
feature = db(table.id == feature_id).select(table.id,
table.name,
table.level,
table.path,
table.parent,
limitby=(0, 1)).first()
return feature
# -------------------------------------------------------------------------
@staticmethod
def get_children(id, level=None):
"""
Return a list of IDs of all GIS Features which are children of
the requested feature, using Materialized path for retrieving
the children
This has been chosen over Modified Preorder Tree Traversal for
greater efficiency:
http://eden.sahanafoundation.org/wiki/HaitiGISToDo#HierarchicalTrees
@param: level - optionally filter by level
@return: Rows object containing IDs & Names
Note: This does NOT include the parent location itself
"""
db = current.db
try:
table = db.gis_location
except:
# Being run from CLI for debugging
table = current.s3db.gis_location
query = (table.deleted == False)
if level:
query &= (table.level == level)
term = str(id)
path = table.path
query &= ((path.like(term + "/%")) | \
(path.like("%/" + term + "/%")))
children = db(query).select(table.id,
table.name)
return children
# -------------------------------------------------------------------------
    @staticmethod
    def get_parents(feature_id, feature=None, ids_only=False):
        """
        Returns a list containing ancestors of the requested feature.

        If the caller already has the location row, including path and
        parent fields, they can supply it via feature to avoid a db lookup.

        If ids_only is false, each element in the list is a gluon.sql.Row
        containing the gis_location record of an ancestor of the specified
        location.

        If ids_only is true, just returns a list of ids of the parents.
        This avoids a db lookup for the parents if the specified feature
        has a path.

        List elements are in the opposite order as the location path and
        exclude the specified location itself, i.e. element 0 is the parent
        and the last element is the most distant ancestor.

        Assists lazy update of a database without location paths by calling
        update_location_tree to get the path.

        Note that during prepopulate, update_location_tree is disabled,
        in which case this will only return the immediate parent.
        """
        if not feature or "path" not in feature or "parent" not in feature:
            feature = GIS._lookup_parent_path(feature_id)

        if feature and (feature.path or feature.parent):
            if feature.path:
                path = feature.path
            else:
                # Lazily build the materialized path (no-op in prepopulate)
                path = GIS.update_location_tree(feature)

            if path:
                path_list = map(int, path.split("/"))
                if len(path_list) == 1:
                    # No parents - path contains only this feature.
                    return None
                # Get only ancestors
                path_list = path_list[:-1]
                # Get path in the desired -- reversed -- order.
                path_list.reverse()
            elif feature.parent:
                # No path available: fall back to the immediate parent only
                path_list = [feature.parent]
            else:
                return None

            # If only ids are wanted, stop here.
            if ids_only:
                return path_list

            # Retrieve parents - order in which they're returned is arbitrary.
            s3db = current.s3db
            table = s3db.gis_location
            query = (table.id.belongs(path_list))
            fields = [table.id, table.name, table.level, table.lat, table.lon]
            unordered_parents = current.db(query).select(cache=s3db.cache,
                                                         *fields)

            # Reorder parents in order of reversed path.
            unordered_ids = [row.id for row in unordered_parents]
            parents = [unordered_parents[unordered_ids.index(path_id)]
                       for path_id in path_list if path_id in unordered_ids]

            return parents

        else:
            return None
# -------------------------------------------------------------------------
    def get_parent_per_level(self, results, feature_id,
                             feature=None,
                             ids=True,
                             names=True):
        """
        Adds the ancestor of the requested feature for each level to the
        supplied dict.

        If the caller already has the location row, including path and
        parent fields, they can supply it via feature to avoid a db lookup.

        If a dict is not supplied in results, one is created. The results
        dict is returned in either case.

        If ids=True and names=False (used by old S3LocationSelectorWidget):
        For each ancestor, an entry is added to results, like
        ancestor.level : ancestor.id

        If ids=False and names=True (used by address_onvalidation):
        For each ancestor, an entry is added to results, like
        ancestor.level : ancestor.name

        If ids=True and names=True (used by new S3LocationSelectorWidget):
        For each ancestor, an entry is added to results, like
        ancestor.level : {name : ancestor.name, id: ancestor.id}
        """
        if not results:
            results = {}
        _id = feature_id
        # if we don't have a feature or a feature ID return the dict as-is
        if not feature_id and not feature:
            return results
        if not feature_id and "path" not in feature and "parent" in feature:
            # gis_location_onvalidation on a Create => no ID yet
            # Read the Parent's path instead
            feature = self._lookup_parent_path(feature.parent)
            _id = feature.id
        elif not feature or "path" not in feature or "parent" not in feature:
            # Caller didn't supply enough of the row => look it up
            feature = self._lookup_parent_path(feature_id)
        if feature and (feature.path or feature.parent):
            if feature.path:
                path = feature.path
            else:
                # Lazy update of the location tree to obtain the path
                # (no-op during prepopulate)
                path = self.update_location_tree(feature)
            # Get ids of ancestors at each level.
            if feature.parent:
                strict = self.get_strict_hierarchy(feature.parent)
            else:
                strict = self.get_strict_hierarchy(_id)
            if path and strict and not names:
                # No need to do a db lookup for parents in this case -- we
                # know the levels of the parents from their position in path.
                # Note ids returned from db are ints, not strings, so be
                # consistent with that.
                path_ids = map(int, path.split("/"))
                # This skips the last path element, which is the supplied
                # location.
                for (i, _id) in enumerate(path_ids[:-1]):
                    results["L%i" % i] = _id
            elif path:
                # Need the full rows to get names and/or non-strict levels
                ancestors = self.get_parents(_id, feature=feature)
                if ancestors:
                    for ancestor in ancestors:
                        if ancestor.level and ancestor.level in self.hierarchy_level_keys:
                            if names and ids:
                                results[ancestor.level] = Storage()
                                results[ancestor.level].name = ancestor.name
                                results[ancestor.level].id = ancestor.id
                            elif names:
                                results[ancestor.level] = ancestor.name
                            else:
                                results[ancestor.level] = ancestor.id
            if not feature_id:
                # Add the Parent in (we only need the version required for gis_location onvalidation here)
                results[feature.level] = feature.name
            if names:
                # We need to have entries for all levels
                # (both for address onvalidation & new LocationSelector)
                # NOTE(review): has_key is Python-2-only
                hierarchy_level_keys = self.hierarchy_level_keys
                for key in hierarchy_level_keys:
                    if not results.has_key(key):
                        results[key] = None
        return results
# -------------------------------------------------------------------------
def update_table_hierarchy_labels(self, tablename=None):
"""
Re-set table options that depend on location_hierarchy
Only update tables which are already defined
"""
levels = ("L1", "L2", "L3", "L4", "L5")
labels = self.get_location_hierarchy()
db = current.db
if tablename and tablename in db:
# Update the specific table which has just been defined
table = db[tablename]
if tablename == "gis_location":
labels["L0"] = current.messages.COUNTRY
table.level.requires = \
IS_EMPTY_OR(IS_IN_SET(labels))
else:
for level in levels:
table[level].label = labels[level]
else:
# Do all Tables which are already defined
# gis_location
if "gis_location" in db:
table = db.gis_location
table.level.requires = \
IS_EMPTY_OR(IS_IN_SET(labels))
# These tables store location hierarchy info for XSLT export.
# Labels are used for PDF & XLS Reports
tables = ["org_office",
#"pr_person",
"pr_address",
"cr_shelter",
"asset_asset",
#"hms_hospital",
]
for tablename in tables:
if tablename in db:
table = db[tablename]
for level in levels:
table[level].label = labels[level]
# -------------------------------------------------------------------------
@staticmethod
def set_config(config_id=None, force_update_cache=False):
"""
Reads the specified GIS config from the DB, caches it in response.
Passing in a false or non-existent id will cause the personal config,
if any, to be used, else the site config (uuid SITE_DEFAULT), else
their fallback values defined in this class.
If force_update_cache is true, the config will be read and cached in
response even if the specified config is the same as what's already
cached. Used when the config was just written.
The config itself will be available in response.s3.gis.config.
Scalar fields from the gis_config record and its linked
gis_projection record have the same names as the fields in their
tables and can be accessed as response.s3.gis.<fieldname>.
Returns the id of the config it actually used, if any.
@param: config_id. use '0' to set the SITE_DEFAULT
@ToDo: Merge configs for Event
"""
_gis = current.response.s3.gis
# If an id has been supplied, try it first. If it matches what's in
# response, there's no work to do.
if config_id and not force_update_cache and \
_gis.config and \
_gis.config.id == config_id:
return
db = current.db
s3db = current.s3db
ctable = s3db.gis_config
mtable = s3db.gis_marker
ptable = s3db.gis_projection
stable = s3db.gis_style
fields = (ctable.id,
ctable.default_location_id,
ctable.region_location_id,
ctable.geocoder,
ctable.lat_min,
ctable.lat_max,
ctable.lon_min,
ctable.lon_max,
ctable.zoom,
ctable.lat,
ctable.lon,
ctable.pe_id,
ctable.wmsbrowser_url,
ctable.wmsbrowser_name,
ctable.zoom_levels,
mtable.image,
mtable.height,
mtable.width,
ptable.epsg,
ptable.proj4js,
ptable.maxExtent,
ptable.units,
)
cache = Storage()
row = None
rows = None
if config_id:
# Merge this one with the Site Default
query = (ctable.id == config_id) | \
(ctable.uuid == "SITE_DEFAULT")
# May well not be complete, so Left Join
left = (ptable.on(ptable.id == ctable.projection_id),
stable.on((stable.config_id == ctable.id) & \
(stable.layer_id == None)),
mtable.on(mtable.id == stable.marker_id),
)
rows = db(query).select(*fields,
left=left,
orderby=ctable.pe_type,
limitby=(0, 2))
if len(rows) == 1:
# The requested config must be invalid, so just use site default
row = rows.first()
elif config_id is 0:
# Use site default
query = (ctable.uuid == "SITE_DEFAULT")
# May well not be complete, so Left Join
left = (ptable.on(ptable.id == ctable.projection_id),
stable.on((stable.config_id == ctable.id) & \
(stable.layer_id == None)),
mtable.on(mtable.id == stable.marker_id),
)
row = db(query).select(*fields,
limitby=(0, 1)).first()
if not row:
# No configs found at all
_gis.config = cache
return cache
# If no id supplied, extend the site config with any personal or OU configs
if not rows and not row:
auth = current.auth
if auth.is_logged_in():
# Read personalised config, if available.
user = auth.user
pe_id = user.pe_id
# Also look for OU configs
pes = []
if user.organisation_id:
# Add the user account's Org to the list
# (Will take lower-priority than Personal)
otable = s3db.org_organisation
org = db(otable.id == user.organisation_id).select(otable.pe_id,
limitby=(0, 1)
).first()
try:
pes.append(org.pe_id)
except:
current.log.warning("Unable to find Org %s" % user.organisation_id)
if current.deployment_settings.get_org_branches():
# Also look for Parent Orgs
ancestors = s3db.pr_get_ancestors(org.pe_id)
pes += ancestors
if user.site_id:
# Add the user account's Site to the list
# (Will take lower-priority than Org/Personal)
site_pe_id = s3db.pr_get_pe_id("org_site", user.site_id)
if site_pe_id:
pes.append(site_pe_id)
if user.org_group_id:
# Add the user account's Org Group to the list
# (Will take lower-priority than Site/Org/Personal)
ogtable = s3db.org_group
ogroup = db(ogtable.id == user.org_group_id).select(ogtable.pe_id,
limitby=(0, 1)
).first()
pes = list(pes)
try:
pes.append(ogroup.pe_id)
except:
current.log.warning("Unable to find Org Group %s" % user.org_group_id)
query = (ctable.uuid == "SITE_DEFAULT") | \
((ctable.pe_id == pe_id) & \
(ctable.pe_default != False))
if len(pes) == 1:
query |= (ctable.pe_id == pes[0])
else:
query |= (ctable.pe_id.belongs(pes))
# Personal/OU may well not be complete, so Left Join
left = (ptable.on(ptable.id == ctable.projection_id),
stable.on((stable.config_id == ctable.id) & \
(stable.layer_id == None)),
mtable.on(mtable.id == stable.marker_id),
)
# Order by pe_type (defined in gis_config)
# @ToDo: Sort orgs from the hierarchy?
# (Currently we just have branch > non-branch in pe_type)
rows = db(query).select(*fields,
left=left,
orderby=ctable.pe_type)
if len(rows) == 1:
row = rows.first()
if rows and not row:
# Merge Configs
cache["ids"] = []
for row in rows:
config = row["gis_config"]
if not config_id:
config_id = config.id
cache["ids"].append(config.id)
for key in config:
if key in ["delete_record", "gis_layer_config", "gis_menu", "update_record"]:
continue
if key not in cache or cache[key] is None:
cache[key] = config[key]
if "epsg" not in cache or cache["epsg"] is None:
projection = row["gis_projection"]
for key in ["epsg", "units", "maxExtent", "proj4js"]:
cache[key] = projection[key] if key in projection \
else None
if "marker_image" not in cache or \
cache["marker_image"] is None:
marker = row["gis_marker"]
for key in ["image", "height", "width"]:
cache["marker_%s" % key] = marker[key] if key in marker \
else None
# Add NULL values for any that aren't defined, to avoid KeyErrors
for key in ["epsg", "units", "proj4js", "maxExtent",
"marker_image", "marker_height", "marker_width",
]:
if key not in cache:
cache[key] = None
if not row:
# No personal config or not logged in. Use site default.
query = (ctable.uuid == "SITE_DEFAULT") & \
(mtable.id == stable.marker_id) & \
(stable.config_id == ctable.id) & \
(stable.layer_id == None) & \
(ptable.id == ctable.projection_id)
row = db(query).select(*fields,
limitby=(0, 1)).first()
if not row:
# No configs found at all
_gis.config = cache
return cache
if not cache:
# We had a single row
config = row["gis_config"]
config_id = config.id
cache["ids"] = [config_id]
projection = row["gis_projection"]
marker = row["gis_marker"]
for key in config:
cache[key] = config[key]
for key in ["epsg", "maxExtent", "proj4js", "units"]:
cache[key] = projection[key] if key in projection else None
for key in ["image", "height", "width"]:
cache["marker_%s" % key] = marker[key] if key in marker \
else None
# Store the values
_gis.config = cache
return cache
# -------------------------------------------------------------------------
@staticmethod
def get_config():
"""
Returns the current GIS config structure.
@ToDo: Config() class
"""
_gis = current.response.s3.gis
if not _gis.config:
# Ask set_config to put the appropriate config in response.
if current.session.s3.gis_config_id:
GIS.set_config(current.session.s3.gis_config_id)
else:
GIS.set_config()
return _gis.config
# -------------------------------------------------------------------------
    def get_location_hierarchy(self, level=None, location=None):
        """
        Returns the location hierarchy and its labels

        @param: level - a specific level for which to lookup the label
        @param: location - the location_id to lookup the location for
                currently only the actual location is supported
        @ToDo: Do a search of parents to allow this
               lookup for any location
        """
        # Per-instance cache of the labels (only used when no specific
        # location is requested)
        _levels = self.hierarchy_levels
        _location = location
        if not location and _levels:
            # Use cached value
            if level:
                if level in _levels:
                    return _levels[level]
                else:
                    # Not a current hierarchy level => echo the key back
                    return level
            else:
                return _levels
        COUNTRY = current.messages.COUNTRY
        if level == "L0":
            # L0 label is fixed, no lookup needed
            return COUNTRY
        db = current.db
        s3db = current.s3db
        table = s3db.gis_hierarchy
        fields = (table.uuid,
                  table.L1,
                  table.L2,
                  table.L3,
                  table.L4,
                  table.L5,
                  )
        # Always include the site default as fallback
        query = (table.uuid == "SITE_DEFAULT")
        if not location:
            config = GIS.get_config()
            location = config.region_location_id
        if location:
            # Try the Region, but ensure we have the fallback available in a single query
            query = query | (table.location_id == location)
        rows = db(query).select(cache=s3db.cache,
                                *fields)
        if len(rows) > 1:
            # Remove the Site Default (region-specific row wins)
            _filter = lambda row: row.uuid == "SITE_DEFAULT"
            rows.exclude(_filter)
        elif not rows:
            # prepop hasn't run yet => fall back to the raw level keys
            if level:
                return level
            levels = OrderedDict()
            hierarchy_level_keys = self.hierarchy_level_keys
            for key in hierarchy_level_keys:
                if key == "L0":
                    levels[key] = COUNTRY
                else:
                    levels[key] = key
            return levels
        T = current.T
        row = rows.first()
        if level:
            try:
                return T(row[level])
            except:
                # Level column missing or empty => echo the key back
                return level
        else:
            levels = OrderedDict()
            hierarchy_level_keys = self.hierarchy_level_keys
            for key in hierarchy_level_keys:
                if key == "L0":
                    levels[key] = COUNTRY
                elif key in row and row[key]:
                    # Only include rows with values
                    levels[key] = str(T(row[key]))
            if not _location:
                # Cache the value
                self.hierarchy_levels = levels
            if level:
                return levels[level]
            else:
                return levels
# -------------------------------------------------------------------------
def get_strict_hierarchy(self, location=None):
"""
Returns the strict hierarchy value from the current config.
@param: location - the location_id of the record to check
"""
s3db = current.s3db
table = s3db.gis_hierarchy
# Read the system default
# @ToDo: Check for an active gis_config region?
query = (table.uuid == "SITE_DEFAULT")
if location:
# Try the Location's Country, but ensure we have the fallback available in a single query
query = query | (table.location_id == self.get_parent_country(location))
rows = current.db(query).select(table.uuid,
table.strict_hierarchy,
cache=s3db.cache)
if len(rows) > 1:
# Remove the Site Default
_filter = lambda row: row.uuid == "SITE_DEFAULT"
rows.exclude(_filter)
row = rows.first()
if row:
strict = row.strict_hierarchy
else:
# Pre-pop hasn't run yet
return False
return strict
# -------------------------------------------------------------------------
def get_max_hierarchy_level(self):
"""
Returns the deepest level key (i.e. Ln) in the current hierarchy.
- used by gis_location_onvalidation()
"""
location_hierarchy = self.get_location_hierarchy()
return max(location_hierarchy)
# -------------------------------------------------------------------------
def get_all_current_levels(self, level=None):
"""
Get the current hierarchy levels plus non-hierarchy levels.
"""
all_levels = OrderedDict()
all_levels.update(self.get_location_hierarchy())
#T = current.T
#all_levels["GR"] = T("Location Group")
#all_levels["XX"] = T("Imported")
if level:
try:
return all_levels[level]
except Exception, e:
return level
else:
return all_levels
# -------------------------------------------------------------------------
def get_relevant_hierarchy_levels(self, as_dict=False):
"""
Get current location hierarchy levels relevant for the user
"""
levels = self.relevant_hierarchy_levels
if not levels:
levels = OrderedDict(self.get_location_hierarchy())
if len(current.deployment_settings.get_gis_countries()) == 1 or \
current.response.s3.gis.config.region_location_id:
levels.pop("L0", None)
self.relevant_hierarchy_levels = levels
if not as_dict:
return levels.keys()
else:
return levels
# -------------------------------------------------------------------------
@staticmethod
def get_countries(key_type="id"):
"""
Returns country code or L0 location id versus name for all countries.
The lookup is cached in the session
If key_type is "code", these are returned as an OrderedDict with
country code as the key. If key_type is "id", then the location id
is the key. In all cases, the value is the name.
"""
session = current.session
if "gis" not in session:
session.gis = Storage()
gis = session.gis
if gis.countries_by_id:
cached = True
else:
cached = False
if not cached:
s3db = current.s3db
table = s3db.gis_location
ttable = s3db.gis_location_tag
query = (table.level == "L0") & \
(ttable.tag == "ISO2") & \
(ttable.location_id == table.id)
countries = current.db(query).select(table.id,
table.name,
ttable.value,
orderby=table.name)
if not countries:
return []
countries_by_id = OrderedDict()
countries_by_code = OrderedDict()
for row in countries:
location = row["gis_location"]
countries_by_id[location.id] = location.name
countries_by_code[row["gis_location_tag"].value] = location.name
# Cache in the session
gis.countries_by_id = countries_by_id
gis.countries_by_code = countries_by_code
if key_type == "id":
return countries_by_id
else:
return countries_by_code
elif key_type == "id":
return gis.countries_by_id
else:
return gis.countries_by_code
# -------------------------------------------------------------------------
@staticmethod
def get_country(key, key_type="id"):
"""
Returns country name for given code or id from L0 locations.
The key can be either location id or country code, as specified
by key_type.
"""
if key:
if current.gis.get_countries(key_type):
if key_type == "id":
return current.session.gis.countries_by_id[key]
else:
return current.session.gis.countries_by_code[key]
return None
# -------------------------------------------------------------------------
    def get_parent_country(self, location, key_type="id"):
        """
        Returns the parent country for a given record

        @param: location: the location (record) or id to search for
        @param: key_type: whether to return an id or code

        @ToDo: Optimise to not use try/except
        """
        if not location:
            return None
        db = current.db
        s3db = current.s3db
        # @ToDo: Avoid try/except here!
        # - separate parameters best as even isinstance is expensive
        try:
            # location is passed as integer (location_id)
            table = s3db.gis_location
            location = db(table.id == location).select(table.id,
                                                       table.path,
                                                       table.level,
                                                       limitby=(0, 1),
                                                       cache=s3db.cache).first()
        except:
            # location is passed as record
            pass
        if location.level == "L0":
            # The location itself is a country
            if key_type == "id":
                return location.id
            elif key_type == "code":
                # Look up its ISO2 code from the tags
                ttable = s3db.gis_location_tag
                query = (ttable.tag == "ISO2") & \
                        (ttable.location_id == location.id)
                tag = db(query).select(ttable.value,
                                       limitby=(0, 1)).first()
                try:
                    return tag.value
                except:
                    # No ISO2 tag present
                    return None
        else:
            # Search the ancestors for the L0
            parents = self.get_parents(location.id,
                                       feature=location)
            if parents:
                for row in parents:
                    if row.level == "L0":
                        if key_type == "id":
                            return row.id
                        elif key_type == "code":
                            ttable = s3db.gis_location_tag
                            query = (ttable.tag == "ISO2") & \
                                    (ttable.location_id == row.id)
                            tag = db(query).select(ttable.value,
                                                   limitby=(0, 1)).first()
                            try:
                                return tag.value
                            except:
                                # No ISO2 tag present
                                return None
        # No L0 ancestor found
        return None
# -------------------------------------------------------------------------
def get_default_country(self, key_type="id"):
"""
Returns the default country for the active gis_config
@param: key_type: whether to return an id or code
"""
config = GIS.get_config()
if config.default_location_id:
return self.get_parent_country(config.default_location_id,
key_type=key_type)
return None
# -------------------------------------------------------------------------
    def get_features_in_polygon(self, location, tablename=None, category=None):
        """
        Returns a gluon.sql.Rows of Features within a Polygon.
        The Polygon can be either a WKT string or the ID of a record in the
        gis_location table

        Currently unused.
        @ToDo: Optimise to not use try/except
        """
        from shapely.geos import ReadingError
        from shapely.wkt import loads as wkt_loads
        try:
            # Enable C-based speedups available from 1.2.10+
            from shapely import speedups
            speedups.enable()
        except:
            current.log.info("S3GIS",
                             "Upgrade Shapely for Performance enhancements")
        db = current.db
        s3db = current.s3db
        locations = s3db.gis_location
        try:
            # location is passed as a location id
            location_id = int(location)
            # Check that the location is a polygon
            location = db(locations.id == location_id).select(locations.wkt,
                                                              locations.lon_min,
                                                              locations.lon_max,
                                                              locations.lat_min,
                                                              locations.lat_max,
                                                              limitby=(0, 1)
                                                              ).first()
            if location:
                wkt = location.wkt
                if wkt and (wkt.startswith("POLYGON") or \
                            wkt.startswith("MULTIPOLYGON")):
                    # ok - keep its bbox for the fast pre-filter below
                    lon_min = location.lon_min
                    lon_max = location.lon_max
                    lat_min = location.lat_min
                    lat_max = location.lat_max
                else:
                    current.log.error("Location searched within isn't a Polygon!")
                    return None
        except: # @ToDo: need specific exception
            # location is assumed to be a raw WKT string => no bbox available
            wkt = location
            if (wkt.startswith("POLYGON") or wkt.startswith("MULTIPOLYGON")):
                # ok
                lon_min = None
            else:
                current.log.error("This isn't a Polygon!")
                return None
        try:
            polygon = wkt_loads(wkt)
        except: # @ToDo: need specific exception
            current.log.error("Invalid Polygon!")
            return None
        table = s3db[tablename]
        if "location_id" not in table.fields():
            # @ToDo: Add any special cases to be able to find the linked location
            current.log.error("This table doesn't have a location_id!")
            return None
        query = (table.location_id == locations.id)
        if "deleted" in table.fields:
            query &= (table.deleted == False)
        # @ToDo: Check AAA (do this as a resource filter?)
        features = db(query).select(locations.wkt,
                                    locations.lat,
                                    locations.lon,
                                    table.ALL)
        output = Rows()
        # @ToDo: provide option to use PostGIS/Spatialite
        # settings = current.deployment_settings
        # if settings.gis.spatialdb and settings.database.db_type == "postgres":
        if lon_min is None:
            # We have no BBOX so go straight to the full geometry check
            for row in features:
                _location = row.gis_location
                wkt = _location.wkt
                if wkt is None:
                    # No geometry stored => fall back to the Point
                    lat = _location.lat
                    lon = _location.lon
                    if lat is not None and lon is not None:
                        wkt = self.latlon_to_wkt(lat, lon)
                    else:
                        continue
                try:
                    shape = wkt_loads(wkt)
                    if shape.intersects(polygon):
                        # Save Record
                        output.records.append(row)
                except ReadingError:
                    current.log.error("Error reading wkt of location with id",
                                      value=row.id)
        else:
            # 1st check for Features included within the bbox (faster)
            def in_bbox(row):
                _location = row.gis_location
                return (_location.lon > lon_min) & \
                       (_location.lon < lon_max) & \
                       (_location.lat > lat_min) & \
                       (_location.lat < lat_max)
            for row in features.find(lambda row: in_bbox(row)):
                # Search within this subset with a full geometry check
                # Uses Shapely.
                _location = row.gis_location
                wkt = _location.wkt
                if wkt is None:
                    # No geometry stored => fall back to the Point
                    lat = _location.lat
                    lon = _location.lon
                    if lat is not None and lon is not None:
                        wkt = self.latlon_to_wkt(lat, lon)
                    else:
                        continue
                try:
                    shape = wkt_loads(wkt)
                    if shape.intersects(polygon):
                        # Save Record
                        output.records.append(row)
                except ReadingError:
                    current.log.error("Error reading wkt of location with id",
                                      value = row.id)
        return output
# -------------------------------------------------------------------------
@staticmethod
def get_polygon_from_bounds(bbox):
"""
Given a gis_location record or a bounding box dict with keys
lon_min, lon_max, lat_min, lat_max, construct a WKT polygon with
points at the corners.
"""
lon_min = bbox["lon_min"]
lon_max = bbox["lon_max"]
lat_min = bbox["lat_min"]
lat_max = bbox["lat_max"]
# Take the points in a counterclockwise direction.
points = [(lon_min, lat_min),
(lon_min, lat_max),
(lon_max, lat_max),
(lon_min, lat_max),
(lon_min, lat_min)]
pairs = ["%s %s" % (p[0], p[1]) for p in points]
wkt = "POLYGON ((%s))" % ", ".join(pairs)
return wkt
# -------------------------------------------------------------------------
@staticmethod
def get_bounds_from_radius(lat, lon, radius):
"""
Compute a bounding box given a Radius (in km) of a LatLon Location
Note the order of the parameters.
@return a dict containing the bounds with keys min_lon, max_lon,
min_lat, max_lat
See:
http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates
"""
import math
radians = math.radians
degrees = math.degrees
MIN_LAT = radians(-90) # -PI/2
MAX_LAT = radians(90) # PI/2
MIN_LON = radians(-180) # -PI
MAX_LON = radians(180) # PI
# Convert to radians for the calculation
r = float(radius) / RADIUS_EARTH
radLat = radians(lat)
radLon = radians(lon)
# Calculate the bounding box
minLat = radLat - r
maxLat = radLat + r
if (minLat > MIN_LAT) and (maxLat < MAX_LAT):
deltaLon = math.asin(math.sin(r) / math.cos(radLat))
minLon = radLon - deltaLon
if (minLon < MIN_LON):
minLon += 2 * math.pi
maxLon = radLon + deltaLon
if (maxLon > MAX_LON):
maxLon -= 2 * math.pi
else:
# Special care for Poles & 180 Meridian:
# http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates#PolesAnd180thMeridian
minLat = max(minLat, MIN_LAT)
maxLat = min(maxLat, MAX_LAT)
minLon = MIN_LON
maxLon = MAX_LON
# Convert back to degrees
minLat = degrees(minLat)
minLon = degrees(minLon)
maxLat = degrees(maxLat)
maxLon = degrees(maxLon)
return dict(lat_min = minLat,
lat_max = maxLat,
lon_min = minLon,
lon_max = maxLon)
# -------------------------------------------------------------------------
    def get_features_in_radius(self, lat, lon, radius, tablename=None, category=None):
        """
        Returns Features within a Radius (in km) of a LatLon Location

        Unused
        """
        import math
        db = current.db
        settings = current.deployment_settings
        if settings.gis.spatialdb and settings.database.db_type == "postgres":
            # Use PostGIS routine
            # The ST_DWithin function call will automatically include a bounding box comparison that will make use of any indexes that are available on the geometries.
            # @ToDo: Support optional Category (make this a generic filter?)
            import psycopg2
            import psycopg2.extras
            dbname = settings.database.database
            username = settings.database.username
            password = settings.database.password
            host = settings.database.host
            port = settings.database.port or "5432"
            # Convert km to degrees (since we're using the_geom not the_geog)
            radius = math.degrees(float(radius) / RADIUS_EARTH)
            # NOTE(review): bypasses the web2py DAL & opens a 2nd connection
            connection = psycopg2.connect("dbname=%s user=%s password=%s host=%s port=%s" % (dbname, username, password, host, port))
            cursor = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
            info_string = "SELECT column_name, udt_name FROM information_schema.columns WHERE table_name = 'gis_location' or table_name = '%s';" % tablename
            cursor.execute(info_string)
            # @ToDo: Look at more optimal queries for just those fields we need
            if tablename:
                # Lookup the resource
                query_string = cursor.mogrify("SELECT * FROM gis_location, %s WHERE %s.location_id = gis_location.id and ST_DWithin (ST_GeomFromText ('POINT (%s %s)', 4326), the_geom, %s);" % (tablename, tablename, lat, lon, radius))
            else:
                # Lookup the raw Locations
                query_string = cursor.mogrify("SELECT * FROM gis_location WHERE ST_DWithin (ST_GeomFromText ('POINT (%s %s)', 4326), the_geom, %s);" % (lat, lon, radius))
            cursor.execute(query_string)
            # @ToDo: Export Rows?
            features = []
            for record in cursor:
                d = dict(record.items())
                row = Storage()
                # @ToDo: Optional support for Polygons
                if tablename:
                    # Mimic the Rows structure of a DAL join
                    row.gis_location = Storage()
                    row.gis_location.id = d["id"]
                    row.gis_location.lat = d["lat"]
                    row.gis_location.lon = d["lon"]
                    row.gis_location.lat_min = d["lat_min"]
                    row.gis_location.lon_min = d["lon_min"]
                    row.gis_location.lat_max = d["lat_max"]
                    row.gis_location.lon_max = d["lon_max"]
                    row[tablename] = Storage()
                    row[tablename].id = d["id"]
                    row[tablename].name = d["name"]
                else:
                    row.name = d["name"]
                    row.id = d["id"]
                    row.lat = d["lat"]
                    row.lon = d["lon"]
                    row.lat_min = d["lat_min"]
                    row.lon_min = d["lon_min"]
                    row.lat_max = d["lat_max"]
                    row.lon_max = d["lon_max"]
                features.append(row)
            return features
        #elif settings.database.db_type == "mysql":
            # Do the calculation in MySQL to pull back only the relevant rows
            # Raw MySQL Formula from: http://blog.peoplesdns.com/archives/24
            # PI = 3.141592653589793, mysql's pi() function returns 3.141593
            #pi = math.pi
            #query = """SELECT name, lat, lon, acos(SIN( PI()* 40.7383040 /180 )*SIN( PI()*lat/180 ))+(cos(PI()* 40.7383040 /180)*COS( PI()*lat/180) *COS(PI()*lon/180-PI()* -73.99319 /180))* 3963.191
            #AS distance
            #FROM gis_location
            #WHERE 1=1
            #AND 3963.191 * ACOS( (SIN(PI()* 40.7383040 /180)*SIN(PI() * lat/180)) + (COS(PI()* 40.7383040 /180)*cos(PI()*lat/180)*COS(PI() * lon/180-PI()* -73.99319 /180))) < = 1.5
            #ORDER BY 3963.191 * ACOS((SIN(PI()* 40.7383040 /180)*SIN(PI()*lat/180)) + (COS(PI()* 40.7383040 /180)*cos(PI()*lat/180)*COS(PI() * lon/180-PI()* -73.99319 /180)))"""
            # db.executesql(query)
        else:
            # Calculate in Python
            # Pull back all the rows within a square bounding box (faster than checking all features manually)
            # Then check each feature within this subset
            # http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates
            # @ToDo: Support optional Category (make this a generic filter?)
            bbox = self.get_bounds_from_radius(lat, lon, radius)
            # shortcut
            locations = db.gis_location
            query = (locations.lat > bbox["lat_min"]) & \
                    (locations.lat < bbox["lat_max"]) & \
                    (locations.lon > bbox["lon_min"]) & \
                    (locations.lon < bbox["lon_max"])
            deleted = (locations.deleted == False)
            empty = (locations.lat != None) & (locations.lon != None)
            query = deleted & empty & query
            if tablename:
                # Lookup the resource
                table = current.s3db[tablename]
                query &= (table.location_id == locations.id)
                records = db(query).select(table.ALL,
                                           locations.id,
                                           locations.name,
                                           locations.level,
                                           locations.lat,
                                           locations.lon,
                                           locations.lat_min,
                                           locations.lon_min,
                                           locations.lat_max,
                                           locations.lon_max)
            else:
                # Lookup the raw Locations
                records = db(query).select(locations.id,
                                           locations.name,
                                           locations.level,
                                           locations.lat,
                                           locations.lon,
                                           locations.lat_min,
                                           locations.lon_min,
                                           locations.lat_max,
                                           locations.lon_max)
            features = Rows()
            for row in records:
                # Calculate the Great Circle distance
                if tablename:
                    distance = self.greatCircleDistance(lat,
                                                        lon,
                                                        row["gis_location.lat"],
                                                        row["gis_location.lon"])
                else:
                    distance = self.greatCircleDistance(lat,
                                                        lon,
                                                        row.lat,
                                                        row.lon)
                if distance < radius:
                    features.records.append(row)
                else:
                    # skip
                    continue
            return features
# -------------------------------------------------------------------------
def get_latlon(self, feature_id, filter=False):
"""
Returns the Lat/Lon for a Feature
used by display_feature() in gis controller
@param feature_id: the feature ID
@param filter: Filter out results based on deployment_settings
"""
db = current.db
table = db.gis_location
feature = db(table.id == feature_id).select(table.id,
table.lat,
table.lon,
table.parent,
table.path,
limitby=(0, 1)).first()
# Zero is an allowed value, hence explicit test for None.
if "lon" in feature and "lat" in feature and \
(feature.lat is not None) and (feature.lon is not None):
return dict(lon=feature.lon, lat=feature.lat)
else:
# Step through ancestors to first with lon, lat.
parents = self.get_parents(feature.id, feature=feature)
if parents:
for row in parents:
lon = row.get("lon", None)
lat = row.get("lat", None)
if (lon is not None) and (lat is not None):
return dict(lon=lon, lat=lat)
# Invalid feature_id
return None
# -------------------------------------------------------------------------
    @staticmethod
    def get_locations(table,
                      query,
                      join = True,
                      geojson = True,
                      ):
        """
        Returns the locations for an XML export
        - used by GIS.get_location_data() and S3PivotTable.geojson()

        @param table: the resource table (joined to gis_location by query)
        @param query: the query to filter the rows
        @param join: whether rows are joined Rows (resource + gis_location)
        @param geojson: output GeoJSON (otherwise WKT)

        @return: dict of {record_id: geometry-string}

        @ToDo: Support multiple locations for a single resource
               (e.g. a Project working in multiple Communities)
        """
        db = current.db
        tablename = table._tablename
        gtable = current.s3db.gis_location
        settings = current.deployment_settings
        tolerance = settings.get_gis_simplify_tolerance()
        output = {}
        if settings.get_gis_spatialdb():
            if geojson:
                # Do the Simplify & GeoJSON direct from the DB
                # (ST_SimplifyPreserveTopology support needs a web2py
                #  release after 2015-01-17)
                web2py_installed_version = parse_version(current.request.global_settings.web2py_version)
                web2py_installed_datetime = web2py_installed_version[4] # datetime_index = 4
                if web2py_installed_datetime >= datetime.datetime(2015, 1, 17, 0, 7, 4):
                    # Use http://www.postgis.org/docs/ST_SimplifyPreserveTopology.html
                    rows = db(query).select(table.id,
                                            gtable.the_geom.st_simplifypreservetopology(tolerance).st_asgeojson(precision=4).with_alias("geojson"))
                else:
                    # Use http://www.postgis.org/docs/ST_Simplify.html
                    rows = db(query).select(table.id,
                                            gtable.the_geom.st_simplify(tolerance).st_asgeojson(precision=4).with_alias("geojson"))
                for row in rows:
                    output[row[tablename].id] = row.geojson
            else:
                # Do the Simplify direct from the DB
                rows = db(query).select(table.id,
                                        gtable.the_geom.st_simplify(tolerance).st_astext().with_alias("wkt"))
                for row in rows:
                    output[row[tablename].id] = row.wkt
        else:
            # No spatial DB => read the WKT & simplify in Python
            rows = db(query).select(table.id,
                                    gtable.wkt)
            simplify = GIS.simplify
            if geojson:
                # Simplify the polygon to reduce download size
                if join:
                    for row in rows:
                        g = simplify(row["gis_location"].wkt,
                                     tolerance=tolerance,
                                     output="geojson")
                        if g:
                            output[row[tablename].id] = g
                else:
                    # Note: no "tablename" in row when not joined
                    for row in rows:
                        g = simplify(row.wkt,
                                     tolerance=tolerance,
                                     output="geojson")
                        if g:
                            output[row.id] = g
            else:
                # Simplify the polygon to reduce download size
                # & also to work around the recursion limit in libxslt
                # http://blog.gmane.org/gmane.comp.python.lxml.devel/day=20120309
                if join:
                    for row in rows:
                        wkt = simplify(row["gis_location"].wkt)
                        if wkt:
                            output[row[tablename].id] = wkt
                else:
                    for row in rows:
                        wkt = simplify(row.wkt)
                        if wkt:
                            output[row.id] = wkt
        return output
# -------------------------------------------------------------------------
    @staticmethod
    def get_location_data(resource, attr_fields=None):
        """
            Returns the locations, markers and popup tooltips for an XML export
            e.g. Feature Layers or Search results (Feature Resources)
            e.g. Exports in KML, GeoRSS or GPX format

            Called by S3REST: S3Resource.export_tree()

            @param: resource - S3Resource instance (required)
            @param: attr_fields - list of attr_fields to use instead of reading
                                  from get_vars or looking up in gis_layer_feature

            @return: dict with keys geojsons, latlons, attributes, markers,
                     styles (consumed by S3XML's gis_encode()), or
                     None if the resource cannot be shown on the Map
        """

        tablename = resource.tablename
        if tablename == "gis_feature_query":
            # Requires no special handling: XSLT uses normal fields
            return dict()

        NONE = current.messages["NONE"]
        #if DEBUG:
        #    start = datetime.datetime.now()

        db = current.db
        s3db = current.s3db
        request = current.request
        get_vars = request.get_vars

        # Determine which Feature Layer (if any) describes this export
        ftable = s3db.gis_layer_feature
        layer = None
        layer_id = get_vars.get("layer", None)
        if layer_id:
            # Feature Layer
            # e.g. Search results loaded as a Feature Resource layer
            layer = db(ftable.layer_id == layer_id).select(ftable.attr_fields,
                                                           # @ToDo: Deprecate
                                                           ftable.popup_fields,
                                                           ftable.individual,
                                                           ftable.points,
                                                           ftable.trackable,
                                                           limitby=(0, 1)
                                                           ).first()
        else:
            # e.g. KML, GeoRSS or GPX export
            # e.g. Volunteer Layer in Vulnerability module
            # Fall back to matching a layer by controller/function
            controller = request.controller
            function = request.function
            query = (ftable.controller == controller) & \
                    (ftable.function == function)
            layers = db(query).select(ftable.layer_id,
                                      ftable.attr_fields,
                                      ftable.popup_fields, # @ToDo: Deprecate
                                      ftable.style_default, # @ToDo: Rename as no longer really 'style'
                                      ftable.individual,
                                      ftable.points,
                                      ftable.trackable,
                                      )
            if len(layers) > 1:
                # Prefer the layer flagged as default
                layers.exclude(lambda row: row.style_default == False)
                if len(layers) > 1:
                    # We can't provide details for the whole layer, but need to do a per-record check
                    return None
            if layers:
                layer = layers.first()
                layer_id = layer.layer_id

        # Which attribute/popup fields to export
        if not attr_fields:
            # Try get_vars
            attr_fields = get_vars.get("attr", [])
        if attr_fields:
            attr_fields = attr_fields.split(",")
        popup_fields = get_vars.get("popup", [])
        if popup_fields:
            popup_fields = popup_fields.split(",")
        if layer:
            if not popup_fields:
                # Lookup from gis_layer_feature
                popup_fields = layer.popup_fields or []
            if not attr_fields:
                # Lookup from gis_layer_feature
                # @ToDo: Consider parsing these from style.popup_format instead
                # - see S3Report.geojson()
                attr_fields = layer.attr_fields or []
            individual = layer.individual
            points = layer.points
            trackable = layer.trackable
        else:
            if not popup_fields:
                popup_fields = ["name"]
            individual = False
            points = False
            trackable = False

        table = resource.table
        pkey = table._id.name
        attributes = {}
        markers = {}
        styles = {}
        _pkey = table[pkey]
        # Ensure there are no ID represents to confuse things
        _pkey.represent = None
        geojson = current.auth.permission.format == "geojson"
        if geojson:
            # Build the Attributes now so that representations can be
            # looked-up in bulk rather than as a separate lookup per record
            if popup_fields:
                # Old-style
                attr_fields = list(set(popup_fields + attr_fields))
            if attr_fields:
                attr = {}
                # Make a copy for the pkey insertion
                fields = list(attr_fields)
                if pkey not in fields:
                    fields.insert(0, pkey)
                data = resource.select(fields,
                                       limit = None,
                                       represent = True,
                                       show_links = False)
                rfields = data["rfields"]
                # Map each selected column to its (field type, field name)
                attr_cols = {}
                for f in rfields:
                    fname = f.fname
                    selector = f.selector
                    if fname in attr_fields or selector in attr_fields:
                        fieldname = f.colname
                        tname, fname = fieldname.split(".")
                        try:
                            ftype = db[tname][fname].type
                        except AttributeError:
                            # FieldMethod
                            ftype = None
                        attr_cols[fieldname] = (ftype, fname)
                # NB _pkey is rebound here from Field to its colname string
                _pkey = str(_pkey)
                rows = data["rows"]
                for row in rows:
                    record_id = int(row[_pkey])
                    if attr_cols:
                        attribute = {}
                        for fieldname in attr_cols:
                            represent = row[fieldname]
                            if represent and represent != NONE:
                                # Skip empty fields
                                _attr = attr_cols[fieldname]
                                ftype = _attr[0]
                                if ftype == "integer":
                                    if isinstance(represent, lazyT):
                                        # Integer is just a lookup key
                                        represent = s3_unicode(represent)
                                    else:
                                        # Attributes should be numbers not strings
                                        # NB This also relies on decoding within geojson/export.xsl and S3XML.__element2json()
                                        try:
                                            represent = int(represent.replace(",", ""))
                                        except:
                                            # @ToDo: Don't assume this i18n formatting...better to have no represent & then bypass the s3_unicode in select too
                                            # (although we *do* want the represent in the tooltips!)
                                            pass
                                elif ftype == "double":
                                    # Attributes should be numbers not strings
                                    try:
                                        float_represent = float(represent.replace(",", ""))
                                        int_represent = int(float_represent)
                                        if int_represent == float_represent:
                                            represent = int_represent
                                        else:
                                            represent = float_represent
                                    except:
                                        # @ToDo: Don't assume this i18n formatting...better to have no represent & then bypass the s3_unicode in select too
                                        # (although we *do* want the represent in the tooltips!)
                                        pass
                                else:
                                    represent = s3_unicode(represent)
                                attribute[_attr[1]] = represent
                        attr[record_id] = attribute
                attributes[tablename] = attr
            #if DEBUG:
            #    end = datetime.datetime.now()
            #    duration = end - start
            #    duration = "{:.2f}".format(duration.total_seconds())
            #    if layer_id:
            #        layer_name = db(ftable.id == layer_id).select(ftable.name,
            #                                                      limitby=(0, 1)
            #                                                      ).first().name
            #    else:
            #        layer_name = "Unknown"
            #    _debug("Attributes lookup of layer %s completed in %s seconds" % \
            #            (layer_name, duration))
            _markers = get_vars.get("markers", None)
            if _markers:
                # Add a per-feature Marker
                marker_fn = s3db.get_config(tablename, "marker_fn")
                if marker_fn:
                    m = {}
                    for record in resource:
                        m[record[pkey]] = marker_fn(record)
                else:
                    # No configuration found so use default marker for all
                    c, f = tablename.split("_", 1)
                    m = GIS.get_marker(c, f)
                markers[tablename] = m
            if individual:
                # Add a per-feature Style
                # Optionally restrict to a specific Config?
                #config = GIS.get_config()
                stable = s3db.gis_style
                query = (stable.deleted == False) & \
                        (stable.layer_id == layer_id) & \
                        (stable.record_id.belongs(resource._ids))
                        #((stable.config_id == config.id) |
                        # (stable.config_id == None))
                rows = db(query).select(stable.record_id,
                                        stable.style)
                for row in rows:
                    styles[row.record_id] = json.dumps(row.style, separators=SEPARATORS)
                # NOTE(review): self-referential assignment - the dict is
                # stored inside itself under the tablename key, apparently so
                # that consumers can index styles[tablename][record_id];
                # confirm against S3XML.gis_encode()
                styles[tablename] = styles
        else:
            # KML, GeoRSS or GPX
            marker_fn = s3db.get_config(tablename, "marker_fn")
            if marker_fn:
                # Add a per-feature Marker
                for record in resource:
                    markers[record[pkey]] = marker_fn(record)
            else:
                # No configuration found so use default marker for all
                c, f = tablename.split("_", 1)
                markers = GIS.get_marker(c, f)
            # NOTE(review): as above, the dict is stored inside itself under
            # the tablename key (markers was rebound in the else-branch) -
            # looks deliberate so markers[tablename] resolves; verify
            markers[tablename] = markers

        # Lookup the LatLons now so that it can be done as a single
        # query rather than per record
        #if DEBUG:
        #    start = datetime.datetime.now()
        latlons = {}
        #wkts = {}
        geojsons = {}
        gtable = s3db.gis_location
        if trackable:
            # Use S3Track
            ids = resource._ids
            # Ensure IDs in ascending order
            ids.sort()
            try:
                tracker = S3Trackable(table, record_ids=ids)
            except SyntaxError:
                # This table isn't trackable
                pass
            else:
                _latlons = tracker.get_location(_fields=[gtable.lat,
                                                         gtable.lon])
                index = 0
                for _id in ids:
                    _location = _latlons[index]
                    latlons[_id] = (_location.lat, _location.lon)
                    index += 1
        if not latlons:
            # Not trackable (or no track data): join to gis_location
            join = True
            #custom = False
            if "location_id" in table.fields:
                query = (table.id.belongs(resource._ids)) & \
                        (table.location_id == gtable.id)
            elif "site_id" in table.fields:
                stable = s3db.org_site
                query = (table.id.belongs(resource._ids)) & \
                        (table.site_id == stable.site_id) & \
                        (stable.location_id == gtable.id)
            elif tablename == "gis_location":
                join = False
                query = (table.id.belongs(resource._ids))
            else:
                # Look at the Context
                context = resource.get_config("context")
                if context:
                    location_context = context.get("location")
                else:
                    location_context = None
                if not location_context:
                    # Can't display this resource on the Map
                    return None
                # @ToDo: Proper system rather than this hack_which_works_for_current_usecase
                # Resolve selector (which automatically attaches any required component)
                rfield = resource.resolve_selector(location_context)
                if "." in location_context:
                    # Component
                    alias, cfield = location_context.split(".", 1)
                    try:
                        component = resource.components[alias]
                    except:
                        # Invalid alias
                        # Can't display this resource on the Map
                        return None
                    ctablename = component.tablename
                    ctable = s3db[ctablename]
                    query = (table.id.belongs(resource._ids)) & \
                            rfield.join[ctablename] & \
                            (ctable[cfield] == gtable.id)
                    #custom = True
                    # Clear components again
                    resource.components = Storage()
                # @ToDo:
                #elif "$" in location_context:
                else:
                    # Can't display this resource on the Map
                    return None
            if geojson and not points:
                # Polygons/Lines: export as GeoJSON geometries
                geojsons[tablename] = GIS.get_locations(table, query, join, geojson)
            # @ToDo: Support Polygons in KML, GPX & GeoRSS
            #else:
            #    wkts[tablename] = GIS.get_locations(table, query, join, geojson)
            else:
                # Points
                rows = db(query).select(table.id,
                                        gtable.lat,
                                        gtable.lon)
                #if custom:
                #    # Add geoJSONs
                #elif join:
                # @ToDo: Support records with multiple locations
                #        (e.g. an Org with multiple Facs)
                if join:
                    for row in rows:
                        _location = row["gis_location"]
                        latlons[row[tablename].id] = (_location.lat, _location.lon)
                else:
                    for row in rows:
                        latlons[row.id] = (row.lat, row.lon)
        _latlons = {}
        if latlons:
            _latlons[tablename] = latlons
        #if DEBUG:
        #    end = datetime.datetime.now()
        #    duration = end - start
        #    duration = "{:.2f}".format(duration.total_seconds())
        #    _debug("latlons lookup of layer %s completed in %s seconds" % \
        #           (layer_name, duration))

        # Used by S3XML's gis_encode()
        return dict(geojsons = geojsons,
                    latlons = _latlons,
                    #wkts = wkts,
                    attributes = attributes,
                    markers = markers,
                    styles = styles,
                    )
# -------------------------------------------------------------------------
@staticmethod
def get_marker(controller=None,
function=None,
filter=None,
):
"""
Returns a Marker dict
- called by xml.gis_encode() for non-geojson resources
- called by S3Map.widget() if no marker_fn supplied
"""
marker = None
if controller and function:
# Lookup marker in the gis_style table
db = current.db
s3db = current.s3db
ftable = s3db.gis_layer_feature
stable = s3db.gis_style
mtable = s3db.gis_marker
config = GIS.get_config()
query = (ftable.controller == controller) & \
(ftable.function == function) & \
(ftable.aggregate == False)
left = (stable.on((stable.layer_id == ftable.layer_id) & \
(stable.record_id == None) & \
((stable.config_id == config.id) | \
(stable.config_id == None))),
mtable.on(mtable.id == stable.marker_id),
)
if filter:
query &= (ftable.filter == filter)
if current.deployment_settings.get_database_type() == "postgres":
# None is last
orderby = stable.config_id
else:
# None is 1st
orderby = ~stable.config_id
layers = db(query).select(mtable.image,
mtable.height,
mtable.width,
ftable.style_default,
stable.gps_marker,
left=left,
orderby=orderby)
if len(layers) > 1:
layers.exclude(lambda row: row["gis_layer_feature.style_default"] == False)
if len(layers) == 1:
layer = layers.first()
else:
# Can't differentiate
layer = None
if layer:
_marker = layer["gis_marker"]
if _marker.image:
marker = dict(image=_marker.image,
height=_marker.height,
width=_marker.width,
gps_marker=layer["gis_style"].gps_marker
)
if not marker:
# Default
marker = Marker().as_dict()
return marker
# -------------------------------------------------------------------------
@staticmethod
def get_style(layer_id=None,
aggregate=None,
):
"""
Returns a Style dict
- called by S3Report.geojson()
"""
style = None
if layer_id:
style = Style(layer_id=layer_id,
aggregate=aggregate).as_dict()
if not style:
# Default
style = Style().as_dict()
return style
# -------------------------------------------------------------------------
    @staticmethod
    def get_screenshot(config_id, temp=True, height=None, width=None):
        """
            Save a Screenshot of a saved map

            @requires:
                PhantomJS http://phantomjs.org
                Selenium https://pypi.python.org/pypi/selenium

            @param config_id: the gis_config to render
            @param temp: the config is a throwaway one created for this
                         screenshot (it is deleted again at the end)
            @param height: browser window height (defaults to the configured
                           map height)
            @param width: browser window width (defaults to the configured
                          map width)

            @return: the JPEG filename (within static/cache/jpg), or
                     None on failure
        """

        # @ToDo: allow selection of map_id
        map_id = "default_map"

        #from selenium import webdriver
        # Custom version which is patched to access native PhantomJS functions added to GhostDriver/PhantomJS in:
        # https://github.com/watsonmw/ghostdriver/commit/d9b65ed014ed9ff8a5e852cc40e59a0fd66d0cf1
        from webdriver import WebDriver
        from selenium.common.exceptions import TimeoutException, WebDriverException
        from selenium.webdriver.support.ui import WebDriverWait

        # Ensure the output folder exists
        request = current.request
        cachepath = os.path.join(request.folder, "static", "cache", "jpg")
        if not os.path.exists(cachepath):
            try:
                os.mkdir(cachepath)
            except OSError, os_error:
                error = "GIS: JPEG files cannot be saved: %s %s" % \
                        (cachepath, os_error)
                current.log.error(error)
                current.session.error = error
                redirect(URL(c="gis", f="index", vars={"config_id": config_id}))

        # Copy the current working directory to revert back to later
        cwd = os.getcwd()
        # Change to the Cache folder (can't render directly there from execute_phantomjs)
        os.chdir(cachepath)
        #driver = webdriver.PhantomJS()
        # Disable Proxy for Win32 Network Latency issue
        driver = WebDriver(service_args=["--proxy-type=none"])
        # Change back for other parts
        os.chdir(cwd)

        settings = current.deployment_settings
        if height is None:
            # Set the size of the browser to match the map
            height = settings.get_gis_map_height()
        if width is None:
            width = settings.get_gis_map_width()
        # For Screenshots
        #height = 410
        #width = 820
        # Extra pixels to allow for window chrome/scrollbars
        driver.set_window_size(width + 5, height + 20)

        # Load the homepage
        # (Cookie needs to be set on same domain as it takes effect)
        base_url = "%s/%s" % (settings.get_base_public_url(),
                              request.application)
        driver.get(base_url)
        if not current.auth.override:
            # Reuse current session to allow access to ACL-controlled resources
            response = current.response
            session_id = response.session_id
            driver.add_cookie({"name": response.session_id_name,
                               "value": session_id,
                               "path": "/",
                               })
            # For sync connections
            current.session._unlock(response)
        # NOTE(review): session_id is only bound inside the branch above:
        # with auth.override and temp=True the filename construction below
        # would raise NameError - confirm callers never combine these

        # Load the map
        url = "%s/gis/map_viewing_client?print=1&config=%s" % (base_url,
                                                               config_id)
        driver.get(url)

        # Wait for map to load (including it's layers)
        # Alternative approach: https://raw.githubusercontent.com/ariya/phantomjs/master/examples/waitfor.js
        def map_loaded(driver):
            # Polls the map's JS 'loaded' flag inside the browser
            test = '''return S3.gis.maps['%s'].s3.loaded''' % map_id
            try:
                result = driver.execute_script(test)
            except WebDriverException, e:
                result = False
            return result
        try:
            # Wait for up to 100s (large screenshots take a long time for layers to load)
            WebDriverWait(driver, 100).until(map_loaded)
        except TimeoutException, e:
            driver.quit()
            current.log.error("Timeout: %s" % e)
            return None

        # Save the Output
        # @ToDo: Can we use StringIO instead of cluttering filesystem?
        # @ToDo: Allow option of PDF (as well as JPG)
        # https://github.com/ariya/phantomjs/blob/master/examples/rasterize.js
        if temp:
            filename = "%s.jpg" % session_id
        else:
            filename = "config_%s.jpg" % config_id
        # Cannot control file size (no access to clipRect) or file format
        #driver.save_screenshot(os.path.join(cachepath, filename))
        #driver.page.clipRect = {"top": 10,
        #                        "left": 5,
        #                        "width": width,
        #                        "height": height
        #                        }
        #driver.page.render(filename, {"format": "jpeg", "quality": "100"})
        # Render via native PhantomJS API (clipRect crops to the map area)
        script = '''
var page = this;
page.clipRect = {top: 10,
                 left: 5,
                 width: %(width)s,
                 height: %(height)s
                 };
page.render('%(filename)s', {format: 'jpeg', quality: '100'});''' % \
            dict(width = width,
                 height = height,
                 filename = filename,
                 )
        try:
            result = driver.execute_phantomjs(script)
        except WebDriverException, e:
            driver.quit()
            current.log.error("WebDriver crashed: %s" % e)
            return None

        driver.quit()

        if temp:
            # This was a temporary config for creating the screenshot, then delete it now
            ctable = current.s3db.gis_config
            the_set = current.db(ctable.id == config_id)
            config = the_set.select(ctable.temp,
                                    limitby=(0, 1)
                                    ).first()
            try:
                if config.temp:
                    the_set.delete()
            except:
                # Record not found?
                pass

        # Pass the result back to the User
        return filename
# -------------------------------------------------------------------------
@staticmethod
def get_shapefile_geojson(resource):
"""
Lookup Shapefile Layer polygons once per layer and not per-record
Called by S3REST: S3Resource.export_tree()
@ToDo: Vary simplification level & precision by Zoom level
- store this in the style?
"""
db = current.db
tablename = "gis_layer_shapefile_%s" % resource._ids[0]
table = db[tablename]
query = resource.get_query()
fields = []
fappend = fields.append
for f in table.fields:
if f not in ("layer_id", "lat", "lon"):
fappend(f)
attributes = {}
geojsons = {}
settings = current.deployment_settings
tolerance = settings.get_gis_simplify_tolerance()
if settings.get_gis_spatialdb():
# Do the Simplify & GeoJSON direct from the DB
fields.remove("the_geom")
fields.remove("wkt")
_fields = [table[f] for f in fields]
rows = db(query).select(table.the_geom.st_simplify(tolerance).st_asgeojson(precision=4).with_alias("geojson"),
*_fields)
for row in rows:
_row = row[tablename]
_id = _row.id
geojsons[_id] = row.geojson
_attributes = {}
for f in fields:
if f not in ("id"):
_attributes[f] = _row[f]
attributes[_id] = _attributes
else:
_fields = [table[f] for f in fields]
rows = db(query).select(*_fields)
simplify = GIS.simplify
for row in rows:
# Simplify the polygon to reduce download size
geojson = simplify(row.wkt, tolerance=tolerance,
output="geojson")
_id = row.id
if geojson:
geojsons[_id] = geojson
_attributes = {}
for f in fields:
if f not in ("id", "wkt"):
_attributes[f] = row[f]
attributes[_id] = _attributes
_attributes = {}
_attributes[tablename] = attributes
_geojsons = {}
_geojsons[tablename] = geojsons
# return 'locations'
return dict(attributes = _attributes,
geojsons = _geojsons)
# -------------------------------------------------------------------------
@staticmethod
def get_theme_geojson(resource):
"""
Lookup Theme Layer polygons once per layer and not per-record
Called by S3REST: S3Resource.export_tree()
@ToDo: Vary precision by Lx
- store this (& tolerance map) in the style?
"""
s3db = current.s3db
tablename = "gis_theme_data"
table = s3db.gis_theme_data
gtable = s3db.gis_location
query = (table.id.belongs(resource._ids)) & \
(table.location_id == gtable.id)
geojsons = {}
# @ToDo: How to get the tolerance to vary by level?
# - add Stored Procedure?
#if current.deployment_settings.get_gis_spatialdb():
# # Do the Simplify & GeoJSON direct from the DB
# rows = current.db(query).select(table.id,
# gtable.the_geom.st_simplify(0.01).st_asgeojson(precision=4).with_alias("geojson"))
# for row in rows:
# geojsons[row["gis_theme_data.id"]] = row.geojson
#else:
rows = current.db(query).select(table.id,
gtable.level,
gtable.wkt)
simplify = GIS.simplify
tolerance = {"L0": 0.01,
"L1": 0.005,
"L2": 0.00125,
"L3": 0.000625,
"L4": 0.0003125,
"L5": 0.00015625,
}
for row in rows:
grow = row.gis_location
# Simplify the polygon to reduce download size
geojson = simplify(grow.wkt,
tolerance=tolerance[grow.level],
output="geojson")
if geojson:
geojsons[row["gis_theme_data.id"]] = geojson
_geojsons = {}
_geojsons[tablename] = geojsons
# Return 'locations'
return dict(geojsons = _geojsons)
# -------------------------------------------------------------------------
@staticmethod
def greatCircleDistance(lat1, lon1, lat2, lon2, quick=True):
"""
Calculate the shortest distance (in km) over the earth's sphere between 2 points
Formulae from: http://www.movable-type.co.uk/scripts/latlong.html
(NB We could also use PostGIS functions, where possible, instead of this query)
"""
import math
# shortcuts
cos = math.cos
sin = math.sin
radians = math.radians
if quick:
# Spherical Law of Cosines (accurate down to around 1m & computationally quick)
lat1 = radians(lat1)
lat2 = radians(lat2)
lon1 = radians(lon1)
lon2 = radians(lon2)
distance = math.acos(sin(lat1) * sin(lat2) + cos(lat1) * cos(lat2) * cos(lon2 - lon1)) * RADIUS_EARTH
return distance
else:
# Haversine
#asin = math.asin
sqrt = math.sqrt
pow = math.pow
dLat = radians(lat2 - lat1)
dLon = radians(lon2 - lon1)
a = pow(sin(dLat / 2), 2) + cos(radians(lat1)) * cos(radians(lat2)) * pow(sin(dLon / 2), 2)
c = 2 * math.atan2(sqrt(a), sqrt(1 - a))
#c = 2 * asin(sqrt(a)) # Alternate version
# Convert radians to kilometers
distance = RADIUS_EARTH * c
return distance
# -------------------------------------------------------------------------
@staticmethod
def create_poly(feature):
"""
Create a .poly file for OpenStreetMap exports
http://wiki.openstreetmap.org/wiki/Osmosis/Polygon_Filter_File_Format
"""
from shapely.wkt import loads as wkt_loads
try:
# Enable C-based speedups available from 1.2.10+
from shapely import speedups
speedups.enable()
except:
current.log.info("S3GIS",
"Upgrade Shapely for Performance enhancements")
name = feature.name
if "wkt" in feature:
wkt = feature.wkt
else:
# WKT not included by default in feature, so retrieve this now
table = current.s3db.gis_location
wkt = current.db(table.id == feature.id).select(table.wkt,
limitby=(0, 1)
).first().wkt
try:
shape = wkt_loads(wkt)
except:
error = "Invalid WKT: %s" % name
current.log.error(error)
return error
geom_type = shape.geom_type
if geom_type == "MultiPolygon":
polygons = shape.geoms
elif geom_type == "Polygon":
polygons = [shape]
else:
error = "Unsupported Geometry: %s, %s" % (name, geom_type)
current.log.error(error)
return error
if os.path.exists(os.path.join(os.getcwd(), "temp")): # use web2py/temp
TEMP = os.path.join(os.getcwd(), "temp")
else:
import tempfile
TEMP = tempfile.gettempdir()
filename = "%s.poly" % name
filepath = os.path.join(TEMP, filename)
File = open(filepath, "w")
File.write("%s\n" % filename)
count = 1
for polygon in polygons:
File.write("%s\n" % count)
points = polygon.exterior.coords
for point in points:
File.write("\t%s\t%s\n" % (point[0], point[1]))
File.write("END\n")
count += 1
File.write("END\n")
File.close()
return None
# -------------------------------------------------------------------------
    @staticmethod
    def export_admin_areas(countries=[],
                           levels=("L0", "L1", "L2", "L3"),
                           format="geojson",
                           simplify=0.01,
                           decimals=4,
                           ):
        """
            Export admin areas to /static/cache for use by interactive web-mapping services
            - designed for use by the Vulnerability Mapping

            Output files (within static/cache):
                countries.geojson for L0, then
                1_<L0 id>.geojson, 2_<L1 id>.geojson, 3_<L2 id>.geojson,
                4_<L3 id>.geojson for the lower levels (keyed by parent id)

            @param countries: list of ISO2 country codes
            @param levels: list of which Lx levels to export
            @param format: Only GeoJSON supported for now (may add KML &/or OSM later)
            @param simplify: tolerance for the simplification algorithm. False to disable simplification
            @param decimals: number of decimal points to include in the coordinates
        """

        db = current.db
        s3db = current.s3db
        table = s3db.gis_location
        ifield = table.id
        # Base query for the L0 (country) records
        if countries:
            ttable = s3db.gis_location_tag
            cquery = (table.level == "L0") & \
                     (table.end_date == None) & \
                     (ttable.location_id == ifield) & \
                     (ttable.tag == "ISO2") & \
                     (ttable.value.belongs(countries))
        else:
            # All countries
            cquery = (table.level == "L0") & \
                     (table.end_date == None) & \
                     (table.deleted != True)

        # Choose DB-side (spatial) or Python-side simplification/encoding
        if current.deployment_settings.get_gis_spatialdb():
            spatial = True
            _field = table.the_geom
            if simplify:
                # Do the Simplify & GeoJSON direct from the DB
                field = _field.st_simplify(simplify).st_asgeojson(precision=decimals).with_alias("geojson")
            else:
                # Do the GeoJSON direct from the DB
                field = _field.st_asgeojson(precision=decimals).with_alias("geojson")
        else:
            spatial = False
            field = table.wkt
            if simplify:
                _simplify = GIS.simplify
            else:
                from shapely.wkt import loads as wkt_loads
                from ..geojson import dumps
                try:
                    # Enable C-based speedups available from 1.2.10+
                    from shapely import speedups
                    speedups.enable()
                except:
                    current.log.info("S3GIS",
                                     "Upgrade Shapely for Performance enhancements")

        folder = os.path.join(current.request.folder, "static", "cache")

        features = []
        append = features.append

        if "L0" in levels:
            # Reduce the decimals in output by 1
            _decimals = decimals -1
            if spatial:
                if simplify:
                    field = _field.st_simplify(simplify).st_asgeojson(precision=_decimals).with_alias("geojson")
                else:
                    field = _field.st_asgeojson(precision=_decimals).with_alias("geojson")
            countries = db(cquery).select(ifield,
                                          field)
            for row in countries:
                if spatial:
                    id = row["gis_location"].id
                    geojson = row.geojson
                elif simplify:
                    id = row.id
                    wkt = row.wkt
                    if wkt:
                        geojson = _simplify(wkt, tolerance=simplify,
                                            decimals=_decimals,
                                            output="geojson")
                    else:
                        name = db(table.id == id).select(table.name,
                                                         limitby=(0, 1)).first().name
                        print >> sys.stderr, "No WKT: L0 %s %s" % (name, id)
                        continue
                else:
                    id = row.id
                    shape = wkt_loads(row.wkt)
                    # Compact Encoding
                    geojson = dumps(shape, separators=SEPARATORS)
                if geojson:
                    f = dict(type = "Feature",
                             properties = {"id": id},
                             geometry = json.loads(geojson)
                             )
                    append(f)

            if features:
                data = dict(type = "FeatureCollection",
                            features = features
                            )
                # Output to file
                filename = os.path.join(folder, "countries.geojson")
                File = open(filename, "w")
                File.write(json.dumps(data, separators=SEPARATORS))
                File.close()

        # Base queries for each lower level (current, undeleted records)
        q1 = (table.level == "L1") & \
             (table.deleted != True) & \
             (table.end_date == None)
        q2 = (table.level == "L2") & \
             (table.deleted != True) & \
             (table.end_date == None)
        q3 = (table.level == "L3") & \
             (table.deleted != True) & \
             (table.end_date == None)
        q4 = (table.level == "L4") & \
             (table.deleted != True) & \
             (table.end_date == None)

        if "L1" in levels:
            if "L0" not in levels:
                countries = db(cquery).select(ifield)
            if simplify:
                # We want greater precision when zoomed-in more
                simplify = simplify / 2 # 0.005 with default setting
                if spatial:
                    field = _field.st_simplify(simplify).st_asgeojson(precision=decimals).with_alias("geojson")
            for country in countries:
                if not spatial or "L0" not in levels:
                    _id = country.id
                else:
                    _id = country["gis_location"].id
                query = q1 & (table.parent == _id)
                features = []
                append = features.append
                rows = db(query).select(ifield,
                                        field)
                for row in rows:
                    if spatial:
                        id = row["gis_location"].id
                        geojson = row.geojson
                    elif simplify:
                        id = row.id
                        wkt = row.wkt
                        if wkt:
                            geojson = _simplify(wkt, tolerance=simplify,
                                                decimals=decimals,
                                                output="geojson")
                        else:
                            name = db(table.id == id).select(table.name,
                                                             limitby=(0, 1)).first().name
                            print >> sys.stderr, "No WKT: L1 %s %s" % (name, id)
                            continue
                    else:
                        id = row.id
                        shape = wkt_loads(row.wkt)
                        # Compact Encoding
                        geojson = dumps(shape, separators=SEPARATORS)
                    if geojson:
                        f = dict(type = "Feature",
                                 properties = {"id": id},
                                 geometry = json.loads(geojson)
                                 )
                        append(f)
                if features:
                    data = dict(type = "FeatureCollection",
                                features = features
                                )
                    # Output to file
                    filename = os.path.join(folder, "1_%s.geojson" % _id)
                    File = open(filename, "w")
                    File.write(json.dumps(data, separators=SEPARATORS))
                    File.close()
                else:
                    current.log.debug("No L1 features in %s" % _id)

        if "L2" in levels:
            if "L0" not in levels and "L1" not in levels:
                countries = db(cquery).select(ifield)
            if simplify:
                # We want greater precision when zoomed-in more
                simplify = simplify / 4 # 0.00125 with default setting
                if spatial:
                    field = _field.st_simplify(simplify).st_asgeojson(precision=decimals).with_alias("geojson")
            for country in countries:
                if not spatial or "L0" not in levels:
                    id = country.id
                else:
                    id = country["gis_location"].id
                query = q1 & (table.parent == id)
                l1s = db(query).select(ifield)
                for l1 in l1s:
                    query = q2 & (table.parent == l1.id)
                    features = []
                    append = features.append
                    rows = db(query).select(ifield,
                                            field)
                    for row in rows:
                        if spatial:
                            id = row["gis_location"].id
                            geojson = row.geojson
                        elif simplify:
                            id = row.id
                            wkt = row.wkt
                            if wkt:
                                geojson = _simplify(wkt, tolerance=simplify,
                                                    decimals=decimals,
                                                    output="geojson")
                            else:
                                name = db(table.id == id).select(table.name,
                                                                 limitby=(0, 1)).first().name
                                print >> sys.stderr, "No WKT: L2 %s %s" % (name, id)
                                continue
                        else:
                            id = row.id
                            shape = wkt_loads(row.wkt)
                            # Compact Encoding
                            geojson = dumps(shape, separators=SEPARATORS)
                        if geojson:
                            f = dict(type = "Feature",
                                     properties = {"id": id},
                                     geometry = json.loads(geojson)
                                     )
                            append(f)
                    if features:
                        data = dict(type = "FeatureCollection",
                                    features = features
                                    )
                        # Output to file
                        filename = os.path.join(folder, "2_%s.geojson" % l1.id)
                        File = open(filename, "w")
                        File.write(json.dumps(data, separators=SEPARATORS))
                        File.close()
                    else:
                        current.log.debug("No L2 features in %s" % l1.id)

        if "L3" in levels:
            if "L0" not in levels and "L1" not in levels and "L2" not in levels:
                countries = db(cquery).select(ifield)
            if simplify:
                # We want greater precision when zoomed-in more
                simplify = simplify / 2 # 0.000625 with default setting
                if spatial:
                    field = _field.st_simplify(simplify).st_asgeojson(precision=decimals).with_alias("geojson")
            for country in countries:
                if not spatial or "L0" not in levels:
                    id = country.id
                else:
                    id = country["gis_location"].id
                query = q1 & (table.parent == id)
                l1s = db(query).select(ifield)
                for l1 in l1s:
                    query = q2 & (table.parent == l1.id)
                    l2s = db(query).select(ifield)
                    for l2 in l2s:
                        query = q3 & (table.parent == l2.id)
                        features = []
                        append = features.append
                        rows = db(query).select(ifield,
                                                field)
                        for row in rows:
                            if spatial:
                                id = row["gis_location"].id
                                geojson = row.geojson
                            elif simplify:
                                id = row.id
                                wkt = row.wkt
                                if wkt:
                                    geojson = _simplify(wkt, tolerance=simplify,
                                                        decimals=decimals,
                                                        output="geojson")
                                else:
                                    name = db(table.id == id).select(table.name,
                                                                     limitby=(0, 1)).first().name
                                    print >> sys.stderr, "No WKT: L3 %s %s" % (name, id)
                                    continue
                            else:
                                id = row.id
                                shape = wkt_loads(row.wkt)
                                # Compact Encoding
                                geojson = dumps(shape, separators=SEPARATORS)
                            if geojson:
                                f = dict(type = "Feature",
                                         properties = {"id": id},
                                         geometry = json.loads(geojson)
                                         )
                                append(f)
                        if features:
                            data = dict(type = "FeatureCollection",
                                        features = features
                                        )
                            # Output to file
                            filename = os.path.join(folder, "3_%s.geojson" % l2.id)
                            File = open(filename, "w")
                            File.write(json.dumps(data, separators=SEPARATORS))
                            File.close()
                        else:
                            current.log.debug("No L3 features in %s" % l2.id)

        if "L4" in levels:
            if "L0" not in levels and "L1" not in levels and "L2" not in levels and "L3" not in levels:
                countries = db(cquery).select(ifield)
            if simplify:
                # We want greater precision when zoomed-in more
                simplify = simplify / 2 # 0.0003125 with default setting
                if spatial:
                    field = _field.st_simplify(simplify).st_asgeojson(precision=decimals).with_alias("geojson")
            for country in countries:
                if not spatial or "L0" not in levels:
                    id = country.id
                else:
                    id = country["gis_location"].id
                query = q1 & (table.parent == id)
                l1s = db(query).select(ifield)
                for l1 in l1s:
                    query = q2 & (table.parent == l1.id)
                    l2s = db(query).select(ifield)
                    for l2 in l2s:
                        query = q3 & (table.parent == l2.id)
                        l3s = db(query).select(ifield)
                        for l3 in l3s:
                            query = q4 & (table.parent == l3.id)
                            features = []
                            append = features.append
                            rows = db(query).select(ifield,
                                                    field)
                            for row in rows:
                                if spatial:
                                    id = row["gis_location"].id
                                    geojson = row.geojson
                                elif simplify:
                                    id = row.id
                                    wkt = row.wkt
                                    if wkt:
                                        geojson = _simplify(wkt, tolerance=simplify,
                                                            decimals=decimals,
                                                            output="geojson")
                                    else:
                                        name = db(table.id == id).select(table.name,
                                                                         limitby=(0, 1)).first().name
                                        print >> sys.stderr, "No WKT: L4 %s %s" % (name, id)
                                        continue
                                else:
                                    id = row.id
                                    shape = wkt_loads(row.wkt)
                                    # Compact Encoding
                                    geojson = dumps(shape, separators=SEPARATORS)
                                if geojson:
                                    f = dict(type = "Feature",
                                             properties = {"id": id},
                                             geometry = json.loads(geojson)
                                             )
                                    append(f)
                            if features:
                                data = dict(type = "FeatureCollection",
                                            features = features
                                            )
                                # Output to file
                                filename = os.path.join(folder, "4_%s.geojson" % l3.id)
                                File = open(filename, "w")
                                File.write(json.dumps(data, separators=SEPARATORS))
                                File.close()
                            else:
                                current.log.debug("No L4 features in %s" % l3.id)
# -------------------------------------------------------------------------
def import_admin_areas(self,
source="gadmv1",
countries=[],
levels=["L0", "L1", "L2"]
):
"""
Import Admin Boundaries into the Locations table
@param source - Source to get the data from.
Currently only GADM is supported: http://gadm.org
@param countries - List of ISO2 countrycodes to download data for
defaults to all countries
@param levels - Which levels of the hierarchy to import.
defaults to all 3 supported levels
"""
if source == "gadmv1":
try:
from osgeo import ogr
except:
current.log.error("Unable to import ogr. Please install python-gdal bindings: GDAL-1.8.1+")
return
if "L0" in levels:
self.import_gadm1_L0(ogr, countries=countries)
if "L1" in levels:
self.import_gadm1(ogr, "L1", countries=countries)
if "L2" in levels:
self.import_gadm1(ogr, "L2", countries=countries)
current.log.debug("All done!")
elif source == "gadmv1":
try:
from osgeo import ogr
except:
current.log.error("Unable to import ogr. Please install python-gdal bindings: GDAL-1.8.1+")
return
if "L0" in levels:
self.import_gadm2(ogr, "L0", countries=countries)
if "L1" in levels:
self.import_gadm2(ogr, "L1", countries=countries)
if "L2" in levels:
self.import_gadm2(ogr, "L2", countries=countries)
current.log.debug("All done!")
else:
current.log.warning("Only GADM is currently supported")
return
return
# -------------------------------------------------------------------------
@staticmethod
def import_gadm1_L0(ogr, countries=[]):
"""
Import L0 Admin Boundaries into the Locations table from GADMv1
- designed to be called from import_admin_areas()
- assumes that basic prepop has been done, so that no new records need to be created
@param ogr - The OGR Python module
@param countries - List of ISO2 countrycodes to download data for
defaults to all countries
"""
db = current.db
s3db = current.s3db
ttable = s3db.gis_location_tag
table = db.gis_location
layer = {
"url" : "http://gadm.org/data/gadm_v1_lev0_shp.zip",
"zipfile" : "gadm_v1_lev0_shp.zip",
"shapefile" : "gadm1_lev0",
"codefield" : "ISO2", # This field is used to uniquely identify the L0 for updates
"code2field" : "ISO" # This field is used to uniquely identify the L0 for parenting the L1s
}
# Copy the current working directory to revert back to later
cwd = os.getcwd()
# Create the working directory
TEMP = os.path.join(cwd, "temp")
if not os.path.exists(TEMP): # use web2py/temp/GADMv1 as a cache
import tempfile
TEMP = tempfile.gettempdir()
tempPath = os.path.join(TEMP, "GADMv1")
if not os.path.exists(tempPath):
try:
os.mkdir(tempPath)
except OSError:
current.log.error("Unable to create temp folder %s!" % tempPath)
return
# Set the current working directory
os.chdir(tempPath)
layerName = layer["shapefile"]
# Check if file has already been downloaded
fileName = layer["zipfile"]
if not os.path.isfile(fileName):
# Download the file
from gluon.tools import fetch
url = layer["url"]
current.log.debug("Downloading %s" % url)
try:
file = fetch(url)
except urllib2.URLError, exception:
current.log.error(exception)
return
fp = StringIO(file)
else:
current.log.debug("Using existing file %s" % fileName)
fp = open(fileName)
# Unzip it
current.log.debug("Unzipping %s" % layerName)
import zipfile
myfile = zipfile.ZipFile(fp)
for ext in ("dbf", "prj", "sbn", "sbx", "shp", "shx"):
fileName = "%s.%s" % (layerName, ext)
file = myfile.read(fileName)
f = open(fileName, "w")
f.write(file)
f.close()
myfile.close()
# Use OGR to read Shapefile
current.log.debug("Opening %s.shp" % layerName)
ds = ogr.Open("%s.shp" % layerName)
if ds is None:
current.log.error("Open failed.\n")
return
lyr = ds.GetLayerByName(layerName)
lyr.ResetReading()
codeField = layer["codefield"]
code2Field = layer["code2field"]
for feat in lyr:
code = feat.GetField(codeField)
if not code:
# Skip the entries which aren't countries
continue
if countries and code not in countries:
# Skip the countries which we're not interested in
continue
geom = feat.GetGeometryRef()
if geom is not None:
if geom.GetGeometryType() == ogr.wkbPoint:
pass
else:
query = (table.id == ttable.location_id) & \
(ttable.tag == "ISO2") & \
(ttable.value == code)
wkt = geom.ExportToWkt()
if wkt.startswith("LINESTRING"):
gis_feature_type = 2
elif wkt.startswith("POLYGON"):
gis_feature_type = 3
elif wkt.startswith("MULTIPOINT"):
gis_feature_type = 4
elif wkt.startswith("MULTILINESTRING"):
gis_feature_type = 5
elif wkt.startswith("MULTIPOLYGON"):
gis_feature_type = 6
elif wkt.startswith("GEOMETRYCOLLECTION"):
gis_feature_type = 7
code2 = feat.GetField(code2Field)
#area = feat.GetField("Shape_Area")
try:
id = db(query).select(table.id,
limitby=(0, 1)).first().id
query = (table.id == id)
db(query).update(gis_feature_type=gis_feature_type,
wkt=wkt)
ttable.insert(location_id = id,
tag = "ISO3",
value = code2)
#ttable.insert(location_id = location_id,
# tag = "area",
# value = area)
except db._adapter.driver.OperationalError, exception:
current.log.error(sys.exc_info[1])
else:
current.log.debug("No geometry\n")
# Close the shapefile
ds.Destroy()
db.commit()
# Revert back to the working directory as before.
os.chdir(cwd)
return
# -------------------------------------------------------------------------
def import_gadm1(self, ogr, level="L1", countries=[]):
"""
Import L1 Admin Boundaries into the Locations table from GADMv1
- designed to be called from import_admin_areas()
- assumes a fresh database with just Countries imported
@param ogr - The OGR Python module
@param level - "L1" or "L2"
@param countries - List of ISO2 countrycodes to download data for
defaults to all countries
"""
if level == "L1":
layer = {
"url" : "http://gadm.org/data/gadm_v1_lev1_shp.zip",
"zipfile" : "gadm_v1_lev1_shp.zip",
"shapefile" : "gadm1_lev1",
"namefield" : "NAME_1",
# Uniquely identify the L1 for updates
"sourceCodeField" : "ID_1",
"edenCodeField" : "GADM1",
# Uniquely identify the L0 for parenting the L1s
"parent" : "L0",
"parentSourceCodeField" : "ISO",
"parentEdenCodeField" : "ISO3",
}
elif level == "L2":
layer = {
"url" : "http://biogeo.ucdavis.edu/data/gadm/gadm_v1_lev2_shp.zip",
"zipfile" : "gadm_v1_lev2_shp.zip",
"shapefile" : "gadm_v1_lev2",
"namefield" : "NAME_2",
# Uniquely identify the L2 for updates
"sourceCodeField" : "ID_2",
"edenCodeField" : "GADM2",
# Uniquely identify the L0 for parenting the L1s
"parent" : "L1",
"parentSourceCodeField" : "ID_1",
"parentEdenCodeField" : "GADM1",
}
else:
current.log.warning("Level %s not supported!" % level)
return
import csv
import shutil
import zipfile
db = current.db
s3db = current.s3db
cache = s3db.cache
table = s3db.gis_location
ttable = s3db.gis_location_tag
csv.field_size_limit(2**20 * 100) # 100 megs
# Not all the data is encoded like this
# (unable to determine encoding - appears to be damaged in source):
# Azerbaijan L1
# Vietnam L1 & L2
ENCODING = "cp1251"
# from http://docs.python.org/library/csv.html#csv-examples
def latin_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
for row in csv.reader(unicode_csv_data):
yield [unicode(cell, ENCODING) for cell in row]
def latin_dict_reader(data, dialect=csv.excel, **kwargs):
reader = latin_csv_reader(data, dialect=dialect, **kwargs)
headers = reader.next()
for r in reader:
yield dict(zip(headers, r))
# Copy the current working directory to revert back to later
cwd = os.getcwd()
# Create the working directory
TEMP = os.path.join(cwd, "temp")
if not os.path.exists(TEMP): # use web2py/temp/GADMv1 as a cache
import tempfile
TEMP = tempfile.gettempdir()
tempPath = os.path.join(TEMP, "GADMv1")
if not os.path.exists(tempPath):
try:
os.mkdir(tempPath)
except OSError:
current.log.error("Unable to create temp folder %s!" % tempPath)
return
# Set the current working directory
os.chdir(tempPath)
# Remove any existing CSV folder to allow the new one to be created
try:
shutil.rmtree("CSV")
except OSError:
# Folder doesn't exist, so should be creatable
pass
layerName = layer["shapefile"]
# Check if file has already been downloaded
fileName = layer["zipfile"]
if not os.path.isfile(fileName):
# Download the file
from gluon.tools import fetch
url = layer["url"]
current.log.debug("Downloading %s" % url)
try:
file = fetch(url)
except urllib2.URLError, exception:
current.log.error(exception)
# Revert back to the working directory as before.
os.chdir(cwd)
return
fp = StringIO(file)
else:
current.log.debug("Using existing file %s" % fileName)
fp = open(fileName)
# Unzip it
current.log.debug("Unzipping %s" % layerName)
myfile = zipfile.ZipFile(fp)
for ext in ("dbf", "prj", "sbn", "sbx", "shp", "shx"):
fileName = "%s.%s" % (layerName, ext)
file = myfile.read(fileName)
f = open(fileName, "w")
f.write(file)
f.close()
myfile.close()
# Convert to CSV
current.log.debug("Converting %s.shp to CSV" % layerName)
# Simplified version of generic Shapefile Importer:
# http://svn.osgeo.org/gdal/trunk/gdal/swig/python/samples/ogr2ogr.py
bSkipFailures = False
nGroupTransactions = 200
nFIDToFetch = ogr.NullFID
inputFileName = "%s.shp" % layerName
inputDS = ogr.Open(inputFileName, False)
outputFileName = "CSV"
outputDriver = ogr.GetDriverByName("CSV")
outputDS = outputDriver.CreateDataSource(outputFileName, options=[])
# GADM only has 1 layer/source
inputLayer = inputDS.GetLayer(0)
inputFDefn = inputLayer.GetLayerDefn()
# Create the output Layer
outputLayer = outputDS.CreateLayer(layerName)
# Copy all Fields
#papszFieldTypesToString = []
inputFieldCount = inputFDefn.GetFieldCount()
panMap = [-1 for i in range(inputFieldCount)]
outputFDefn = outputLayer.GetLayerDefn()
nDstFieldCount = 0
if outputFDefn is not None:
nDstFieldCount = outputFDefn.GetFieldCount()
for iField in range(inputFieldCount):
inputFieldDefn = inputFDefn.GetFieldDefn(iField)
oFieldDefn = ogr.FieldDefn(inputFieldDefn.GetNameRef(),
inputFieldDefn.GetType())
oFieldDefn.SetWidth(inputFieldDefn.GetWidth())
oFieldDefn.SetPrecision(inputFieldDefn.GetPrecision())
# The field may have been already created at layer creation
iDstField = -1;
if outputFDefn is not None:
iDstField = outputFDefn.GetFieldIndex(oFieldDefn.GetNameRef())
if iDstField >= 0:
panMap[iField] = iDstField
elif outputLayer.CreateField(oFieldDefn) == 0:
# now that we've created a field, GetLayerDefn() won't return NULL
if outputFDefn is None:
outputFDefn = outputLayer.GetLayerDefn()
panMap[iField] = nDstFieldCount
nDstFieldCount = nDstFieldCount + 1
# Transfer features
nFeaturesInTransaction = 0
#iSrcZField = -1
inputLayer.ResetReading()
if nGroupTransactions > 0:
outputLayer.StartTransaction()
while True:
poDstFeature = None
if nFIDToFetch != ogr.NullFID:
# Only fetch feature on first pass.
if nFeaturesInTransaction == 0:
poFeature = inputLayer.GetFeature(nFIDToFetch)
else:
poFeature = None
else:
poFeature = inputLayer.GetNextFeature()
if poFeature is None:
break
nParts = 0
nIters = 1
for iPart in range(nIters):
nFeaturesInTransaction = nFeaturesInTransaction + 1
if nFeaturesInTransaction == nGroupTransactions:
outputLayer.CommitTransaction()
outputLayer.StartTransaction()
nFeaturesInTransaction = 0
poDstFeature = ogr.Feature(outputLayer.GetLayerDefn())
if poDstFeature.SetFromWithMap(poFeature, 1, panMap) != 0:
if nGroupTransactions > 0:
outputLayer.CommitTransaction()
current.log.error("Unable to translate feature %d from layer %s" % \
(poFeature.GetFID(), inputFDefn.GetName()))
# Revert back to the working directory as before.
os.chdir(cwd)
return
poDstGeometry = poDstFeature.GetGeometryRef()
if poDstGeometry is not None:
if nParts > 0:
# For -explodecollections, extract the iPart(th) of the geometry
poPart = poDstGeometry.GetGeometryRef(iPart).Clone()
poDstFeature.SetGeometryDirectly(poPart)
poDstGeometry = poPart
if outputLayer.CreateFeature(poDstFeature) != 0 and \
not bSkipFailures:
if nGroupTransactions > 0:
outputLayer.RollbackTransaction()
# Revert back to the working directory as before.
os.chdir(cwd)
return
if nGroupTransactions > 0:
outputLayer.CommitTransaction()
# Cleanup
outputDS.Destroy()
inputDS.Destroy()
fileName = "%s.csv" % layerName
filePath = os.path.join("CSV", fileName)
os.rename(filePath, fileName)
os.removedirs("CSV")
# Use OGR to read SHP for geometry
current.log.debug("Opening %s.shp" % layerName)
ds = ogr.Open("%s.shp" % layerName)
if ds is None:
current.log.debug("Open failed.\n")
# Revert back to the working directory as before.
os.chdir(cwd)
return
lyr = ds.GetLayerByName(layerName)
lyr.ResetReading()
# Use CSV for Name
current.log.debug("Opening %s.csv" % layerName)
rows = latin_dict_reader(open("%s.csv" % layerName))
nameField = layer["namefield"]
sourceCodeField = layer["sourceCodeField"]
edenCodeField = layer["edenCodeField"]
parentSourceCodeField = layer["parentSourceCodeField"]
parentLevel = layer["parent"]
parentEdenCodeField = layer["parentEdenCodeField"]
parentCodeQuery = (ttable.tag == parentEdenCodeField)
count = 0
for row in rows:
# Read Attributes
feat = lyr[count]
parentCode = feat.GetField(parentSourceCodeField)
query = (table.level == parentLevel) & \
parentCodeQuery & \
(ttable.value == parentCode)
parent = db(query).select(table.id,
ttable.value,
limitby=(0, 1),
cache=cache).first()
if not parent:
# Skip locations for which we don't have a valid parent
current.log.warning("Skipping - cannot find parent with key: %s, value: %s" % \
(parentEdenCodeField, parentCode))
count += 1
continue
if countries:
# Skip the countries which we're not interested in
if level == "L1":
if parent["gis_location_tag"].value not in countries:
#current.log.warning("Skipping %s as not in countries list" % parent["gis_location_tag"].value)
count += 1
continue
else:
# Check grandparent
country = self.get_parent_country(parent.id,
key_type="code")
if country not in countries:
count += 1
continue
# This is got from CSV in order to be able to handle the encoding
name = row.pop(nameField)
name.encode("utf8")
code = feat.GetField(sourceCodeField)
#area = feat.GetField("Shape_Area")
geom = feat.GetGeometryRef()
if geom is not None:
if geom.GetGeometryType() == ogr.wkbPoint:
lat = geom.GetX()
lon = geom.GetY()
id = table.insert(name=name,
level=level,
gis_feature_type=1,
lat=lat,
lon=lon,
parent=parent.id)
ttable.insert(location_id = id,
tag = edenCodeField,
value = code)
# ttable.insert(location_id = id,
# tag = "area",
# value = area)
else:
wkt = geom.ExportToWkt()
if wkt.startswith("LINESTRING"):
gis_feature_type = 2
elif wkt.startswith("POLYGON"):
gis_feature_type = 3
elif wkt.startswith("MULTIPOINT"):
gis_feature_type = 4
elif wkt.startswith("MULTILINESTRING"):
gis_feature_type = 5
elif wkt.startswith("MULTIPOLYGON"):
gis_feature_type = 6
elif wkt.startswith("GEOMETRYCOLLECTION"):
gis_feature_type = 7
id = table.insert(name=name,
level=level,
gis_feature_type=gis_feature_type,
wkt=wkt,
parent=parent.id)
ttable.insert(location_id = id,
tag = edenCodeField,
value = code)
# ttable.insert(location_id = id,
# tag = "area",
# value = area)
else:
current.log.debug("No geometry\n")
count += 1
# Close the shapefile
ds.Destroy()
db.commit()
current.log.debug("Updating Location Tree...")
try:
self.update_location_tree()
except MemoryError:
# If doing all L2s, it can break memory limits
# @ToDo: Check now that we're doing by level
current.log.critical("Memory error when trying to update_location_tree()!")
db.commit()
# Revert back to the working directory as before.
os.chdir(cwd)
return
# -------------------------------------------------------------------------
    @staticmethod
    def import_gadm2(ogr, level="L0", countries=[]):
        """
            Import Admin Boundaries into the Locations table from GADMv2
            - designed to be called from import_admin_areas()
            - assumes that basic prepop has been done, so that no new L0 records need to be created
            @param ogr - The OGR Python module
            @param level - the hierarchy level to import: "L0", "L1" or "L2"
            @param countries - List of ISO2 countrycodes to download data for
                               defaults to all countries
            @ToDo: Complete this
                   - not currently possible to get all data from the 1 file easily
                   - no ISO2
                   - needs updating for gis_location_tag model
                   - only the lowest available levels accessible
                   - use GADMv1 for L0, L1, L2 & GADMv2 for specific lower?

            NOTE(review): this method is knowingly incomplete (see the
            FIXMEs below) - the record-lookup "query" is never built, so
            executing the update loop raises NameError.
        """
        # Map the requested level onto the GADM attribute fields
        if level == "L0":
            codeField = "ISO2" # This field is used to uniquely identify the L0 for updates
            code2Field = "ISO" # This field is used to uniquely identify the L0 for parenting the L1s
        elif level == "L1":
            #nameField = "NAME_1"
            codeField = "ID_1" # This field is used to uniquely identify the L1 for updates
            code2Field = "ISO" # This field is used to uniquely identify the L0 for parenting the L1s
            #parent = "L0"
            #parentCode = "code2"
        elif level == "L2":
            #nameField = "NAME_2"
            codeField = "ID_2" # This field is used to uniquely identify the L2 for updates
            code2Field = "ID_1" # This field is used to uniquely identify the L1 for parenting the L2s
            #parent = "L1"
            #parentCode = "code"
        else:
            current.log.error("Level %s not supported!" % level)
            return
        db = current.db
        s3db = current.s3db
        table = s3db.gis_location
        url = "http://gadm.org/data2/gadm_v2_shp.zip"
        # NOTE(review): "zipfile" here shadows the stdlib module imported
        # further down; this only works because the string is consumed
        # before the import rebinds the name
        zipfile = "gadm_v2_shp.zip"
        shapefile = "gadm2"
        # Copy the current working directory to revert back to later
        old_working_directory = os.getcwd()
        # Create the working directory
        if os.path.exists(os.path.join(os.getcwd(), "temp")): # use web2py/temp/GADMv2 as a cache
            TEMP = os.path.join(os.getcwd(), "temp")
        else:
            import tempfile
            TEMP = tempfile.gettempdir()
        tempPath = os.path.join(TEMP, "GADMv2")
        try:
            os.mkdir(tempPath)
        except OSError:
            # Folder already exists - reuse
            pass
        # Set the current working directory
        os.chdir(tempPath)
        layerName = shapefile
        # Check if file has already been downloaded
        fileName = zipfile
        if not os.path.isfile(fileName):
            # Download the file
            from gluon.tools import fetch
            current.log.debug("Downloading %s" % url)
            try:
                file = fetch(url)
            except urllib2.URLError, exception:
                current.log.error(exception)
                return
            fp = StringIO(file)
        else:
            current.log.debug("Using existing file %s" % fileName)
            fp = open(fileName)
        # Unzip it
        current.log.debug("Unzipping %s" % layerName)
        import zipfile
        myfile = zipfile.ZipFile(fp)
        # Extract each shapefile component alongside the zip
        for ext in ("dbf", "prj", "sbn", "sbx", "shp", "shx"):
            fileName = "%s.%s" % (layerName, ext)
            file = myfile.read(fileName)
            f = open(fileName, "w")
            f.write(file)
            f.close()
        myfile.close()
        # Use OGR to read Shapefile
        current.log.debug("Opening %s.shp" % layerName)
        ds = ogr.Open("%s.shp" % layerName)
        if ds is None:
            current.log.debug("Open failed.\n")
            return
        lyr = ds.GetLayerByName(layerName)
        lyr.ResetReading()
        for feat in lyr:
            code = feat.GetField(codeField)
            if not code:
                # Skip the entries which aren't countries
                continue
            if countries and code not in countries:
                # Skip the countries which we're not interested in
                continue
            geom = feat.GetGeometryRef()
            if geom is not None:
                if geom.GetGeometryType() == ogr.wkbPoint:
                    pass
                else:
                    ## FIXME
                    ##query = (table.code == code)
                    wkt = geom.ExportToWkt()
                    if wkt.startswith("LINESTRING"):
                        gis_feature_type = 2
                    elif wkt.startswith("POLYGON"):
                        gis_feature_type = 3
                    elif wkt.startswith("MULTIPOINT"):
                        gis_feature_type = 4
                    elif wkt.startswith("MULTILINESTRING"):
                        gis_feature_type = 5
                    elif wkt.startswith("MULTIPOLYGON"):
                        gis_feature_type = 6
                    elif wkt.startswith("GEOMETRYCOLLECTION"):
                        gis_feature_type = 7
                    #code2 = feat.GetField(code2Field)
                    #area = feat.GetField("Shape_Area")
                    try:
                        ## FIXME
                        # NOTE(review): "query" is undefined here (the line
                        # building it is commented out above), so this
                        # raises NameError, not OperationalError
                        db(query).update(gis_feature_type=gis_feature_type,
                                         wkt=wkt)
                        #code2=code2,
                        #area=area
                    except db._adapter.driver.OperationalError, exception:
                        current.log.error(exception)
            else:
                current.log.debug("No geometry\n")
        # Close the shapefile
        ds.Destroy()
        db.commit()
        # Revert back to the working directory as before.
        os.chdir(old_working_directory)
        return
# -------------------------------------------------------------------------
    def import_geonames(self, country, level=None):
        """
            Import Locations from the Geonames database

            @param country: the 2-letter country code
            @param level: the ADM level to import

            Designed to be run from the CLI
            Levels should be imported sequentially.
            It is assumed that L0 exists in the DB already
            L1-L3 may have been imported from Shapefiles with Polygon info
            Geonames can then be used to populate the lower levels of hierarchy
        """
        import codecs
        from shapely.geometry import point
        from shapely.geos import ReadingError
        from shapely.wkt import loads as wkt_loads
        try:
            # Enable C-based speedups available from 1.2.10+
            from shapely import speedups
            speedups.enable()
        except:
            current.log.info("S3GIS",
                             "Upgrade Shapely for Performance enhancements")
        db = current.db
        s3db = current.s3db
        #cache = s3db.cache
        request = current.request
        #settings = current.deployment_settings
        table = s3db.gis_location
        ttable = s3db.gis_location_tag
        url = "http://download.geonames.org/export/dump/" + country + ".zip"
        cachepath = os.path.join(request.folder, "cache")
        filename = country + ".txt"
        filepath = os.path.join(cachepath, filename)
        # Reuse a previously-downloaded dump if it is readable
        if os.access(filepath, os.R_OK):
            cached = True
        else:
            cached = False
            if not os.access(cachepath, os.W_OK):
                current.log.error("Folder not writable", cachepath)
                return
        if not cached:
            # Download File
            from gluon.tools import fetch
            try:
                f = fetch(url)
            except (urllib2.URLError,):
                e = sys.exc_info()[1]
                current.log.error("URL Error", e)
                return
            except (urllib2.HTTPError,):
                e = sys.exc_info()[1]
                current.log.error("HTTP Error", e)
                return
            # Unzip File
            # "PK" is the zip magic number
            if f[:2] == "PK":
                # Unzip
                fp = StringIO(f)
                import zipfile
                myfile = zipfile.ZipFile(fp)
                try:
                    # Python 2.6+ only :/
                    # For now, 2.5 users need to download/unzip manually to cache folder
                    myfile.extract(filename, cachepath)
                    myfile.close()
                except IOError:
                    current.log.error("Zipfile contents don't seem correct!")
                    myfile.close()
                    return
        # NOTE(review): if the download was not a zip (no "PK" header), the
        # cache file was never written and this open() will raise IOError
        f = codecs.open(filepath, encoding="utf-8")
        # Downloaded file is worth keeping
        #os.remove(filepath)
        # Map the requested level onto the Geonames feature code to import
        if level == "L1":
            fc = "ADM1"
            parent_level = "L0"
        elif level == "L2":
            fc = "ADM2"
            parent_level = "L1"
        elif level == "L3":
            fc = "ADM3"
            parent_level = "L2"
        elif level == "L4":
            fc = "ADM4"
            parent_level = "L3"
        else:
            # 5 levels of hierarchy or 4?
            # @ToDo make more extensible still
            #gis_location_hierarchy = self.get_location_hierarchy()
            try:
                #label = gis_location_hierarchy["L5"]
                level = "L5"
                parent_level = "L4"
            except:
                # ADM4 data in Geonames isn't always good (e.g. PK bad)
                level = "L4"
                parent_level = "L3"
            finally:
                fc = "PPL"
        deleted = (table.deleted == False)
        query = deleted & (table.level == parent_level)
        # Do the DB query once (outside loop)
        all_parents = db(query).select(table.wkt,
                                       table.lon_min,
                                       table.lon_max,
                                       table.lat_min,
                                       table.lat_max,
                                       table.id)
        if not all_parents:
            # No locations in the parent level found
            # - use the one higher instead
            # NOTE(review): this *increments* the level number (e.g. L1->L2),
            # i.e. moves down the hierarchy - confirm this is the intent
            parent_level = "L" + str(int(parent_level[1:]) + 1)
            query = deleted & (table.level == parent_level)
            all_parents = db(query).select(table.wkt,
                                           table.lon_min,
                                           table.lon_max,
                                           table.lat_min,
                                           table.lat_max,
                                           table.id)
        # Parse File
        current_row = 0
        for line in f:
            current_row += 1
            # Format of file: http://download.geonames.org/export/dump/readme.txt
            geonameid, \
                name, \
                asciiname, \
                alternatenames, \
                lat, \
                lon, \
                feature_class, \
                feature_code, \
                country_code, \
                cc2, \
                admin1_code, \
                admin2_code, \
                admin3_code, \
                admin4_code, \
                population, \
                elevation, \
                gtopo30, \
                timezone, \
                modification_date = line.split("\t")
            if feature_code == fc:
                # Add WKT
                lat = float(lat)
                lon = float(lon)
                wkt = self.latlon_to_wkt(lat, lon)
                shape = point.Point(lon, lat)
                # Add Bounds
                lon_min = lon_max = lon
                lat_min = lat_max = lat
                # Locate Parent
                # (left as "" if no parent bbox/geometry matches)
                parent = ""
                # 1st check for Parents whose bounds include this location (faster)
                def in_bbox(row):
                    return (row.lon_min < lon_min) & \
                           (row.lon_max > lon_max) & \
                           (row.lat_min < lat_min) & \
                           (row.lat_max > lat_max)
                for row in all_parents.find(lambda row: in_bbox(row)):
                    # Search within this subset with a full geometry check
                    # Uses Shapely.
                    # @ToDo provide option to use PostGIS/Spatialite
                    try:
                        parent_shape = wkt_loads(row.wkt)
                        if parent_shape.intersects(shape):
                            parent = row.id
                            # Should be just a single parent
                            break
                    except ReadingError:
                        current.log.error("Error reading wkt of location with id", row.id)
                # Add entry to database
                new_id = table.insert(name=name,
                                      level=level,
                                      parent=parent,
                                      lat=lat,
                                      lon=lon,
                                      wkt=wkt,
                                      lon_min=lon_min,
                                      lon_max=lon_max,
                                      lat_min=lat_min,
                                      lat_max=lat_max)
                # Keep the link back to the Geonames record
                ttable.insert(location_id=new_id,
                              tag="geonames",
                              value=geonameid)
            else:
                continue
        current.log.debug("All done!")
        return
# -------------------------------------------------------------------------
@staticmethod
def latlon_to_wkt(lat, lon):
"""
Convert a LatLon to a WKT string
>>> s3gis.latlon_to_wkt(6, 80)
'POINT(80 6)'
"""
WKT = "POINT(%f %f)" % (lon, lat)
return WKT
# -------------------------------------------------------------------------
@staticmethod
def parse_location(wkt, lon=None, lat=None):
"""
Parses a location from wkt, returning wkt, lat, lon, bounding box and type.
For points, wkt may be None if lat and lon are provided; wkt will be generated.
For lines and polygons, the lat, lon returned represent the shape's centroid.
Centroid and bounding box will be None if Shapely is not available.
"""
if not wkt:
if not lon is not None and lat is not None:
raise RuntimeError, "Need wkt or lon+lat to parse a location"
wkt = "POINT(%f %f)" % (lon, lat)
geom_type = GEOM_TYPES["point"]
bbox = (lon, lat, lon, lat)
else:
try:
from shapely.wkt import loads as wkt_loads
SHAPELY = True
except:
SHAPELY = False
if SHAPELY:
shape = wkt_loads(wkt)
centroid = shape.centroid
lat = centroid.y
lon = centroid.x
geom_type = GEOM_TYPES[shape.type.lower()]
bbox = shape.bounds
else:
lat = None
lon = None
geom_type = GEOM_TYPES[wkt.split("(")[0].lower()]
bbox = None
res = {"wkt": wkt, "lat": lat, "lon": lon, "gis_feature_type": geom_type}
if bbox:
res["lon_min"], res["lat_min"], res["lon_max"], res["lat_max"] = bbox
return res
# -------------------------------------------------------------------------
@staticmethod
def update_location_tree(feature=None, all_locations=False):
"""
Update GIS Locations' Materialized path, Lx locations, Lat/Lon & the_geom
@param feature: a feature dict to update the tree for
- if not provided then update the whole tree
@param all_locations: passed to recursive calls to indicate that this
is an update of the whole tree. Used to avoid repeated attempts to
update hierarchy locations with missing data (e.g. lacking some
ancestor level).
returns the path of the feature
Called onaccept for locations (async, where-possible)
"""
# During prepopulate, for efficiency, we don't update the location
# tree, but rather leave that til after prepopulate is complete.
if GIS.disable_update_location_tree:
return None
db = current.db
try:
table = db.gis_location
except:
table = current.s3db.gis_location
spatial = current.deployment_settings.get_gis_spatialdb()
update_location_tree = GIS.update_location_tree
wkt_centroid = GIS.wkt_centroid
fields = (table.id,
table.name,
table.level,
table.path,
table.parent,
table.L0,
table.L1,
table.L2,
table.L3,
table.L4,
table.L5,
table.lat,
table.lon,
table.wkt,
table.inherited
)
# ---------------------------------------------------------------------
def fixup(feature):
"""
Fix all the issues with a Feature, assuming that
- the corrections are in the feature
- or they are Bounds / Centroid / WKT / the_geom issues
"""
form = Storage()
form.vars = form_vars = feature
form.errors = Storage()
if not form_vars.get("wkt"):
# Point
form_vars.update(gis_feature_type="1")
# Calculate Bounds / Centroid / WKT / the_geom
wkt_centroid(form)
if form.errors:
current.log.error("S3GIS: %s" % form.errors)
else:
wkt = form_vars.wkt
if wkt and not wkt.startswith("POI"):
# Polygons aren't inherited
form_vars.update(inherited = False)
if "update_record" in form_vars:
# Must be a Row
new_vars = {}
table_fields = table.fields
for v in form_vars:
if v in table_fields:
new_vars[v] = form_vars[v]
form_vars = new_vars
try:
db(table.id == feature.id).update(**form_vars)
except MemoryError:
current.log.error("S3GIS: Unable to set bounds & centroid for feature %s: MemoryError" % feature.id)
# ---------------------------------------------------------------------
def propagate(parent):
"""
Propagate Lat/Lon down to any Features which inherit from this one
@param parent: gis_location id of parent
@param all_locations: passed to recursive calls to indicate that
this is an update of the whole tree
"""
query = (table.parent == parent) & \
(table.inherited == True)
rows = db(query).select(*fields)
for row in rows:
try:
update_location_tree(row)
except RuntimeError:
current.log.error("Cannot propagate inherited latlon to child %s of location ID %s: too much recursion" % \
(row.id, parent))
if not feature:
# We are updating all locations.
all_locations = True
# Do in chunks to save memory and also do in correct order
all_fields = (table.id, table.name, table.gis_feature_type,
table.L0, table.L1, table.L2, table.L3, table.L4,
table.lat, table.lon, table.wkt, table.inherited,
# Handle Countries which start with Bounds set, yet are Points
table.lat_min, table.lon_min, table.lat_max, table.lon_max,
table.path, table.parent)
for level in ("L0", "L1", "L2", "L3", "L4", "L5", None):
query = (table.level == level) & (table.deleted == False)
try:
features = db(query).select(*all_fields)
except MemoryError:
current.log.error("S3GIS: Unable to update Location Tree for level %s: MemoryError" % level)
else:
for feature in features:
feature["level"] = level
wkt = feature["wkt"]
if wkt and not wkt.startswith("POI"):
# Polygons aren't inherited
feature["inherited"] = False
update_location_tree(feature) # all_locations is False here
# All Done!
return
# Single Feature
id = str(feature["id"]) if "id" in feature else None
if not id:
# Nothing we can do
raise ValueError
# L0
level = feature.get("level", False)
name = feature.get("name", False)
path = feature.get("path", False)
# If we're processing all locations, and this is a hierarchy location,
# and has already been processed (as evidenced by having a path) do not
# process it again. Locations with a gap in their ancestor levels will
# be regarded as missing data and sent through update_location_tree
# recursively, but that missing data will not be filled in after the
# location is processed once during the all-locations call.
if all_locations and path and level:
# This hierarchy location is already finalized.
return path
lat = feature.get("lat", False)
lon = feature.get("lon", False)
wkt = feature.get("wkt", False)
L0 = feature.get("L0", False)
if level == "L0":
if name is False or path is False or lat is False or lon is False or \
wkt is False or L0 is False:
# Get the whole feature
feature = db(table.id == id).select(table.id,
table.name,
table.path,
table.lat,
table.lon,
table.wkt,
table.L0,
limitby=(0, 1)).first()
name = feature.name
path = feature.path
lat = feature.lat
lon = feature.lon
wkt = feature.wkt
L0 = feature.L0
if path != id or L0 != name or not wkt or lat is None:
# Fix everything up
path = id
if lat is False:
lat = None
if lon is False:
lon = None
fix_vars = dict(inherited = False,
path = path,
lat = lat,
lon = lon,
wkt = wkt or None,
L0 = name,
L1 = None,
L2 = None,
L3 = None,
L4 = None,
L5 = None,
)
feature.update(**fix_vars)
fixup(feature)
if not all_locations:
# Ensure that any locations which inherit their latlon from this one get updated
propagate(id)
return path
fixup_required = False
# L1
inherited = feature.get("inherited", None)
parent = feature.get("parent", False)
L1 = feature.get("L1", False)
if level == "L1":
if inherited is None or name is False or parent is False or path is False or \
lat is False or lon is False or wkt is False or \
L0 is False or L1 is False:
# Get the whole feature
feature = db(table.id == id).select(table.id,
table.inherited,
table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.wkt,
table.L0,
table.L1,
limitby=(0, 1)).first()
inherited = feature.inherited
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
wkt = feature.wkt
L0 = feature.L0
L1 = feature.L1
if parent:
_path = "%s/%s" % (parent, id)
_L0 = db(table.id == parent).select(table.name,
table.lat,
table.lon,
limitby=(0, 1)).first()
L0_name = _L0.name
L0_lat = _L0.lat
L0_lon = _L0.lon
else:
_path = id
L0_name = None
L0_lat = None
L0_lon = None
if inherited or lat is None or lon is None:
fixup_required = True
inherited = True
lat = L0_lat
lon = L0_lon
elif path != _path or L0 != L0_name or L1 != name or not wkt:
fixup_required = True
if fixup_required:
# Fix everything up
if lat is False:
lat = None
if lon is False:
lon = None
fix_vars = dict(inherited = inherited,
path = _path,
lat = lat,
lon = lon,
wkt = wkt or None,
L0 = L0_name,
L1 = name,
L2 = None,
L3 = None,
L4 = None,
L5 = None,
)
feature.update(**fix_vars)
fixup(feature)
if not all_locations:
# Ensure that any locations which inherit their latlon from this one get updated
propagate(id)
return _path
# L2
L2 = feature.get("L2", False)
if level == "L2":
if inherited is None or name is False or parent is False or path is False or \
lat is False or lon is False or wkt is False or \
L0 is False or L1 is False or L2 is False:
# Get the whole feature
feature = db(table.id == id).select(table.id,
table.inherited,
table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.wkt,
table.L0,
table.L1,
table.L2,
limitby=(0, 1)).first()
inherited = feature.inherited
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
wkt = feature.wkt
L0 = feature.L0
L1 = feature.L1
L2 = feature.L2
if parent:
Lx = db(table.id == parent).select(table.name,
table.level,
table.parent,
table.lat,
table.lon,
limitby=(0, 1)).first()
if Lx.level == "L1":
L1_name = Lx.name
_parent = Lx.parent
if _parent:
_path = "%s/%s/%s" % (_parent, parent, id)
L0_name = db(table.id == _parent).select(table.name,
limitby=(0, 1),
cache=current.s3db.cache
).first().name
else:
_path = "%s/%s" % (parent, id)
L0_name = None
elif Lx.level == "L0":
_path = "%s/%s" % (parent, id)
L0_name = Lx.name
L1_name = None
else:
current.log.error("Parent of L2 Location ID %s has invalid level: %s is %s" % \
(id, parent, Lx.level))
#raise ValueError
return "%s/%s" % (parent, id)
Lx_lat = Lx.lat
Lx_lon = Lx.lon
else:
_path = id
L0_name = None
L1_name = None
Lx_lat = None
Lx_lon = None
if inherited or lat is None or lon is None:
fixup_required = True
inherited = True
lat = Lx_lat
lon = Lx_lon
wkt = None
elif path != _path or L0 != L0_name or L1 != L1_name or L2 != name or not wkt:
fixup_required = True
if fixup_required:
# Fix everything up
if lat is False:
lat = None
if lon is False:
lon = None
fix_vars = dict(inherited = inherited,
path = _path,
lat = lat,
lon = lon,
wkt = wkt or None,
L0 = L0_name,
L1 = L1_name,
L2 = name,
L3 = None,
L4 = None,
L5 = None,
)
feature.update(**fix_vars)
fixup(feature)
if not all_locations:
# Ensure that any locations which inherit their latlon from this one get updated
propagate(id)
return _path
# L3
L3 = feature.get("L3", False)
if level == "L3":
if inherited is None or name is False or parent is False or path is False or \
lat is False or lon is False or wkt is False or \
L0 is False or L1 is False or L2 is False or L3 is False:
# Get the whole feature
feature = db(table.id == id).select(table.id,
table.inherited,
table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.wkt,
table.L0,
table.L1,
table.L2,
table.L3,
limitby=(0, 1)).first()
inherited = feature.inherited
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
wkt = feature.wkt
L0 = feature.L0
L1 = feature.L1
L2 = feature.L2
L3 = feature.L3
if parent:
Lx = db(table.id == parent).select(table.id,
table.name,
table.level,
table.parent,
table.path,
table.lat,
table.lon,
table.L0,
table.L1,
limitby=(0, 1)).first()
if Lx.level == "L2":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.name
_path = Lx.path
if _path and L0_name and L1_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
elif Lx.level == "L1":
L0_name = Lx.L0
L1_name = Lx.name
L2_name = None
_path = Lx.path
if _path and L0_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
elif Lx.level == "L0":
_path = "%s/%s" % (parent, id)
L0_name = Lx.name
L1_name = None
L2_name = None
else:
current.log.error("Parent of L3 Location ID %s has invalid level: %s is %s" % \
(id, parent, Lx.level))
#raise ValueError
return "%s/%s" % (parent, id)
Lx_lat = Lx.lat
Lx_lon = Lx.lon
else:
_path = id
L0_name = None
L1_name = None
L2_name = None
Lx_lat = None
Lx_lon = None
if inherited or lat is None or lon is None:
fixup_required = True
inherited = True
lat = Lx_lat
lon = Lx_lon
wkt = None
elif path != _path or L0 != L0_name or L1 != L1_name or L2 != L2_name or L3 != name or not wkt:
fixup_required = True
if fixup_required:
# Fix everything up
if lat is False:
lat = None
if lon is False:
lon = None
fix_vars = dict(inherited = inherited,
path = _path,
lat = lat,
lon = lon,
wkt = wkt or None,
L0 = L0_name,
L1 = L1_name,
L2 = L2_name,
L3 = name,
L4 = None,
L5 = None,
)
feature.update(**fix_vars)
fixup(feature)
if not all_locations:
# Ensure that any locations which inherit their latlon from this one get updated
propagate(id)
return _path
# L4
L4 = feature.get("L4", False)
if level == "L4":
if inherited is None or name is False or parent is False or path is False or \
lat is False or lon is False or wkt is False or \
L0 is False or L1 is False or L2 is False or L3 is False or L4 is False:
# Get the whole feature
feature = db(table.id == id).select(table.id,
table.inherited,
table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.wkt,
table.L0,
table.L1,
table.L2,
table.L3,
table.L4,
limitby=(0, 1)).first()
inherited = feature.inherited
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
wkt = feature.wkt
L0 = feature.L0
L1 = feature.L1
L2 = feature.L2
L3 = feature.L3
L4 = feature.L4
if parent:
Lx = db(table.id == parent).select(table.id,
table.name,
table.level,
table.parent,
table.path,
table.lat,
table.lon,
table.L0,
table.L1,
table.L2,
limitby=(0, 1)).first()
if Lx.level == "L3":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.name
_path = Lx.path
if _path and L0_name and L1_name and L2_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.L2,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
elif Lx.level == "L2":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.name
L3_name = None
_path = Lx.path
if _path and L0_name and L1_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
elif Lx.level == "L1":
L0_name = Lx.L0
L1_name = Lx.name
L2_name = None
L3_name = None
_path = Lx.path
if _path and L0_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
elif Lx.level == "L0":
_path = "%s/%s" % (parent, id)
L0_name = Lx.name
L1_name = None
L2_name = None
L3_name = None
else:
current.log.error("Parent of L3 Location ID %s has invalid level: %s is %s" % \
(id, parent, Lx.level))
#raise ValueError
return "%s/%s" % (parent, id)
Lx_lat = Lx.lat
Lx_lon = Lx.lon
else:
_path = id
L0_name = None
L1_name = None
L2_name = None
L3_name = None
Lx_lat = None
Lx_lon = None
if inherited or lat is None or lon is None:
fixup_required = True
inherited = True
lat = Lx_lat
lon = Lx_lon
wkt = None
elif path != _path or L0 != L0_name or L1 != L1_name or L2 != L2_name or L3 != L3_name or L4 != name or not wkt:
fixup_required = True
if fixup_required:
# Fix everything up
if lat is False:
lat = None
if lon is False:
lon = None
fix_vars = dict(inherited = inherited,
path = _path,
lat = lat,
lon = lon,
wkt = wkt or None,
L0 = L0_name,
L1 = L1_name,
L2 = L2_name,
L3 = L3_name,
L4 = name,
L5 = None,
)
feature.update(**fix_vars)
fixup(feature)
if not all_locations:
# Ensure that any locations which inherit their latlon from this one get updated
propagate(id)
return _path
# L5
L5 = feature.get("L5", False)
if level == "L5":
if inherited is None or name is False or parent is False or path is False or \
lat is False or lon is False or wkt is False or \
L0 is False or L1 is False or L2 is False or L3 is False or L4 is False or L5 is False:
# Get the whole feature
feature = db(table.id == id).select(table.id,
table.inherited,
table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.wkt,
table.L0,
table.L1,
table.L2,
table.L3,
table.L4,
table.L5,
limitby=(0, 1)).first()
inherited = feature.inherited
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
wkt = feature.wkt
L0 = feature.L0
L1 = feature.L1
L2 = feature.L2
L3 = feature.L3
L4 = feature.L4
L5 = feature.L5
if parent:
Lx = db(table.id == parent).select(table.id,
table.name,
table.level,
table.parent,
table.path,
table.lat,
table.lon,
table.L0,
table.L1,
table.L2,
table.L3,
limitby=(0, 1)).first()
if Lx.level == "L4":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.L3
L4_name = Lx.name
_path = Lx.path
if _path and L0_name and L1_name and L2_name and L3_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.L2,
table.L3,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.L3
elif Lx.level == "L3":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.name
L4_name = None
_path = Lx.path
if _path and L0_name and L1_name and L2_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.L2,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
elif Lx.level == "L2":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.name
L3_name = None
L4_name = None
_path = Lx.path
if _path and L0_name and L1_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
elif Lx.level == "L1":
L0_name = Lx.L0
L1_name = Lx.name
L2_name = None
L3_name = None
L4_name = None
_path = Lx.path
if _path and L0_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
elif Lx.level == "L0":
_path = "%s/%s" % (parent, id)
L0_name = Lx.name
L1_name = None
L2_name = None
L3_name = None
L4_name = None
else:
current.log.error("Parent of L3 Location ID %s has invalid level: %s is %s" % \
(id, parent, Lx.level))
#raise ValueError
return "%s/%s" % (parent, id)
Lx_lat = Lx.lat
Lx_lon = Lx.lon
else:
_path = id
L0_name = None
L1_name = None
L2_name = None
L3_name = None
L4_name = None
Lx_lat = None
Lx_lon = None
if inherited or lat is None or lon is None:
fixup_required = True
inherited = True
lat = Lx_lat
lon = Lx_lon
wkt = None
elif path != _path or L0 != L0_name or L1 != L1_name or L2 != L2_name or L3 != L3_name or L4 != L4_name or L5 != name or not wkt:
fixup_required = True
if fixup_required:
# Fix everything up
if lat is False:
lat = None
if lon is False:
lon = None
fix_vars = dict(inherited = inherited,
path = _path,
lat = lat,
lon = lon,
wkt = wkt or None,
L0 = L0_name,
L1 = L1_name,
L2 = L2_name,
L3 = L3_name,
L4 = L4_name,
L5 = name,
)
feature.update(**fix_vars)
fixup(feature)
if not all_locations:
# Ensure that any locations which inherit their latlon from this one get updated
propagate(id)
return _path
# Specific Location
# - or unspecified (which we should avoid happening as inefficient)
if inherited is None or level is False or name is False or parent is False or path is False or \
lat is False or lon is False or wkt is False or \
L0 is False or L1 is False or L2 is False or L3 is False or L4 is False or L5 is False:
# Get the whole feature
feature = db(table.id == id).select(table.id,
table.inherited,
table.level,
table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.wkt,
table.L0,
table.L1,
table.L2,
table.L3,
table.L4,
table.L5,
limitby=(0, 1)).first()
inherited = feature.inherited
level = feature.level
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
wkt = feature.wkt
L0 = feature.L0
L1 = feature.L1
L2 = feature.L2
L3 = feature.L3
L4 = feature.L4
L5 = feature.L5
L0_name = name if level == "L0" else None
L1_name = name if level == "L1" else None
L2_name = name if level == "L2" else None
L3_name = name if level == "L3" else None
L4_name = name if level == "L4" else None
L5_name = name if level == "L5" else None
if parent:
Lx = db(table.id == parent).select(table.id,
table.name,
table.level,
table.parent,
table.path,
table.lat,
table.lon,
table.L0,
table.L1,
table.L2,
table.L3,
table.L4,
limitby=(0, 1)).first()
if Lx.level == "L5":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.L3
L4_name = Lx.L4
L5_name = Lx.name
_path = Lx.path
if _path and L0_name and L1_name and L2_name and L3_name and L4_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.L2,
table.L3,
table.L4,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.L3
L4_name = Lx.L4
elif Lx.level == "L4":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.L3
L4_name = Lx.name
_path = Lx.path
if _path and L0_name and L1_name and L2_name and L3_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.L2,
table.L3,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.L3
elif Lx.level == "L3":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.name
_path = Lx.path
if _path and L0_name and L1_name and L2_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.L2,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
elif Lx.level == "L2":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.name
_path = Lx.path
if _path and L0_name and L1_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
elif Lx.level == "L1":
L0_name = Lx.L0
L1_name = Lx.name
_path = Lx.path
if _path and L0_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = update_location_tree(Lx, all_locations)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
elif Lx.level == "L0":
_path = "%s/%s" % (parent, id)
L0_name = Lx.name
else:
current.log.error("Parent of L3 Location ID %s has invalid level: %s is %s" % \
(id, parent, Lx.level))
#raise ValueError
return "%s/%s" % (parent, id)
Lx_lat = Lx.lat
Lx_lon = Lx.lon
else:
_path = id
Lx_lat = None
Lx_lon = None
if inherited or lat is None or lon is None:
fixup_required = True
inherited = True
lat = Lx_lat
lon = Lx_lon
wkt = None
elif path != _path or L0 != L0_name or L1 != L1_name or L2 != L2_name or L3 != L3_name or L4 != L4_name or L5 != L5_name or not wkt:
fixup_required = True
if fixup_required:
# Fix everything up
if lat is False:
lat = None
if lon is False:
lon = None
fix_vars = dict(inherited = inherited,
path = _path,
lat = lat,
lon = lon,
wkt = wkt or None,
L0 = L0_name,
L1 = L1_name,
L2 = L2_name,
L3 = L3_name,
L4 = L4_name,
L5 = L5_name,
)
feature.update(**fix_vars)
fixup(feature)
if not all_locations:
# Ensure that any locations which inherit their latlon from this one get updated
propagate(id)
return _path
# -------------------------------------------------------------------------
    @staticmethod
    def wkt_centroid(form):
        """
            OnValidation callback:
            If a WKT is defined: validate the format,
                calculate the LonLat of the Centroid, and set bounds
            Else if a LonLat is defined: calculate the WKT for the Point.

            @param form: the FORM being validated; reads form.vars and, on
                         success, writes back wkt/gis_feature_type, the
                         centroid lat/lon and the lat/lon min/max bounds;
                         on invalid input, sets form.errors instead
        """

        form_vars = form.vars

        if form_vars.get("gis_feature_type", None) == "1":
            # Point
            lat = form_vars.get("lat", None)
            lon = form_vars.get("lon", None)
            if (lon is None and lat is None) or \
               (lon == "" and lat == ""):
                # No Geometry available
                # Don't clobber existing records (e.g. in Prepop)
                #form_vars.gis_feature_type = "0"
                # Cannot create WKT, so Skip
                return
            elif lat is None or lat == "":
                # Can't just have lon without lat
                form.errors["lat"] = current.messages.lat_empty
            elif lon is None or lon == "":
                form.errors["lon"] = current.messages.lon_empty
            else:
                form_vars.wkt = "POINT(%(lon)s %(lat)s)" % form_vars
                radius = form_vars.get("radius", None)
                if radius:
                    # Bounds are the box enclosing the circle of this radius
                    bbox = GIS.get_bounds_from_radius(lat, lon, radius)
                    form_vars.lat_min = bbox["lat_min"]
                    form_vars.lon_min = bbox["lon_min"]
                    form_vars.lat_max = bbox["lat_max"]
                    form_vars.lon_max = bbox["lon_max"]
                else:
                    # Degenerate bounds (the Point itself), but only fill in
                    # bounds which aren't already set
                    if "lon_min" not in form_vars or form_vars.lon_min is None:
                        form_vars.lon_min = lon
                    if "lon_max" not in form_vars or form_vars.lon_max is None:
                        form_vars.lon_max = lon
                    if "lat_min" not in form_vars or form_vars.lat_min is None:
                        form_vars.lat_min = lat
                    if "lat_max" not in form_vars or form_vars.lat_max is None:
                        form_vars.lat_max = lat

        elif form_vars.get("wkt", None):
            # Parse WKT for LineString, Polygon, etc
            from shapely.wkt import loads as wkt_loads
            try:
                shape = wkt_loads(form_vars.wkt)
            except:
                try:
                    # Perhaps this is really a LINESTRING (e.g. OSM import of an unclosed Way)
                    # Strip the "POLYGON(" prefix and the trailing ")"
                    linestring = "LINESTRING%s" % form_vars.wkt[8:-1]
                    shape = wkt_loads(linestring)
                    form_vars.wkt = linestring
                except:
                    form.errors["wkt"] = current.messages.invalid_wkt
                    return
            # Map the Shapely geometry type to the gis_feature_type code
            gis_feature_type = shape.type
            if gis_feature_type == "Point":
                form_vars.gis_feature_type = 1
            elif gis_feature_type == "LineString":
                form_vars.gis_feature_type = 2
            elif gis_feature_type == "Polygon":
                form_vars.gis_feature_type = 3
            elif gis_feature_type == "MultiPoint":
                form_vars.gis_feature_type = 4
            elif gis_feature_type == "MultiLineString":
                form_vars.gis_feature_type = 5
            elif gis_feature_type == "MultiPolygon":
                form_vars.gis_feature_type = 6
            elif gis_feature_type == "GeometryCollection":
                form_vars.gis_feature_type = 7
            try:
                centroid_point = shape.centroid
                form_vars.lon = centroid_point.x
                form_vars.lat = centroid_point.y
                bounds = shape.bounds
                if gis_feature_type != "Point" or \
                   "lon_min" not in form_vars or form_vars.lon_min is None or \
                   form_vars.lon_min == form_vars.lon_max:
                    # Update bounds unless we have a 'Point' which has already got wider Bounds specified (such as a country)
                    form_vars.lon_min = bounds[0]
                    form_vars.lat_min = bounds[1]
                    form_vars.lon_max = bounds[2]
                    form_vars.lat_max = bounds[3]
            except:
                form.errors.gis_feature_type = current.messages.centroid_error

        elif (form_vars.lon is None and form_vars.lat is None) or \
             (form_vars.lon == "" and form_vars.lat == ""):
            # No Geometry available
            # Don't clobber existing records (e.g. in Prepop)
            #form_vars.gis_feature_type = "0"
            # Cannot create WKT, so Skip
            return
        else:
            # No explicit feature type & no WKT, but a LatLon is present:
            # treat as a Point
            form_vars.gis_feature_type = "1"
            if form_vars.lat is None or form_vars.lat == "":
                form.errors["lat"] = current.messages.lat_empty
            elif form_vars.lon is None or form_vars.lon == "":
                form.errors["lon"] = current.messages.lon_empty
            else:
                form_vars.wkt = "POINT(%(lon)s %(lat)s)" % form_vars
                # Degenerate bounds, only filling in those not already set
                if "lon_min" not in form_vars or form_vars.lon_min is None:
                    form_vars.lon_min = form_vars.lon
                if "lon_max" not in form_vars or form_vars.lon_max is None:
                    form_vars.lon_max = form_vars.lon
                if "lat_min" not in form_vars or form_vars.lat_min is None:
                    form_vars.lat_min = form_vars.lat
                if "lat_max" not in form_vars or form_vars.lat_max is None:
                    form_vars.lat_max = form_vars.lat

        if current.deployment_settings.get_gis_spatialdb():
            # Also populate the spatial field
            form_vars.the_geom = form_vars.wkt

        return
# -------------------------------------------------------------------------
@staticmethod
def query_features_by_bbox(lon_min, lat_min, lon_max, lat_max):
"""
Returns a query of all Locations inside the given bounding box
"""
table = current.s3db.gis_location
query = (table.lat_min <= lat_max) & \
(table.lat_max >= lat_min) & \
(table.lon_min <= lon_max) & \
(table.lon_max >= lon_min)
return query
# -------------------------------------------------------------------------
@staticmethod
def get_features_by_bbox(lon_min, lat_min, lon_max, lat_max):
"""
Returns Rows of Locations whose shape intersects the given bbox.
"""
query = current.gis.query_features_by_bbox(lon_min,
lat_min,
lon_max,
lat_max)
return current.db(query).select()
# -------------------------------------------------------------------------
@staticmethod
def get_features_by_shape(shape):
"""
Returns Rows of locations which intersect the given shape.
Relies on Shapely for wkt parsing and intersection.
@ToDo: provide an option to use PostGIS/Spatialite
"""
from shapely.geos import ReadingError
from shapely.wkt import loads as wkt_loads
try:
# Enable C-based speedups available from 1.2.10+
from shapely import speedups
speedups.enable()
except:
current.log.info("S3GIS",
"Upgrade Shapely for Performance enhancements")
table = current.s3db.gis_location
in_bbox = current.gis.query_features_by_bbox(*shape.bounds)
has_wkt = (table.wkt != None) & (table.wkt != "")
for loc in current.db(in_bbox & has_wkt).select():
try:
location_shape = wkt_loads(loc.wkt)
if location_shape.intersects(shape):
yield loc
except ReadingError:
current.log.error("Error reading wkt of location with id", loc.id)
# -------------------------------------------------------------------------
@staticmethod
def get_features_by_latlon(lat, lon):
"""
Returns a generator of locations whose shape intersects the given LatLon.
Relies on Shapely.
@todo: provide an option to use PostGIS/Spatialite
"""
from shapely.geometry import point
return current.gis.get_features_by_shape(point.Point(lon, lat))
# -------------------------------------------------------------------------
@staticmethod
def get_features_by_feature(feature):
"""
Returns all Locations whose geometry intersects the given feature.
Relies on Shapely.
@ToDo: provide an option to use PostGIS/Spatialite
"""
from shapely.wkt import loads as wkt_loads
shape = wkt_loads(feature.wkt)
return current.gis.get_features_by_shape(shape)
# -------------------------------------------------------------------------
@staticmethod
def set_all_bounds():
"""
Sets bounds for all locations without them.
If shapely is present, and a location has wkt, bounds of the geometry
are used. Otherwise, the (lat, lon) are used as bounds.
"""
try:
from shapely.wkt import loads as wkt_loads
SHAPELY = True
except:
SHAPELY = False
db = current.db
table = current.s3db.gis_location
# Query to find all locations without bounds set
no_bounds = (table.lon_min == None) & \
(table.lat_min == None) & \
(table.lon_max == None) & \
(table.lat_max == None) & \
(table.lat != None) & \
(table.lon != None)
if SHAPELY:
# Refine to those locations with a WKT field
wkt_no_bounds = no_bounds & (table.wkt != None) & (table.wkt != "")
for location in db(wkt_no_bounds).select(table.wkt):
try :
shape = wkt_loads(location.wkt)
except:
current.log.error("Error reading WKT", location.wkt)
continue
bounds = shape.bounds
table[location.id] = dict(lon_min = bounds[0],
lat_min = bounds[1],
lon_max = bounds[2],
lat_max = bounds[3],
)
# Anything left, we assume is a Point, so set the bounds to be the same
db(no_bounds).update(lon_min=table.lon,
lat_min=table.lat,
lon_max=table.lon,
lat_max=table.lat)
# -------------------------------------------------------------------------
@staticmethod
def simplify(wkt,
tolerance=None,
preserve_topology=True,
output="wkt",
decimals=4
):
"""
Simplify a complex Polygon using the Douglas-Peucker algorithm
- NB This uses Python, better performance will be gained by doing
this direct from the database if you are using PostGIS:
ST_Simplify() is available as
db(query).select(table.the_geom.st_simplify(tolerance).st_astext().with_alias('wkt')).first().wkt
db(query).select(table.the_geom.st_simplify(tolerance).st_asgeojson().with_alias('geojson')).first().geojson
@param wkt: the WKT string to be simplified (usually coming from a gis_location record)
@param tolerance: how aggressive a simplification to perform
@param preserve_topology: whether the simplified geometry should be maintained
@param output: whether to output as WKT or GeoJSON format
@param decimals: the number of decimal places to include in the output
"""
from shapely.geometry import Point, LineString, Polygon, MultiPolygon
from shapely.wkt import loads as wkt_loads
try:
# Enable C-based speedups available from 1.2.10+
from shapely import speedups
speedups.enable()
except:
current.log.info("S3GIS",
"Upgrade Shapely for Performance enhancements")
try:
shape = wkt_loads(wkt)
except:
wkt = wkt[10] if wkt else wkt
current.log.error("Invalid Shape: %s" % wkt)
return None
if not tolerance:
tolerance = current.deployment_settings.get_gis_simplify_tolerance()
if tolerance:
shape = shape.simplify(tolerance, preserve_topology)
# Limit the number of decimal places
formatter = ".%sf" % decimals
def shrink_polygon(shape):
""" Helper Function """
points = shape.exterior.coords
coords = []
cappend = coords.append
for point in points:
x = float(format(point[0], formatter))
y = float(format(point[1], formatter))
cappend((x, y))
return Polygon(LineString(coords))
geom_type = shape.geom_type
if geom_type == "MultiPolygon":
polygons = shape.geoms
p = []
pappend = p.append
for polygon in polygons:
pappend(shrink_polygon(polygon))
shape = MultiPolygon([s for s in p])
elif geom_type == "Polygon":
shape = shrink_polygon(shape)
elif geom_type == "LineString":
points = shape.coords
coords = []
cappend = coords.append
for point in points:
x = float(format(point[0], formatter))
y = float(format(point[1], formatter))
cappend((x, y))
shape = LineString(coords)
elif geom_type == "Point":
x = float(format(shape.x, formatter))
y = float(format(shape.y, formatter))
shape = Point(x, y)
else:
current.log.info("Cannot yet shrink Geometry: %s" % geom_type)
# Output
if output == "wkt":
output = shape.to_wkt()
elif output == "geojson":
from ..geojson import dumps
# Compact Encoding
output = dumps(shape, separators=SEPARATORS)
return output
# -------------------------------------------------------------------------
def show_map(self,
id = "default_map",
height = None,
width = None,
bbox = {},
lat = None,
lon = None,
zoom = None,
projection = None,
add_feature = False,
add_feature_active = False,
add_line = False,
add_line_active = False,
add_polygon = False,
add_polygon_active = False,
features = None,
feature_queries = None,
feature_resources = None,
wms_browser = {},
catalogue_layers = False,
legend = False,
toolbar = False,
area = False,
color_picker = False,
clear_layers = None,
nav = None,
print_control = None,
print_mode = False,
save = False,
search = False,
mouse_position = None,
overview = None,
permalink = None,
scaleline = None,
zoomcontrol = None,
zoomWheelEnabled = True,
mgrs = {},
window = False,
window_hide = False,
closable = True,
maximizable = True,
collapsed = False,
callback = "DEFAULT",
plugins = None,
):
"""
Returns the HTML to display a map
Normally called in the controller as: map = gis.show_map()
In the view, put: {{=XML(map)}}
@param id: ID to uniquely identify this map if there are several on a page
@param height: Height of viewport (if not provided then the default deployment setting is used)
@param width: Width of viewport (if not provided then the default deployment setting is used)
@param bbox: default Bounding Box of viewport (if not provided then the Lat/Lon/Zoom are used) (Dict):
{"lon_min" : float,
"lat_min" : float,
"lon_max" : float,
"lat_max" : float,
}
@param lat: default Latitude of viewport (if not provided then the default setting from the Map Service Catalogue is used)
@param lon: default Longitude of viewport (if not provided then the default setting from the Map Service Catalogue is used)
@param zoom: default Zoom level of viewport (if not provided then the default setting from the Map Service Catalogue is used)
@param projection: EPSG code for the Projection to use (if not provided then the default setting from the Map Service Catalogue is used)
@param add_feature: Whether to include a DrawFeature control to allow adding a marker to the map
@param add_feature_active: Whether the DrawFeature control should be active by default
@param add_polygon: Whether to include a DrawFeature control to allow drawing a polygon over the map
@param add_polygon_active: Whether the DrawFeature control should be active by default
@param features: Simple Features to overlay on Map (no control over appearance & not interactive)
[wkt]
@param feature_queries: Feature Queries to overlay onto the map & their options (List of Dicts):
[{"name" : T("MyLabel"), # A string: the label for the layer
"query" : query, # A gluon.sql.Rows of gis_locations, which can be from a simple query or a Join.
# Extra fields can be added for 'popup_url', 'popup_label' & either
# 'marker' (url/height/width) or 'shape' (with optional 'colour' & 'size')
"active" : True, # Is the feed displayed upon load or needs ticking to load afterwards?
"marker" : None, # Optional: A per-Layer marker query or marker_id for the icon used to display the feature
"opacity" : 1, # Optional
"cluster_attribute", # Optional
"cluster_distance", # Optional
"cluster_threshold" # Optional
}]
@param feature_resources: REST URLs for (filtered) resources to overlay onto the map & their options (List of Dicts):
[{"name" : T("MyLabel"), # A string: the label for the layer
"id" : "search", # A string: the id for the layer (for manipulation by JavaScript)
"active" : True, # Is the feed displayed upon load or needs ticking to load afterwards?
EITHER:
"layer_id" : 1, # An integer: the layer_id to load (optional alternative to specifying URL/tablename/marker)
"filter" : "filter", # A string: an optional URL filter which *replaces* any in the layer
OR:
"tablename" : "module_resource", # A string: the tablename (used to determine whether to locate via location_id or site_id)
"url" : "/eden/module/resource.geojson?filter", # A URL to load the resource
"marker" : None, # Optional: A per-Layer marker dict for the icon used to display the feature (overrides layer_id if-set)
"opacity" : 1, # Optional (overrides layer_id if-set)
"cluster_attribute", # Optional (overrides layer_id if-set)
"cluster_distance", # Optional (overrides layer_id if-set)
"cluster_threshold", # Optional (overrides layer_id if-set)
"dir", # Optional (overrides layer_id if-set)
"style", # Optional (overrides layer_id if-set)
}]
@param wms_browser: WMS Server's GetCapabilities & options (dict)
{"name": T("MyLabel"), # Name for the Folder in LayerTree
"url": string # URL of GetCapabilities
}
@param catalogue_layers: Show all the enabled Layers from the GIS Catalogue
Defaults to False: Just show the default Base layer
@param legend: True: Show the GeoExt Legend panel, False: No Panel, "float": New floating Legend Panel
@param toolbar: Show the Icon Toolbar of Controls
@param area: Show the Area tool on the Toolbar
@param color_picker: Show the Color Picker tool on the Toolbar (used for S3LocationSelector...pick up in postprocess)
If a style is provided then this is used as the default style
@param nav: Show the Navigation controls on the Toolbar
@param save: Show the Save tool on the Toolbar
@param search: Show the Geonames search box (requires a username to be configured)
@param mouse_position: Show the current coordinates in the bottom-right of the map. 3 Options: 'normal', 'mgrs', False (defaults to checking deployment_settings, which defaults to 'normal')
@param overview: Show the Overview Map (defaults to checking deployment_settings, which defaults to True)
@param permalink: Show the Permalink control (defaults to checking deployment_settings, which defaults to True)
@param scaleline: Show the ScaleLine control (defaults to checking deployment_settings, which defaults to True)
@param zoomcontrol: Show the Zoom control (defaults to checking deployment_settings, which defaults to True)
@param mgrs: Use the MGRS Control to select PDFs
{"name": string, # Name for the Control
"url": string # URL of PDF server
}
@ToDo: Also add MGRS Search support: http://gxp.opengeo.org/master/examples/mgrs.html
@param window: Have viewport pop out of page into a resizable window
@param window_hide: Have the window hidden by default, ready to appear (e.g. on clicking a button)
@param closable: In Window mode, whether the window is closable or not
@param collapsed: Start the Tools panel (West region) collapsed
@param callback: Code to run once the Map JavaScript has loaded
@param plugins: an iterable of objects which support the following methods:
.extend_gis_map(map)
                           Client-side portion supports the following methods:
.addToMapWindow(items)
.setup(map)
"""
return MAP(id = id,
height = height,
width = width,
bbox = bbox,
lat = lat,
lon = lon,
zoom = zoom,
projection = projection,
add_feature = add_feature,
add_feature_active = add_feature_active,
add_line = add_line,
add_line_active = add_line_active,
add_polygon = add_polygon,
add_polygon_active = add_polygon_active,
features = features,
feature_queries = feature_queries,
feature_resources = feature_resources,
wms_browser = wms_browser,
catalogue_layers = catalogue_layers,
legend = legend,
toolbar = toolbar,
area = area,
color_picker = color_picker,
clear_layers = clear_layers,
nav = nav,
print_control = print_control,
print_mode = print_mode,
save = save,
search = search,
mouse_position = mouse_position,
overview = overview,
permalink = permalink,
scaleline = scaleline,
zoomcontrol = zoomcontrol,
zoomWheelEnabled = zoomWheelEnabled,
mgrs = mgrs,
window = window,
window_hide = window_hide,
closable = closable,
maximizable = maximizable,
collapsed = collapsed,
callback = callback,
plugins = plugins,
)
# =============================================================================
class MAP(DIV):
"""
HTML Helper to render a Map
- allows the Map to be generated only when being rendered
- used by gis.show_map()
"""
def __init__(self, **opts):
"""
:param **opts: options to pass to the Map for server-side processing
"""
# We haven't yet run _setup()
self.setup = False
self.callback = None
# Options for server-side processing
self.opts = opts
self.id = map_id = opts.get("id", "default_map")
# Options for client-side processing
self.options = {}
# Components
# Map (Embedded not Window)
components = [DIV(DIV(_class="map_loader"),
_id="%s_panel" % map_id)
]
self.components = components
for c in components:
self._setnode(c)
# Adapt CSS to size of Map
_class = "map_wrapper"
if opts.get("window"):
_class = "%s fullscreen" % _class
if opts.get("print_mode"):
_class = "%s print" % _class
self.attributes = {"_class": _class,
"_id": map_id,
}
self.parent = None
# Show Color Picker?
if opts.get("color_picker"):
# Can't be done in _setup() as usually run from xml() and hence we've already passed this part of the layout.html
s3 = current.response.s3
if s3.debug:
style = "plugins/spectrum.css"
else:
style = "plugins/spectrum.min.css"
if style not in s3.stylesheets:
s3.stylesheets.append(style)
# -------------------------------------------------------------------------
    def _setup(self):
        """
            Setup the Map
            - not done during init() to be as Lazy as possible
            - separated from xml() in order to be able to read options to put
              into scripts (callback or otherwise)

            Reads the active gis_config + the **opts passed to __init__,
            populates self.options (client-side options dict), self.globals,
            self.i18n, self.scripts & self.plugin_callbacks, then flags
            self.setup = True so that xml() doesn't re-run this.

            :return: the client-side options dict (or None on fatal
                     projection misconfiguration)
        """
        # Read configuration
        config = GIS.get_config()
        if not config:
            # No prepop - Bail
            current.session.error = current.T("Map cannot display without prepop data!")
            redirect(URL(c="default", f="index"))
        opts = self.opts
        T = current.T
        db = current.db
        auth = current.auth
        s3db = current.s3db
        request = current.request
        response = current.response
        if not response.warning:
            response.warning = ""
        s3 = response.s3
        ctable = db.gis_config
        settings = current.deployment_settings
        MAP_ADMIN = auth.s3_has_role(current.session.s3.system_roles.MAP_ADMIN)
        # Support bookmarks (such as from the control)
        # - these over-ride the arguments
        get_vars = request.get_vars
        # JS Globals
        js_globals = {}
        # Map Options for client-side processing
        options = {}
        # Strings used by all Maps
        i18n = {"gis_base_layers": T("Base Layers"),
                "gis_overlays": T(settings.get_gis_label_overlays()),
                "gis_layers": T(settings.get_gis_layers_label()),
                "gis_draft_layer": T("Draft Features"),
                "gis_cluster_multiple": T("There are multiple records at this location"),
                "gis_loading": T("Loading"),
                "gis_requires_login": T("Requires Login"),
                "gis_too_many_features": T("There are too many features, please Zoom In or Filter"),
                "gis_zoomin": T("Zoom In"),
                }
        ##########
        # Viewport
        ##########
        height = opts.get("height", None)
        if height:
            map_height = height
        else:
            map_height = settings.get_gis_map_height()
        options["map_height"] = map_height
        width = opts.get("width", None)
        if width:
            map_width = width
        else:
            map_width = settings.get_gis_map_width()
        options["map_width"] = map_width
        # Bounding Box or Center/Zoom
        bbox = opts.get("bbox", None)
        if (bbox
            and (-90 <= bbox["lat_max"] <= 90)
            and (-90 <= bbox["lat_min"] <= 90)
            and (-180 <= bbox["lon_max"] <= 180)
            and (-180 <= bbox["lon_min"] <= 180)
            ):
            # We have sane Bounds provided, so we should use them
            pass
        else:
            # No bounds or we've been passed bounds which aren't sane
            bbox = None
            # Use Lat/Lon/Zoom to center instead
            # NB get_vars (bookmark) takes precedence over opts, which
            # take precedence over the active config
            lat = get_vars.get("lat", None)
            if lat is not None:
                lat = float(lat)
            else:
                lat = opts.get("lat", None)
            if lat is None or lat == "":
                lat = config.lat
            lon = get_vars.get("lon", None)
            if lon is not None:
                lon = float(lon)
            else:
                lon = opts.get("lon", None)
            if lon is None or lon == "":
                lon = config.lon
        if bbox:
            # Calculate from Bounds
            options["bbox"] = [bbox["lon_min"], # left
                               bbox["lat_min"], # bottom
                               bbox["lon_max"], # right
                               bbox["lat_max"], # top
                               ]
        else:
            options["lat"] = lat
            options["lon"] = lon
        zoom = get_vars.get("zoom", None)
        if zoom is not None:
            zoom = int(zoom)
        else:
            zoom = opts.get("zoom", None)
        if not zoom:
            zoom = config.zoom
        options["zoom"] = zoom or 1
        options["numZoomLevels"] = config.zoom_levels
        options["restrictedExtent"] = (config.lon_min,
                                       config.lat_min,
                                       config.lon_max,
                                       config.lat_max,
                                       )
        ############
        # Projection
        ############
        projection = opts.get("projection", None)
        if not projection:
            projection = config.epsg
        options["projection"] = projection
        if projection not in (900913, 4326):
            # Test for Valid Projection file in Proj4JS library
            projpath = os.path.join(
                request.folder, "static", "scripts", "gis", "proj4js", \
                "lib", "defs", "EPSG%s.js" % projection
            )
            try:
                f = open(projpath, "r")
                f.close()
            # bare except: treat any failure to open as "definition missing"
            except:
                if projection:
                    proj4js = config.proj4js
                    if proj4js:
                        # Create it
                        try:
                            f = open(projpath, "w")
                        except IOError, e:
                            response.error = \
                                T("Map not available: Cannot write projection file - %s") % e
                        else:
                            # NOTE(review): the def is keyed "EPSG:4326" even though the
                            # file is EPSG%s.js for a custom projection — looks wrong,
                            # confirm whether this should be "EPSG:%s" % projection
                            f.write('''Proj4js.defs["EPSG:4326"]="%s"''' % proj4js)
                            f.close()
                    else:
                        response.warning = \
                            T("Map not available: Projection %(projection)s not supported - please add definition to %(path)s") % \
                                dict(projection = "'%s'" % projection,
                                     path= "/static/scripts/gis/proj4js/lib/defs")
                else:
                    response.error = \
                        T("Map not available: No Projection configured")
                    return None
        options["maxExtent"] = config.maxExtent
        options["units"] = config.units
        ########
        # Marker
        ########
        if config.marker_image:
            options["marker_default"] = dict(i = config.marker_image,
                                             h = config.marker_height,
                                             w = config.marker_width,
                                             )
        # @ToDo: show_map() opts with fallback to settings
        # Keep these in sync with scaleImage() in s3.gis.js
        marker_max_height = settings.get_gis_marker_max_height()
        if marker_max_height != 35:
            options["max_h"] = marker_max_height
        marker_max_width = settings.get_gis_marker_max_width()
        if marker_max_width != 30:
            options["max_w"] = marker_max_width
        #########
        # Colours
        #########
        # Keep these in sync with s3.gis.js
        # - only sent to the client when they differ from the JS defaults
        cluster_fill = settings.get_gis_cluster_fill()
        if cluster_fill and cluster_fill != '8087ff':
            options["cluster_fill"] = cluster_fill
        cluster_stroke = settings.get_gis_cluster_stroke()
        if cluster_stroke and cluster_stroke != '2b2f76':
            options["cluster_stroke"] = cluster_stroke
        select_fill = settings.get_gis_select_fill()
        if select_fill and select_fill != 'ffdc33':
            options["select_fill"] = select_fill
        select_stroke = settings.get_gis_select_stroke()
        if select_stroke and select_stroke != 'ff9933':
            options["select_stroke"] = select_stroke
        if not settings.get_gis_cluster_label():
            options["cluster_label"] = False
        ########
        # Layout
        ########
        if not opts.get("closable", False):
            options["windowNotClosable"] = True
        if opts.get("window", False):
            options["window"] = True
            if opts.get("window_hide", False):
                options["windowHide"] = True
        if opts.get("maximizable", False):
            options["maximizable"] = True
        else:
            options["maximizable"] = False
        # Collapsed
        if opts.get("collapsed", False):
            options["west_collapsed"] = True
        # LayerTree
        if not settings.get_gis_layer_tree_base():
            options["hide_base"] = True
        if not settings.get_gis_layer_tree_overlays():
            options["hide_overlays"] = True
        if not settings.get_gis_layer_tree_expanded():
            options["folders_closed"] = True
        if settings.get_gis_layer_tree_radio():
            options["folders_radio"] = True
        #######
        # Tools
        #######
        # Toolbar
        if opts.get("toolbar", False):
            options["toolbar"] = True
            i18n["gis_length_message"] = T("The length is")
            i18n["gis_length_tooltip"] = T("Measure Length: Click the points along the path & end with a double-click")
            i18n["gis_zoomfull"] = T("Zoom to maximum map extent")
            if settings.get_gis_geolocate_control():
                # Presence of label turns feature on in s3.gis.js
                # @ToDo: Provide explicit option to support multiple maps in a page with different options
                i18n["gis_geoLocate"] = T("Zoom to Current Location")
            # Search
            if opts.get("search", False):
                geonames_username = settings.get_gis_geonames_username()
                if geonames_username:
                    # Presence of username turns feature on in s3.gis.js
                    options["geonames"] = geonames_username
                    # Presence of label adds support JS in Loader
                    i18n["gis_search"] = T("Search location in Geonames")
                    #i18n["gis_search_no_internet"] = T("Geonames.org search requires Internet connectivity!")
            # Show NAV controls?
            # e.g. removed within S3LocationSelector[Widget]
            nav = opts.get("nav", None)
            if nav is None:
                nav = settings.get_gis_nav_controls()
            if nav:
                i18n["gis_zoominbutton"] = T("Zoom In: click in the map or use the left mouse button and drag to create a rectangle")
                i18n["gis_zoomout"] = T("Zoom Out: click in the map or use the left mouse button and drag to create a rectangle")
                i18n["gis_pan"] = T("Pan Map: keep the left mouse button pressed and drag the map")
                i18n["gis_navPrevious"] = T("Previous View")
                i18n["gis_navNext"] = T("Next View")
            else:
                options["nav"] = False
            # Show Area control?
            if opts.get("area", False):
                options["area"] = True
                i18n["gis_area_message"] = T("The area is")
                i18n["gis_area_tooltip"] = T("Measure Area: Click the points around the polygon & end with a double-click")
            # Show Color Picker?
            color_picker = opts.get("color_picker", False)
            if color_picker:
                options["color_picker"] = True
                if color_picker is not True:
                    # A non-boolean color_picker is a JSON style to use as default
                    options["draft_style"] = json.loads(color_picker)
                #i18n["gis_color_picker_tooltip"] = T("Select Color")
                i18n["gis_cancelText"] = T("cancel")
                i18n["gis_chooseText"] = T("choose")
                i18n["gis_togglePaletteMoreText"] = T("more")
                i18n["gis_togglePaletteLessText"] = T("less")
                i18n["gis_clearText"] = T("Clear Color Selection")
                i18n["gis_noColorSelectedText"] = T("No Color Selected")
            # Show Print control?
            print_control = opts.get("print_control") is not False and settings.get_gis_print()
            if print_control:
                # @ToDo: Use internal Printing or External Service
                # http://eden.sahanafoundation.org/wiki/BluePrint/GIS/Printing
                #print_service = settings.get_gis_print_service()
                #if print_service:
                #    print_tool = {"url": string, # URL of print service (e.g. http://localhost:8080/geoserver/pdf/)
                #                  "mapTitle": string, # Title for the Printed Map (optional)
                #                  "subTitle": string # subTitle for the Printed Map (optional)
                #                  }
                options["print"] = True
                i18n["gis_print"] = T("Print")
                i18n["gis_paper_size"] = T("Paper Size")
                i18n["gis_print_tip"] = T("Take a screenshot of the map which can be printed")
            # Show Save control?
            # e.g. removed within S3LocationSelector[Widget]
            if opts.get("save") is True and auth.s3_logged_in():
                options["save"] = True
                i18n["gis_save"] = T("Save: Default Lat, Lon & Zoom for the Viewport")
                if MAP_ADMIN or (config.pe_id == auth.user.pe_id):
                    # Personal config or MapAdmin, so Save Button does Updates
                    options["config_id"] = config.id
            # OSM Authoring
            pe_id = auth.user.pe_id if auth.s3_logged_in() else None
            if pe_id and s3db.auth_user_options_get_osm(pe_id):
                # Presence of label turns feature on in s3.gis.js
                # @ToDo: Provide explicit option to support multiple maps in a page with different options
                i18n["gis_potlatch"] = T("Edit the OpenStreetMap data for this area")
                i18n["gis_osm_zoom_closer"] = T("Zoom in closer to Edit OpenStreetMap layer")
            # MGRS PDF Browser
            mgrs = opts.get("mgrs", None)
            if mgrs:
                options["mgrs_name"] = mgrs["name"]
                options["mgrs_url"] = mgrs["url"]
        else:
            # No toolbar
            # Downgrade a requested Save button to the floating variant
            if opts.get("save") is True:
                opts["save"] = "float"
        # Show Save control?
        # e.g. removed within S3LocationSelector[Widget]
        if opts.get("save") == "float" and auth.s3_logged_in():
            permit = auth.s3_has_permission
            if permit("create", ctable):
                options["save"] = "float"
                i18n["gis_save_map"] = T("Save Map")
                i18n["gis_new_map"] = T("Save as New Map?")
                i18n["gis_name_map"] = T("Name of Map")
                i18n["save"] = T("Save")
                i18n["saved"] = T("Saved")
                config_id = config.id
                _config = db(ctable.id == config_id).select(ctable.uuid,
                                                            ctable.name,
                                                            limitby=(0, 1),
                                                            ).first()
                if MAP_ADMIN:
                    i18n["gis_my_maps"] = T("Saved Maps")
                else:
                    options["pe_id"] = auth.user.pe_id
                    i18n["gis_my_maps"] = T("My Maps")
                if permit("update", ctable, record_id=config_id):
                    options["config_id"] = config_id
                    options["config_name"] = _config.name
                elif _config.uuid != "SITE_DEFAULT":
                    options["config_name"] = _config.name
        # Legend panel
        legend = opts.get("legend", False)
        if legend:
            i18n["gis_legend"] = T("Legend")
            if legend == "float":
                options["legend"] = "float"
                if settings.get_gis_layer_metadata():
                    options["metadata"] = True
                    # MAP_ADMIN better for simpler deployments
                    #if auth.s3_has_permission("create", "cms_post_layer"):
                    if MAP_ADMIN:
                        i18n["gis_metadata_create"] = T("Create 'More Info'")
                        i18n["gis_metadata_edit"] = T("Edit 'More Info'")
                    else:
                        i18n["gis_metadata"] = T("More Info")
            else:
                options["legend"] = True
        # Draw Feature Controls
        if opts.get("add_feature", False):
            i18n["gis_draw_feature"] = T("Add Point")
            if opts.get("add_feature_active", False):
                options["draw_feature"] = "active"
            else:
                options["draw_feature"] = "inactive"
        if opts.get("add_line", False):
            i18n["gis_draw_line"] = T("Add Line")
            if opts.get("add_line_active", False):
                options["draw_line"] = "active"
            else:
                options["draw_line"] = "inactive"
        if opts.get("add_polygon", False):
            i18n["gis_draw_polygon"] = T("Add Polygon")
            if opts.get("add_polygon_active", False):
                options["draw_polygon"] = "active"
            else:
                options["draw_polygon"] = "inactive"
        # Clear Layers
        clear_layers = opts.get("clear_layers") is not False and settings.get_gis_clear_layers()
        if clear_layers:
            options["clear_layers"] = clear_layers
            i18n["gis_clearlayers"] = T("Clear all Layers")
        # Layer Properties
        if settings.get_gis_layer_properties():
            # Presence of label turns feature on in s3.gis.js
            i18n["gis_properties"] = T("Layer Properties")
        # Upload Layer
        if settings.get_gis_geoserver_password():
            # Presence of label adds support JS in Loader and turns feature on in s3.gis.js
            # @ToDo: Provide explicit option to support multiple maps in a page with different options
            i18n["gis_uploadlayer"] = T("Upload Shapefile")
        # WMS Browser
        wms_browser = opts.get("wms_browser", None)
        if wms_browser:
            options["wms_browser_name"] = wms_browser["name"]
            # urlencode the URL
            options["wms_browser_url"] = urllib.quote(wms_browser["url"])
        # Mouse Position
        # 'normal', 'mgrs' or 'off'
        mouse_position = opts.get("mouse_position", None)
        if mouse_position is None:
            mouse_position = settings.get_gis_mouse_position()
        if mouse_position == "mgrs":
            options["mouse_position"] = "mgrs"
            # Tell loader to load support scripts
            js_globals["mgrs"] = True
        elif mouse_position:
            options["mouse_position"] = True
        # Overview Map
        overview = opts.get("overview", None)
        if overview is None:
            overview = settings.get_gis_overview()
        if not overview:
            options["overview"] = False
        # Permalink
        permalink = opts.get("permalink", None)
        if permalink is None:
            permalink = settings.get_gis_permalink()
        if not permalink:
            options["permalink"] = False
        # ScaleLine
        scaleline = opts.get("scaleline", None)
        if scaleline is None:
            scaleline = settings.get_gis_scaleline()
        if not scaleline:
            options["scaleline"] = False
        # Zoom control
        zoomcontrol = opts.get("zoomcontrol", None)
        if zoomcontrol is None:
            zoomcontrol = settings.get_gis_zoomcontrol()
        if not zoomcontrol:
            options["zoomcontrol"] = False
        zoomWheelEnabled = opts.get("zoomWheelEnabled", True)
        if not zoomWheelEnabled:
            options["no_zoom_wheel"] = True
        ########
        # Layers
        ########
        # Duplicate Features to go across the dateline?
        # @ToDo: Action this again (e.g. for DRRPP)
        if settings.get_gis_duplicate_features():
            options["duplicate_features"] = True
        # Features
        features = opts.get("features", None)
        if features:
            options["features"] = addFeatures(features)
        # Feature Queries
        feature_queries = opts.get("feature_queries", None)
        if feature_queries:
            options["feature_queries"] = addFeatureQueries(feature_queries)
        # Feature Resources
        feature_resources = opts.get("feature_resources", None)
        if feature_resources:
            options["feature_resources"] = addFeatureResources(feature_resources)
        # Layers
        db = current.db
        ltable = db.gis_layer_config
        etable = db.gis_layer_entity
        query = (ltable.deleted == False)
        join = [etable.on(etable.layer_id == ltable.layer_id)]
        fields = [etable.instance_type,
                  ltable.layer_id,
                  ltable.enabled,
                  ltable.visible,
                  ltable.base,
                  ltable.dir,
                  ]
        if opts.get("catalogue_layers", False):
            # Add all enabled Layers from the Catalogue
            stable = db.gis_style
            mtable = db.gis_marker
            query &= (ltable.config_id.belongs(config.ids))
            join.append(ctable.on(ctable.id == ltable.config_id))
            fields.extend((stable.style,
                           stable.cluster_distance,
                           stable.cluster_threshold,
                           stable.opacity,
                           stable.popup_format,
                           mtable.image,
                           mtable.height,
                           mtable.width,
                           ctable.pe_type))
            left = [stable.on((stable.layer_id == etable.layer_id) & \
                              (stable.record_id == None) & \
                              ((stable.config_id == ctable.id) | \
                               (stable.config_id == None))),
                    mtable.on(mtable.id == stable.marker_id),
                    ]
            limitby = None
            # @ToDo: Need to fix this?: make the style lookup a different call
            if settings.get_database_type() == "postgres":
                # None is last
                orderby = [ctable.pe_type, stable.config_id]
            else:
                # None is 1st
                orderby = [ctable.pe_type, ~stable.config_id]
            if settings.get_gis_layer_metadata():
                cptable = s3db.cms_post_layer
                left.append(cptable.on(cptable.layer_id == etable.layer_id))
                fields.append(cptable.post_id)
        else:
            # Add just the default Base Layer
            query &= (ltable.base == True) & \
                     (ltable.config_id == config.id)
            # Base layer doesn't need a style
            left = None
            limitby = (0, 1)
            orderby = None
        layer_types = []
        lappend = layer_types.append
        layers = db(query).select(join=join,
                                  left=left,
                                  limitby=limitby,
                                  orderby=orderby,
                                  *fields)
        if not layers:
            # Use Site Default base layer
            # (Base layer doesn't need a style)
            query = (etable.id == ltable.layer_id) & \
                    (ltable.config_id == ctable.id) & \
                    (ctable.uuid == "SITE_DEFAULT") & \
                    (ltable.base == True) & \
                    (ltable.enabled == True)
            layers = db(query).select(*fields,
                                      limitby=(0, 1))
            if not layers:
                # Just show EmptyLayer
                layer_types = [LayerEmpty]
        # Map each record's instance type to its Layer class
        for layer in layers:
            layer_type = layer["gis_layer_entity.instance_type"]
            if layer_type == "gis_layer_openstreetmap":
                lappend(LayerOSM)
            elif layer_type == "gis_layer_google":
                # NB v3 doesn't work when initially hidden
                lappend(LayerGoogle)
            elif layer_type == "gis_layer_arcrest":
                lappend(LayerArcREST)
            elif layer_type == "gis_layer_bing":
                lappend(LayerBing)
            elif layer_type == "gis_layer_tms":
                lappend(LayerTMS)
            elif layer_type == "gis_layer_wms":
                lappend(LayerWMS)
            elif layer_type == "gis_layer_xyz":
                lappend(LayerXYZ)
            elif layer_type == "gis_layer_empty":
                lappend(LayerEmpty)
            elif layer_type == "gis_layer_js":
                lappend(LayerJS)
            elif layer_type == "gis_layer_theme":
                lappend(LayerTheme)
            elif layer_type == "gis_layer_geojson":
                lappend(LayerGeoJSON)
            elif layer_type == "gis_layer_gpx":
                lappend(LayerGPX)
            elif layer_type == "gis_layer_coordinate":
                lappend(LayerCoordinate)
            elif layer_type == "gis_layer_georss":
                lappend(LayerGeoRSS)
            elif layer_type == "gis_layer_kml":
                lappend(LayerKML)
            elif layer_type == "gis_layer_openweathermap":
                lappend(LayerOpenWeatherMap)
            elif layer_type == "gis_layer_shapefile":
                lappend(LayerShapefile)
            elif layer_type == "gis_layer_wfs":
                lappend(LayerWFS)
            elif layer_type == "gis_layer_feature":
                lappend(LayerFeature)
        # Make unique
        layer_types = set(layer_types)
        scripts = []
        scripts_append = scripts.append
        for LayerType in layer_types:
            try:
                # Instantiate the Class
                layer = LayerType(layers)
                layer.as_dict(options)
                for script in layer.scripts:
                    scripts_append(script)
            except Exception, exception:
                # A broken layer shouldn't take down the whole map:
                # log it, and only raise in debug mode
                error = "%s not shown: %s" % (LayerType.__name__, exception)
                current.log.error(error)
                if s3.debug:
                    raise HTTP(500, error)
                else:
                    response.warning += error
        # WMS getFeatureInfo
        # (loads conditionally based on whether queryable WMS Layers have been added)
        if s3.gis.get_feature_info and settings.get_gis_getfeature_control():
            # Presence of label turns feature on
            # @ToDo: Provide explicit option to support multiple maps in a page
            #        with different options
            i18n["gis_get_feature_info"] = T("Get Feature Info")
            i18n["gis_feature_info"] = T("Feature Info")
        # Callback can be set before _setup()
        if not self.callback:
            self.callback = opts.get("callback", "DEFAULT")
        # These can be read/modified after _setup() & before xml()
        self.options = options
        self.globals = js_globals
        self.i18n = i18n
        self.scripts = scripts
        # Set up map plugins
        # - currently just used by Climate
        # @ToDo: Get these working with new loader
        # This, and any code it generates, is done last
        # However, map plugin should not assume this.
        self.plugin_callbacks = []
        plugins = opts.get("plugins", None)
        if plugins:
            for plugin in plugins:
                plugin.extend_gis_map(self)
        # Flag to xml() that we've already been run
        self.setup = True
        return options
# -------------------------------------------------------------------------
    def xml(self):
        """
            Render the Map
            - this is primarily done by inserting a lot of JavaScript
            - CSS loaded as-standard to avoid delays in page loading
            - HTML added in init() as a component

            Assembles the i18n strings & S3.gis.* globals into
            response.s3.js_global, queues the required scripts, and
            registers an s3_gis_loadjs() call in jquery_ready which
            boots the map client-side.
        """
        if not self.setup:
            self._setup()
        # Add ExtJS
        # @ToDo: Do this conditionally on whether Ext UI is used
        s3_include_ext()
        dumps = json.dumps
        s3 = current.response.s3
        js_global = s3.js_global
        js_global_append = js_global.append
        # i18n strings: deduplicate, then add as a single JS snippet
        i18n_dict = self.i18n
        i18n = []
        i18n_append = i18n.append
        for key, val in i18n_dict.items():
            line = '''i18n.%s="%s"''' % (key, val)
            if line not in i18n:
                i18n_append(line)
        i18n = '''\n'''.join(i18n)
        if i18n not in js_global:
            js_global_append(i18n)
        # S3.gis.* globals, same treatment
        globals_dict = self.globals
        js_globals = []
        for key, val in globals_dict.items():
            line = '''S3.gis.%s=%s''' % (key, dumps(val, separators=SEPARATORS))
            if line not in js_globals:
                js_globals.append(line)
        js_globals = '''\n'''.join(js_globals)
        if js_globals not in js_global:
            js_global_append(js_globals)
        debug = s3.debug
        scripts = s3.scripts
        # underscore.js: CDN or local, minified unless debug
        if s3.cdn:
            if debug:
                script = \
"//cdnjs.cloudflare.com/ajax/libs/underscore.js/1.6.0/underscore.js"
            else:
                script = \
"//cdnjs.cloudflare.com/ajax/libs/underscore.js/1.6.0/underscore-min.js"
        else:
            if debug:
                script = URL(c="static", f="scripts/underscore.js")
            else:
                script = URL(c="static", f="scripts/underscore-min.js")
        if script not in scripts:
            scripts.append(script)
        if self.opts.get("color_picker", False):
            if debug:
                script = URL(c="static", f="scripts/spectrum.js")
            else:
                script = URL(c="static", f="scripts/spectrum.min.js")
            if script not in scripts:
                scripts.append(script)
        if debug:
            script = URL(c="static", f="scripts/S3/s3.gis.loader.js")
        else:
            script = URL(c="static", f="scripts/S3/s3.gis.loader.min.js")
        if script not in scripts:
            scripts.append(script)
        callback = self.callback
        map_id = self.id
        options = self.options
        projection = options["projection"]
        try:
            options = dumps(options, separators=SEPARATORS)
        except Exception, exception:
            # NB options stays a dict here, so the loader snippet below
            # will embed its repr — map will not initialise client-side
            current.log.error("Map %s failed to initialise" % map_id, exception)
        plugin_callbacks = '''\n'''.join(self.plugin_callbacks)
        if callback:
            if callback == "DEFAULT":
                if map_id == "default_map":
                    callback = '''S3.gis.show_map(null,%s)''' % options
                else:
                    callback = '''S3.gis.show_map(%s,%s)''' % (map_id, options)
            else:
                # Store options where they can be read by a later show_map()
                js_global_append('''S3.gis.options["%s"]=%s''' % (map_id,
                                                                  options))
            script = URL(c="static", f="scripts/yepnope.1.5.4-min.js")
            if script not in scripts:
                scripts.append(script)
            if plugin_callbacks:
                callback = '''%s\n%s''' % (callback, plugin_callbacks)
            callback = '''function(){%s}''' % callback
        else:
            # Store options where they can be read by a later show_map()
            js_global_append('''S3.gis.options["%s"]=%s''' % (map_id, options))
            if plugin_callbacks:
                callback = '''function(){%s}''' % plugin_callbacks
            else:
                callback = '''null'''
        loader = \
'''s3_gis_loadjs(%(debug)s,%(projection)s,%(callback)s,%(scripts)s)''' \
            % dict(debug = "true" if s3.debug else "false",
                   projection = projection,
                   callback = callback,
                   scripts = self.scripts
                   )
        jquery_ready = s3.jquery_ready
        if loader not in jquery_ready:
            jquery_ready.append(loader)
        # Return the HTML
        return super(MAP, self).xml()
# =============================================================================
def addFeatures(features):
    """
        Add Simple Features to the Draft layer
        - used by S3LocationSelectorWidget

        Simplifies each feature to GeoJSON & wraps it as a GeoJSON
        Feature dict; features which fail to simplify are dropped.
    """
    simplify = GIS.simplify
    geojsons = (simplify(feature, output="geojson")
                for feature in features)
    return [{"type": "Feature",
             "geometry": json.loads(geojson),
             }
            for geojson in geojsons if geojson]
# =============================================================================
def addFeatureQueries(feature_queries):
    """
        Add Feature Queries to the map
        - These can be Rows or Storage()
        NB These considerations need to be taken care of before arriving here:
            Security of data
            Localisation of name/popup_label

        Each layer's rows are copied into the gis_feature_query table
        (keyed by name/controller/function & owner) so that they can be
        served back to the client as GeoJSON.

        :param feature_queries: iterable of layer dicts, each with "name" &
                                "query" (Rows or list of dicts with lat/lon),
                                plus optional marker/style/cluster keys
        :return: list of layer option dicts for client-side processing
    """
    db = current.db
    s3db = current.s3db
    cache = s3db.cache
    request = current.request
    controller = request.controller
    function = request.function
    fqtable = s3db.gis_feature_query
    mtable = s3db.gis_marker
    auth = current.auth
    auth_user = auth.user
    if auth_user:
        created_by = auth_user.id
        s3_make_session_owner = auth.s3_make_session_owner
    else:
        # Anonymous
        # @ToDo: A deployment with many Anonymous Feature Queries being
        # accessed will need to change this design - e.g. use session ID instead
        created_by = None
    layers_feature_query = []
    append = layers_feature_query.append
    for layer in feature_queries:
        name = str(layer["name"])
        _layer = dict(name=name)
        # Raw string: "\W" is an invalid escape sequence (SyntaxWarning/-error
        # on modern Python); r"\W" is the correct, equivalent form
        name_safe = re.sub(r"\W", "_", name)
        # Lat/Lon via Join or direct?
        try:
            layer["query"][0].gis_location.lat
            join = True
        except:
            join = False
        # Push the Features into a temporary table in order to have them accessible via GeoJSON
        # @ToDo: Maintenance Script to clean out old entries (> 24 hours?)
        cname = "%s_%s_%s" % (name_safe,
                              controller,
                              function)
        # Clear old records
        query = (fqtable.name == cname) & \
                (fqtable.created_by == created_by)
        db(query).delete()
        for row in layer["query"]:
            rowdict = {"name" : cname}
            if join:
                rowdict["lat"] = row.gis_location.lat
                rowdict["lon"] = row.gis_location.lon
            else:
                rowdict["lat"] = row["lat"]
                rowdict["lon"] = row["lon"]
            if "popup_url" in row:
                rowdict["popup_url"] = row["popup_url"]
            if "popup_label" in row:
                rowdict["popup_label"] = row["popup_label"]
            if "marker" in row:
                # Per-row Marker given as a Row
                rowdict["marker_url"] = URL(c="static", f="img",
                                            args=["markers",
                                                  row["marker"].image])
                rowdict["marker_height"] = row["marker"].height
                rowdict["marker_width"] = row["marker"].width
            else:
                # Per-row Marker given as discrete URL/size values
                if "marker_url" in row:
                    rowdict["marker_url"] = row["marker_url"]
                if "marker_height" in row:
                    rowdict["marker_height"] = row["marker_height"]
                if "marker_width" in row:
                    rowdict["marker_width"] = row["marker_width"]
            if "shape" in row:
                rowdict["shape"] = row["shape"]
            if "size" in row:
                rowdict["size"] = row["size"]
            if "colour" in row:
                rowdict["colour"] = row["colour"]
            if "opacity" in row:
                rowdict["opacity"] = row["opacity"]
            record_id = fqtable.insert(**rowdict)
            if not created_by:
                # Anonymous records are owned by the session instead
                s3_make_session_owner(fqtable, record_id)
        # URL to retrieve the data
        url = "%s.geojson?feature_query.name=%s&feature_query.created_by=%s" % \
              (URL(c="gis", f="feature_query"),
               cname,
               created_by)
        _layer["url"] = url
        if "active" in layer and not layer["active"]:
            _layer["visibility"] = False
        if "marker" in layer:
            # per-Layer Marker
            marker = layer["marker"]
            if isinstance(marker, int):
                # integer (marker_id) not row
                marker = db(mtable.id == marker).select(mtable.image,
                                                        mtable.height,
                                                        mtable.width,
                                                        limitby=(0, 1),
                                                        cache=cache
                                                        ).first()
            if marker:
                # @ToDo: Single option as Marker.as_json_dict()
                _layer["marker_url"] = marker["image"]
                _layer["marker_height"] = marker["height"]
                _layer["marker_width"] = marker["width"]
        # Only send non-default style/cluster settings to the client
        if "opacity" in layer and layer["opacity"] != 1:
            _layer["opacity"] = "%.1f" % layer["opacity"]
        if "cluster_attribute" in layer and \
           layer["cluster_attribute"] != CLUSTER_ATTRIBUTE:
            _layer["cluster_attribute"] = layer["cluster_attribute"]
        if "cluster_distance" in layer and \
           layer["cluster_distance"] != CLUSTER_DISTANCE:
            _layer["cluster_distance"] = layer["cluster_distance"]
        if "cluster_threshold" in layer and \
           layer["cluster_threshold"] != CLUSTER_THRESHOLD:
            _layer["cluster_threshold"] = layer["cluster_threshold"]
        append(_layer)
    return layers_feature_query
# =============================================================================
def addFeatureResources(feature_resources):
"""
Add Feature Resources to the map
- REST URLs to back-end resources
"""
T = current.T
db = current.db
s3db = current.s3db
ftable = s3db.gis_layer_feature
ltable = s3db.gis_layer_config
# Better to do a separate query
#mtable = s3db.gis_marker
stable = db.gis_style
config = GIS.get_config()
config_id = config.id
postgres = current.deployment_settings.get_database_type() == "postgres"
layers_feature_resource = []
append = layers_feature_resource.append
for layer in feature_resources:
name = str(layer["name"])
_layer = dict(name=name)
_id = str(layer["id"])
_id = re.sub("\W", "_", _id)
_layer["id"] = _id
# Are we loading a Catalogue Layer or a simple URL?
layer_id = layer.get("layer_id", None)
if layer_id:
query = (ftable.layer_id == layer_id)
left = [ltable.on((ltable.layer_id == layer_id) & \
(ltable.config_id == config_id)),
stable.on((stable.layer_id == layer_id) & \
((stable.config_id == config_id) | \
(stable.config_id == None)) & \
(stable.record_id == None) & \
(stable.aggregate == False)),
# Better to do a separate query
#mtable.on(mtable.id == stable.marker_id),
]
# @ToDo: Need to fix this?: make the style lookup a different call
if postgres:
# None is last
orderby = stable.config_id
else:
# None is 1st
orderby = ~stable.config_id
row = db(query).select(ftable.layer_id,
ftable.controller,
ftable.function,
ftable.filter,
ftable.aggregate,
ftable.trackable,
ftable.use_site,
# @ToDo: Deprecate Legacy
ftable.popup_fields,
# @ToDo: Deprecate Legacy
ftable.popup_label,
ftable.cluster_attribute,
ltable.dir,
# Better to do a separate query
#mtable.image,
#mtable.height,
#mtable.width,
stable.marker_id,
stable.opacity,
stable.popup_format,
# @ToDo: If-required
#stable.url_format,
stable.cluster_distance,
stable.cluster_threshold,
stable.style,
left=left,
limitby=(0, 1),
orderby=orderby,
).first()
_dir = layer.get("dir", row["gis_layer_config.dir"])
# Better to do a separate query
#_marker = row["gis_marker"]
_style = row["gis_style"]
row = row["gis_layer_feature"]
if row.use_site:
maxdepth = 1
else:
maxdepth = 0
opacity = layer.get("opacity", _style.opacity) or 1
cluster_attribute = layer.get("cluster_attribute",
row.cluster_attribute) or \
CLUSTER_ATTRIBUTE
cluster_distance = layer.get("cluster_distance",
_style.cluster_distance) or \
CLUSTER_DISTANCE
cluster_threshold = layer.get("cluster_threshold",
_style.cluster_threshold)
if cluster_threshold is None:
cluster_threshold = CLUSTER_THRESHOLD
style = layer.get("style", None)
if style:
try:
# JSON Object?
style = json.loads(style)
except:
current.log.error("Invalid Style: %s" % style)
style = None
else:
style = _style.style
#url_format = _style.url_format
aggregate = layer.get("aggregate", row.aggregate)
if aggregate:
url = "%s.geojson?layer=%i&show_ids=true" % \
(URL(c=row.controller, f=row.function, args="report"),
row.layer_id)
#if not url_format:
# Use gis/location controller in all reports
url_format = "%s/{id}.plain" % URL(c="gis", f="location")
else:
_url = URL(c=row.controller, f=row.function)
url = "%s.geojson?layer=%i&components=None&show_ids=true&maxdepth=%s" % \
(_url,
row.layer_id,
maxdepth)
#if not url_format:
url_format = "%s/{id}.plain" % _url
# Use specified filter or fallback to the one in the layer
_filter = layer.get("filter", row.filter)
if _filter:
url = "%s&%s" % (url, _filter)
if row.trackable:
url = "%s&track=1" % url
if not style:
marker = layer.get("marker")
if marker:
marker = Marker(marker).as_json_dict()
elif _style.marker_id:
marker = Marker(marker_id=_style.marker_id).as_json_dict()
popup_format = _style.popup_format
if not popup_format:
# Old-style
popup_fields = row["popup_fields"]
if popup_fields:
popup_label = row["popup_label"]
if popup_label:
popup_format = "{%s} (%s)" % (popup_fields[0],
current.T(popup_label))
else:
popup_format = "%s" % popup_fields[0]
for f in popup_fields[1:]:
popup_format = "%s<br />{%s}" % (popup_format, f)
else:
# URL to retrieve the data
url = layer["url"]
tablename = layer["tablename"]
table = s3db[tablename]
# Optimise the query
if "location_id" in table.fields:
maxdepth = 0
elif "site_id" in table.fields:
maxdepth = 1
elif tablename == "gis_location":
maxdepth = 0
else:
# Not much we can do!
# @ToDo: Use Context
continue
options = "components=None&maxdepth=%s&show_ids=true" % maxdepth
if "?" in url:
url = "%s&%s" % (url, options)
else:
url = "%s?%s" % (url, options)
opacity = layer.get("opacity", 1)
cluster_attribute = layer.get("cluster_attribute",
CLUSTER_ATTRIBUTE)
cluster_distance = layer.get("cluster_distance",
CLUSTER_DISTANCE)
cluster_threshold = layer.get("cluster_threshold",
CLUSTER_THRESHOLD)
_dir = layer.get("dir", None)
style = layer.get("style", None)
if style:
try:
# JSON Object?
style = json.loads(style)
except:
current.log.error("Invalid Style: %s" % style)
style = None
if not style:
marker = layer.get("marker", None)
if marker:
marker = Marker(marker).as_json_dict()
popup_format = layer.get("popup_format")
url_format = layer.get("url_format")
if "active" in layer and not layer["active"]:
_layer["visibility"] = False
if opacity != 1:
_layer["opacity"] = "%.1f" % opacity
if popup_format:
if "T(" in popup_format:
# i18n
items = regex_translate.findall(popup_format)
for item in items:
titem = str(T(item[1:-1]))
popup_format = popup_format.replace("T(%s)" % item,
titem)
_layer["popup_format"] = popup_format
if url_format:
_layer["url_format"] = url_format
if cluster_attribute != CLUSTER_ATTRIBUTE:
_layer["cluster_attribute"] = cluster_attribute
if cluster_distance != CLUSTER_DISTANCE:
_layer["cluster_distance"] = cluster_distance
if cluster_threshold != CLUSTER_THRESHOLD:
_layer["cluster_threshold"] = cluster_threshold
if _dir:
_layer["dir"] = _dir
if style:
_layer["style"] = style
elif marker:
# Per-layer Marker
_layer["marker"] = marker
else:
# Request the server to provide per-feature Markers
url = "%s&markers=1" % url
_layer["url"] = url
append(_layer)
return layers_feature_resource
# =============================================================================
class Layer(object):
    """
        Abstract base class for Layers from Catalogue

        Subclasses define:
            tablename: the layer instance table (e.g. "gis_layer_wms")
            dictname:  the key under which the layers are sent to the client
            style:     whether this layer type reads gis_style records
        and usually a nested SubLayer class with an as_dict() method.
    """
    def __init__(self, all_layers):
        """
            Instantiate all accessible sublayers of this layer type

            @param all_layers: Rows joining gis_layer_entity &
                               gis_layer_config (and, where present,
                               gis_style / gis_marker / cms_post_layer)
                               for the active config(s)
        """
        sublayers = []
        append = sublayers.append
        # List of Scripts to load async with the Map JavaScript
        self.scripts = []
        s3_has_role = current.auth.s3_has_role
        tablename = self.tablename
        table = current.s3db[tablename]
        fields = table.fields
        metafields = s3_all_meta_field_names()
        # Select all non-meta fields of the instance table
        fields = [table[f] for f in fields if f not in metafields]
        # Restrict to layers of this instance type within the config(s)
        layer_ids = [row["gis_layer_config.layer_id"] for row in all_layers if \
                     row["gis_layer_entity.instance_type"] == tablename]
        query = (table.layer_id.belongs(set(layer_ids)))
        rows = current.db(query).select(*fields)
        SubLayer = self.SubLayer
        # Flag to show whether we've set the default baselayer
        # (otherwise a config higher in the hierarchy can overrule one lower down)
        base = True
        # Layers requested to be visible via URL (e.g. embedded map)
        visible = current.request.get_vars.get("layers", None)
        if visible:
            visible = visible.split(".")
        else:
            visible = []
        metadata = current.deployment_settings.get_gis_layer_metadata()
        styled = self.style
        for record in rows:
            layer_id = record.layer_id
            # Find the 1st row in all_layers which matches this
            # (layer_ids above came from all_layers, so a match exists)
            for row in all_layers:
                if row["gis_layer_config.layer_id"] == layer_id:
                    layer_config = row["gis_layer_config"]
                    break
            # Check if layer is enabled
            if layer_config.enabled is False:
                continue
            # Check user is allowed to access the layer
            role_required = record.role_required
            if role_required and not s3_has_role(role_required):
                continue
            # All OK - add SubLayer
            record["visible"] = layer_config.visible or str(layer_id) in visible
            if base and layer_config.base:
                # var name can't conflict with OSM/WMS/ArcREST layers
                record["_base"] = True
                base = False
            else:
                record["_base"] = False
            record["dir"] = layer_config.dir
            if styled:
                # Copy the style details from the matched row onto the record
                style = row.get("gis_style", None)
                if style:
                    style_dict = style.style
                    if isinstance(style_dict, basestring):
                        # Matryoshka? (JSON still serialised as a string)
                        try:
                            style_dict = json.loads(style_dict)
                        except ValueError:
                            pass
                    if style_dict:
                        record["style"] = style_dict
                    else:
                        record["style"] = None
                    marker = row.get("gis_marker", None)
                    if marker:
                        record["marker"] = Marker(marker)
                    #if style.marker_id:
                    #    record["marker"] = Marker(marker_id=style.marker_id)
                    else:
                        # Default Marker?
                        record["marker"] = Marker(tablename=tablename)
                    record["opacity"] = style.opacity or 1
                    record["popup_format"] = style.popup_format
                    record["cluster_distance"] = style.cluster_distance or CLUSTER_DISTANCE
                    if style.cluster_threshold != None:
                        record["cluster_threshold"] = style.cluster_threshold
                    else:
                        record["cluster_threshold"] = CLUSTER_THRESHOLD
                else:
                    # No style record: use defaults throughout
                    record["style"] = None
                    record["opacity"] = 1
                    record["popup_format"] = None
                    record["cluster_distance"] = CLUSTER_DISTANCE
                    record["cluster_threshold"] = CLUSTER_THRESHOLD
                    # Default Marker?
                    record["marker"] = Marker(tablename=tablename)
            if metadata:
                # Pass the CMS Post for the layer metadata, if-any
                post_id = row.get("cms_post_layer.post_id", None)
                record["post_id"] = post_id
            if tablename in ("gis_layer_bing", "gis_layer_google"):
                # SubLayers handled differently
                append(record)
            else:
                append(SubLayer(record))
        # Alphasort layers
        # - client will only sort within their type: s3.gis.layers.js
        self.sublayers = sorted(sublayers, key=lambda row: row.name)
    # -------------------------------------------------------------------------
    def as_dict(self, options=None):
        """
            Output the Layers as a Python dict
            - when options is given, insert under self.dictname instead of
              returning
        """
        sublayer_dicts = []
        append = sublayer_dicts.append
        sublayers = self.sublayers
        for sublayer in sublayers:
            # Read the output dict for this sublayer
            sublayer_dict = sublayer.as_dict()
            if sublayer_dict:
                # Add this layer to the list of layers for this layer type
                append(sublayer_dict)
        if sublayer_dicts:
            if options:
                # Used by Map._setup()
                options[self.dictname] = sublayer_dicts
            else:
                # Used by as_json() and hence as_javascript()
                return sublayer_dicts
    # -------------------------------------------------------------------------
    def as_json(self):
        """
            Output the Layers as JSON
            (None when there are no layers to output)
        """
        result = self.as_dict()
        if result:
            #return json.dumps(result, indent=4, separators=(",", ": "), sort_keys=True)
            return json.dumps(result, separators=SEPARATORS)
    # -------------------------------------------------------------------------
    def as_javascript(self):
        """
            Output the Layers as global Javascript
            - suitable for inclusion in the HTML page
        """
        result = self.as_json()
        if result:
            return '''S3.gis.%s=%s\n''' % (self.dictname, result)
    # -------------------------------------------------------------------------
    class SubLayer(object):
        """
            A single layer record of this type, exposed with attribute access
        """
        def __init__(self, record):
            # Ensure all attributes available (even if Null)
            self.__dict__.update(record)
            del record
            # safe_name: (optionally localised) name with quotes stripped,
            # safe for embedding in generated JS
            if current.deployment_settings.get_L10n_translate_gis_layer():
                self.safe_name = re.sub('[\\"]', "", s3_unicode(current.T(self.name)))
            else:
                self.safe_name = re.sub('[\\"]', "", self.name)
            if hasattr(self, "projection_id"):
                self.projection = Projection(self.projection_id)
        def setup_clustering(self, output):
            """
                Add cluster settings to the output dict when they differ
                from the client-side defaults
            """
            if hasattr(self, "cluster_attribute"):
                cluster_attribute = self.cluster_attribute
            else:
                cluster_attribute = None
            cluster_distance = self.cluster_distance
            cluster_threshold = self.cluster_threshold
            if cluster_attribute and \
               cluster_attribute != CLUSTER_ATTRIBUTE:
                output["cluster_attribute"] = cluster_attribute
            if cluster_distance != CLUSTER_DISTANCE:
                output["cluster_distance"] = cluster_distance
            if cluster_threshold != CLUSTER_THRESHOLD:
                output["cluster_threshold"] = cluster_threshold
        def setup_folder(self, output):
            """ Add the (localised) folder name to the output dict """
            if self.dir:
                output["dir"] = s3_unicode(current.T(self.dir))
        def setup_folder_and_visibility(self, output):
            """ Add folder name & visibility flag to the output dict """
            if not self.visible:
                output["visibility"] = False
            if self.dir:
                output["dir"] = s3_unicode(current.T(self.dir))
        def setup_folder_visibility_and_opacity(self, output):
            """ Add folder name, visibility flag & opacity to the output dict """
            if not self.visible:
                output["visibility"] = False
            if self.opacity != 1:
                output["opacity"] = "%.1f" % self.opacity
            if self.dir:
                output["dir"] = s3_unicode(current.T(self.dir))
        # ---------------------------------------------------------------------
        @staticmethod
        def add_attributes_if_not_default(output, **values_and_defaults):
            """
                Add each value to the output dict unless it matches one of
                its defaults: kwargs map key -> (value, tuple-of-defaults)
            """
            # could also write values in debug mode, to check if defaults ignored.
            # could also check values are not being overwritten.
            for key, (value, defaults) in values_and_defaults.iteritems():
                if value not in defaults:
                    output[key] = value
# -----------------------------------------------------------------------------
class LayerArcREST(Layer):
    """
        ArcGIS REST Layers from Catalogue
    """
    tablename = "gis_layer_arcrest"
    dictname = "layers_arcrest"
    style = False

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """
                Return the sublayer as a dict of attributes for the client
            """
            # Mandatory attributes
            ldict = {"id": self.layer_id,
                     "type": "arcrest",
                     "name": self.safe_name,
                     "url": self.url,
                     }
            # Attributes which are defaulted client-side if not set
            self.setup_folder_and_visibility(ldict)
            self.add_attributes_if_not_default(
                ldict,
                layers = (self.layers, ([0],)),
                transparent = (self.transparent, (True,)),
                base = (self.base, (False,)),
                _base = (self._base, (False,)),
                format = (self.img_format, ("png",)),
            )
            return ldict
# -----------------------------------------------------------------------------
class LayerBing(Layer):
    """
        Bing Layers from Catalogue
    """
    tablename = "gis_layer_bing"
    dictname = "Bing"
    style = False

    # -------------------------------------------------------------------------
    def as_dict(self, options=None):
        """
            Return the layers as a dict (or insert into the options dict)
            @raise Exception: wrong projection or missing API key
        """
        sublayers = self.sublayers
        if not sublayers:
            return
        if Projection().epsg != 900913:
            raise Exception("Cannot display Bing layers unless we're using the Spherical Mercator Projection\n")
        apikey = current.deployment_settings.get_gis_api_bing()
        if not apikey:
            raise Exception("Cannot display Bing layers unless we have an API key\n")
        # Mandatory attributes
        ldict = {"ApiKey": apikey
                 }
        # Map sublayer type to (output key, default name)
        types = {"aerial": ("Aerial", "Bing Satellite"),
                 "road": ("Road", "Bing Roads"),
                 "hybrid": ("Hybrid", "Bing Hybrid"),
                 }
        for sublayer in sublayers:
            if sublayer._base:
                # Set default Base layer
                ldict["Base"] = sublayer.type
            entry = types.get(sublayer.type)
            if entry:
                key, default_name = entry
                ldict[key] = {"name": sublayer.name or default_name,
                              "id": sublayer.layer_id}
        if options:
            # Used by Map._setup()
            options[self.dictname] = ldict
            return
        # Used by as_json() and hence as_javascript()
        return ldict
# -----------------------------------------------------------------------------
class LayerCoordinate(Layer):
    """
        Coordinate Layer from Catalogue
        - there should only be one of these
    """
    tablename = "gis_layer_coordinate"
    dictname = "CoordinateGrid"
    style = False

    # -------------------------------------------------------------------------
    def as_dict(self, options=None):
        """
            Return the layer as a dict (or insert into the options dict)
        """
        if not self.sublayers:
            return
        sublayer = self.sublayers[0]
        # Strip quotes from the name to make it JS-safe
        ldict = {"name": re.sub("'", "", sublayer.name),
                 "visibility": sublayer.visible,
                 "id": sublayer.layer_id,
                 }
        if options:
            # Used by Map._setup()
            options[self.dictname] = ldict
            return
        # Used by as_json() and hence as_javascript()
        return ldict
# -----------------------------------------------------------------------------
class LayerEmpty(Layer):
    """
        Empty Layer from Catalogue
        - there should only be one of these
    """
    tablename = "gis_layer_empty"
    dictname = "EmptyLayer"
    style = False

    # -------------------------------------------------------------------------
    def as_dict(self, options=None):
        """
            Return the layer as a dict (or insert into the options dict)
        """
        if not self.sublayers:
            return
        sublayer = self.sublayers[0]
        # Localise the name & strip quotes to make it JS-safe
        name = re.sub("'", "", s3_unicode(current.T(sublayer.name)))
        ldict = {"name": name,
                 "id": sublayer.layer_id,
                 }
        if sublayer._base:
            # Marked as the default Base layer
            ldict["base"] = True
        if options:
            # Used by Map._setup()
            options[self.dictname] = ldict
            return
        # Used by as_json() and hence as_javascript()
        return ldict
# -----------------------------------------------------------------------------
class LayerFeature(Layer):
    """
        Feature Layers from Catalogue
    """
    tablename = "gis_layer_feature"
    dictname = "layers_feature"
    style = True

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def __init__(self, record):
            """
                Check accessibility of the target controller/function
                before setting up the SubLayer

                @param record: the gis_layer_feature Row
                @raise Exception: if the record has no controller
            """
            controller = record.controller
            # skip=True => as_dict() outputs nothing for this layer
            self.skip = False
            if controller is not None:
                if controller not in current.deployment_settings.modules:
                    # Module is disabled
                    self.skip = True
                if not current.auth.permission.has_permission("read",
                                                              c=controller,
                                                              f=record.function):
                    # User has no permission to this resource (in ACL)
                    self.skip = True
            else:
                error = "Feature Layer Record '%s' has no controller" % \
                    record.name
                raise Exception(error)
            super(LayerFeature.SubLayer, self).__init__(record)
        def as_dict(self):
            """
                Return the sublayer as a dict of attributes for the client
                (None when the layer is skipped)
            """
            if self.skip:
                # Skip layer
                return
            # maxdepth controls component resolution of the GeoJSON export
            if self.use_site:
                maxdepth = 1
            else:
                maxdepth = 0
            if self.aggregate:
                # id is used for url_format
                url = "%s.geojson?layer=%i&show_ids=true" % \
                    (URL(c=self.controller, f=self.function, args="report"),
                     self.layer_id)
                # Use gis/location controller in all reports
                url_format = "%s/{id}.plain" % URL(c="gis", f="location")
            else:
                _url = URL(self.controller, self.function)
                # id is used for url_format
                url = "%s.geojson?layer=%i&components=None&maxdepth=%s&show_ids=true" % \
                    (_url,
                     self.layer_id,
                     maxdepth)
                url_format = "%s/{id}.plain" % _url
            if self.filter:
                # Apply the layer's resource filter to the data URL
                url = "%s&%s" % (url, self.filter)
            if self.trackable:
                url = "%s&track=1" % url
            # Mandatory attributes
            output = {"id": self.layer_id,
                      # Defaults client-side if not-provided
                      #"type": "feature",
                      "name": self.safe_name,
                      "url_format": url_format,
                      "url": url,
                      }
            popup_format = self.popup_format
            if popup_format:
                # New-style
                if "T(" in popup_format:
                    # i18n: resolve T(...) markers server-side
                    T = current.T
                    items = regex_translate.findall(popup_format)
                    for item in items:
                        titem = str(T(item[1:-1]))
                        popup_format = popup_format.replace("T(%s)" % item,
                                                            titem)
                output["popup_format"] = popup_format
            else:
                # @ToDo: Deprecate
                popup_fields = self.popup_fields
                if popup_fields:
                    # Old-style: build a popup_format from the field list
                    popup_label = self.popup_label
                    if popup_label:
                        popup_format = "{%s} (%s)" % (popup_fields[0],
                                                      current.T(popup_label))
                    else:
                        popup_format = "%s" % popup_fields[0]
                    for f in popup_fields[1:]:
                        popup_format = "%s<br/>{%s}" % (popup_format, f)
                output["popup_format"] = popup_format or ""
            # Attributes which are defaulted client-side if not set
            self.setup_folder_visibility_and_opacity(output)
            self.setup_clustering(output)
            if self.aggregate:
                # Enable the Cluster Strategy, so that it can be enabled/disabled
                # depending on the zoom level & hence Points or Polygons
                output["cluster"] = 1
            if not popup_format:
                # Need this to differentiate from e.g. FeatureQueries
                output["no_popups"] = 1
            if self.style:
                output["style"] = self.style
            else:
                # No style: fall back to the Marker
                self.marker.add_attributes_to_output(output)
            return output
# -----------------------------------------------------------------------------
class LayerGeoJSON(Layer):
    """
        GeoJSON Layers from Catalogue
    """
    tablename = "gis_layer_geojson"
    dictname = "layers_geojson"
    style = True

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """
                Return the sublayer as a dict of attributes for the client
            """
            # Mandatory attributes
            ldict = {"id": self.layer_id,
                     "type": "geojson",
                     "name": self.safe_name,
                     "url": self.url,
                     }
            # Only pass the projection if it isn't the default
            epsg = self.projection.epsg
            if epsg != 4326:
                ldict["projection"] = epsg
            # Attributes which are defaulted client-side if not set
            self.setup_folder_visibility_and_opacity(ldict)
            self.setup_clustering(ldict)
            if self.style:
                ldict["style"] = self.style
            else:
                # No style: fall back to the Marker
                self.marker.add_attributes_to_output(ldict)
            popup_format = self.popup_format
            if popup_format:
                if "T(" in popup_format:
                    # i18n: resolve T(...) markers server-side
                    T = current.T
                    for token in regex_translate.findall(popup_format):
                        translated = str(T(token[1:-1]))
                        popup_format = popup_format.replace("T(%s)" % token,
                                                            translated)
                ldict["popup_format"] = popup_format
            return ldict
# -----------------------------------------------------------------------------
class LayerGeoRSS(Layer):
    """
        GeoRSS Layers from Catalogue
    """
    tablename = "gis_layer_georss"
    dictname = "layers_georss"
    style = True
    def __init__(self, all_layers):
        """
            Also binds the gis_cache table onto the SubLayer class,
            which it uses to cache downloaded feeds
        """
        super(LayerGeoRSS, self).__init__(all_layers)
        LayerGeoRSS.SubLayer.cachetable = current.s3db.gis_cache
    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """
                Return the sublayer as a dict of attributes for the client

                Side-effect: downloads the feed into the gis_cache table
                when there is no sufficiently-fresh cached copy
            """
            db = current.db
            request = current.request
            response = current.response
            cachetable = self.cachetable
            url = self.url
            # Check to see if we should Download layer to the cache
            download = True
            query = (cachetable.source == url)
            existing_cached_copy = db(query).select(cachetable.modified_on,
                                                    limitby=(0, 1)).first()
            refresh = self.refresh or 900 # 15 minutes set if we have no data (legacy DB)
            if existing_cached_copy:
                modified_on = existing_cached_copy.modified_on
                cutoff = modified_on + datetime.timedelta(seconds=refresh)
                if request.utcnow < cutoff:
                    # Cached copy is still fresh enough
                    download = False
            if download:
                # Download layer to the Cache
                from gluon.tools import fetch
                # @ToDo: Call directly without going via HTTP
                # @ToDo: Make this async by using S3Task (also use this for the refresh time)
                fields = ""
                if self.data:
                    fields = "&data_field=%s" % self.data
                if self.image:
                    fields = "%s&image_field=%s" % (fields, self.image)
                _url = "%s%s/update.georss?fetchurl=%s%s" % (current.deployment_settings.get_base_public_url(),
                                                             URL(c="gis", f="cache_feed"),
                                                             url,
                                                             fields)
                # Keep Session for local URLs
                import Cookie
                cookie = Cookie.SimpleCookie()
                cookie[response.session_id_name] = response.session_id
                current.session._unlock(response)
                try:
                    # @ToDo: Need to commit to not have DB locked with SQLite?
                    fetch(_url, cookie=cookie)
                    if existing_cached_copy:
                        # Clear old selfs which are no longer active
                        query = (cachetable.source == url) & \
                                (cachetable.modified_on < cutoff)
                        db(query).delete()
                except Exception, exception:
                    current.log.error("GeoRSS %s download error" % url, exception)
                    # Feed down
                    if existing_cached_copy:
                        # Use cached copy
                        # Should we Update timestamp to prevent every
                        # subsequent request attempting the download?
                        #query = (cachetable.source == url)
                        #db(query).update(modified_on=request.utcnow)
                        pass
                    else:
                        response.warning += "%s down & no cached copy available" % url
            name_safe = self.safe_name
            # Pass the GeoJSON URL to the client
            # Filter to the source of this feed
            url = "%s.geojson?cache.source=%s" % (URL(c="gis", f="cache_feed"),
                                                  url)
            # Mandatory attributes
            output = {"id": self.layer_id,
                      "type": "georss",
                      "name": name_safe,
                      "url": url,
                      }
            self.marker.add_attributes_to_output(output)
            # Attributes which are defaulted client-side if not set
            if self.refresh != 900:
                output["refresh"] = self.refresh
            self.setup_folder_visibility_and_opacity(output)
            self.setup_clustering(output)
            return output
# -----------------------------------------------------------------------------
class LayerGoogle(Layer):
    """
        Google Layers/Tools from Catalogue
    """
    tablename = "gis_layer_google"
    dictname = "Google"
    style = False

    # -------------------------------------------------------------------------
    def as_dict(self, options=None):
        """
            Return the layers as a dict (or insert into the options dict)

            Side-effect: appends the required Google API <script> URLs to
            s3.scripts and panel scripts to self.scripts
        """
        sublayers = self.sublayers
        if sublayers:
            T = current.T
            # Non-Earth layers require the Spherical Mercator Projection
            epsg = (Projection().epsg == 900913)
            settings = current.deployment_settings
            apikey = settings.get_gis_api_google()
            s3 = current.response.s3
            debug = s3.debug
            # Google scripts use document.write so cannot be loaded async via yepnope.js
            s3_scripts = s3.scripts
            ldict = {}
            for sublayer in sublayers:
                # Attributes which are defaulted client-side if not set
                if sublayer.type == "earth":
                    ldict["Earth"] = str(T("Switch to 3D"))
                    # URL-encoded form of:
                    #{"modules":[{"name":"earth","version":"1"}]}
                    script = "//www.google.com/jsapi?key=" + apikey + "&autoload=%7B%22modules%22%3A%5B%7B%22name%22%3A%22earth%22%2C%22version%22%3A%221%22%7D%5D%7D"
                    if script not in s3_scripts:
                        s3_scripts.append(script)
                    # Dynamic Loading not supported: https://developers.google.com/loader/#Dynamic
                    #s3.jquery_ready.append('''try{google.load('earth','1')catch(e){}''')
                    if debug:
                        self.scripts.append("gis/gxp/widgets/GoogleEarthPanel.js")
                    else:
                        self.scripts.append("gis/gxp/widgets/GoogleEarthPanel.min.js")
                    s3.js_global.append('''S3.public_url="%s"''' % settings.get_base_public_url())
                elif epsg:
                    # Earth is the only layer which can run in non-Spherical Mercator
                    # @ToDo: Warning?
                    if sublayer._base:
                        # Set default Base layer
                        ldict["Base"] = sublayer.type
                    if sublayer.type == "satellite":
                        ldict["Satellite"] = {"name": sublayer.name or "Google Satellite",
                                              "id": sublayer.layer_id}
                    elif sublayer.type == "maps":
                        ldict["Maps"] = {"name": sublayer.name or "Google Maps",
                                         "id": sublayer.layer_id}
                    elif sublayer.type == "hybrid":
                        ldict["Hybrid"] = {"name": sublayer.name or "Google Hybrid",
                                           "id": sublayer.layer_id}
                    elif sublayer.type == "streetview":
                        # Placeholder: replaced with the localised string below
                        ldict["StreetviewButton"] = "Click where you want to open Streetview"
                    elif sublayer.type == "terrain":
                        ldict["Terrain"] = {"name": sublayer.name or "Google Terrain",
                                            "id": sublayer.layer_id}
                    elif sublayer.type == "mapmaker":
                        ldict["MapMaker"] = {"name": sublayer.name or "Google MapMaker",
                                             "id": sublayer.layer_id}
                    elif sublayer.type == "mapmakerhybrid":
                        ldict["MapMakerHybrid"] = {"name": sublayer.name or "Google MapMaker Hybrid",
                                                   "id": sublayer.layer_id}
            if "MapMaker" in ldict or "MapMakerHybrid" in ldict:
                # Need to use v2 API
                # This should be able to be fixed in OpenLayers now since Google have fixed in v3 API:
                # http://code.google.com/p/gmaps-api-issues/issues/detail?id=2349#c47
                script = "//maps.google.com/maps?file=api&v=2&key=%s" % apikey
                if script not in s3_scripts:
                    s3_scripts.append(script)
            else:
                # v3 API (3.16 is frozen, 3.17 release & 3.18 is nightly)
                script = "//maps.google.com/maps/api/js?v=3.17&sensor=false"
                if script not in s3_scripts:
                    s3_scripts.append(script)
                if "StreetviewButton" in ldict:
                    # Streetview doesn't work with v2 API
                    ldict["StreetviewButton"] = str(T("Click where you want to open Streetview"))
                    ldict["StreetviewTitle"] = str(T("Street View"))
                    if debug:
                        self.scripts.append("gis/gxp/widgets/GoogleStreetViewPanel.js")
                    else:
                        self.scripts.append("gis/gxp/widgets/GoogleStreetViewPanel.min.js")
            if options:
                # Used by Map._setup()
                options[self.dictname] = ldict
            else:
                # Used by as_json() and hence as_javascript()
                return ldict
# -----------------------------------------------------------------------------
class LayerGPX(Layer):
    """
        GPX Layers from Catalogue
    """
    tablename = "gis_layer_gpx"
    dictname = "layers_gpx"
    style = True

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """
                Return the sublayer as a dict of attributes for the client
            """
            # Serve the uploaded track from the default download controller
            url = URL(c="default", f="download",
                      args=self.track)
            # Mandatory attributes
            ldict = {"id": self.layer_id,
                     "name": self.safe_name,
                     "url": url,
                     }
            # Attributes which are defaulted client-side if not set
            self.marker.add_attributes_to_output(ldict)
            self.add_attributes_if_not_default(
                ldict,
                waypoints = (self.waypoints, (True,)),
                tracks = (self.tracks, (True,)),
                routes = (self.routes, (True,)),
            )
            self.setup_folder_visibility_and_opacity(ldict)
            self.setup_clustering(ldict)
            return ldict
# -----------------------------------------------------------------------------
class LayerJS(Layer):
    """
        JS Layers from Catalogue
        - these are raw Javascript layers for use by expert OpenLayers people
          to quickly add/configure new data sources without needing support
          from back-end Sahana programmers
    """
    tablename = "gis_layer_js"
    dictname = "layers_js"
    style = False

    # -------------------------------------------------------------------------
    def as_dict(self, options=None):
        """
            Return the layers as a list of raw JS snippets
            (or insert into the options dict)
        """
        sublayers = self.sublayers
        if not sublayers:
            return
        # Just the raw JS code for each sublayer, no dicts here
        codes = [sublayer.code for sublayer in sublayers]
        if options:
            # Used by Map._setup()
            options[self.dictname] = codes
            return
        # Used by as_json() and hence as_javascript()
        return codes
# -----------------------------------------------------------------------------
class LayerKML(Layer):
    """
        KML Layers from Catalogue
    """
    tablename = "gis_layer_kml"
    dictname = "layers_kml"
    style = True

    # -------------------------------------------------------------------------
    def __init__(self, all_layers, init=True):
        """
            Set up the KML cache, should be done once per request

            @param init: unused (retained for backwards-compatibility)
        """
        super(LayerKML, self).__init__(all_layers)
        # Can we cache downloaded KML feeds?
        # Needed for unzipping & filtering as well
        # @ToDo: Should we move this folder to static to speed up access to cached content?
        #           Do we need to secure it?
        request = current.request
        cachepath = os.path.join(request.folder,
                                 "uploads",
                                 "gis_cache")
        if os.path.exists(cachepath):
            # Folder exists: cacheable if writable
            cacheable = os.access(cachepath, os.W_OK)
        else:
            try:
                os.mkdir(cachepath)
            except OSError, os_error:
                current.log.error("GIS: KML layers cannot be cached: %s %s" % \
                                  (cachepath, os_error))
                cacheable = False
            else:
                cacheable = True
        # @ToDo: Migrate to gis_cache
        LayerKML.cachetable = current.s3db.gis_cache2
        LayerKML.cacheable = cacheable
        LayerKML.cachepath = cachepath
    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """
                Return the sublayer as a dict of attributes for the client

                Side-effect: schedules a download of the KML source into
                the cache when no sufficiently-fresh cached copy exists
            """
            db = current.db
            request = current.request
            cachetable = LayerKML.cachetable
            cacheable = LayerKML.cacheable
            #cachepath = LayerKML.cachepath
            name = self.name
            if cacheable:
                # Build a filesystem-safe cache filename from the layer name
                _name = urllib2.quote(name)
                _name = _name.replace("%", "_")
                filename = "%s.file.%s.kml" % (cachetable._tablename,
                                               _name)
                # Should we download a fresh copy of the source file?
                download = True
                query = (cachetable.name == name)
                cached = db(query).select(cachetable.modified_on,
                                          limitby=(0, 1)).first()
                refresh = self.refresh or 900 # 15 minutes set if we have no data (legacy DB)
                if cached:
                    modified_on = cached.modified_on
                    cutoff = modified_on + datetime.timedelta(seconds=refresh)
                    if request.utcnow < cutoff:
                        # Cached copy is still fresh enough
                        download = False
                if download:
                    # Download file (async, if workers alive)
                    response = current.response
                    session_id_name = response.session_id_name
                    session_id = response.session_id
                    current.s3task.async("gis_download_kml",
                                         args=[self.id, filename, session_id_name, session_id])
                    if cached:
                        db(query).update(modified_on=request.utcnow)
                    else:
                        cachetable.insert(name=name, file=filename)
                # Serve the cached copy
                url = URL(c="default", f="download",
                          args=[filename])
            else:
                # No caching possible (e.g. GAE), display file direct from remote (using Proxy)
                # (Requires OpenLayers.Layer.KML to be available)
                url = self.url
            # Mandatory attributes
            output = dict(id = self.layer_id,
                          name = self.safe_name,
                          url = url,
                          )
            # Attributes which are defaulted client-side if not set
            self.add_attributes_if_not_default(
                output,
                title = (self.title, ("name", None, "")),
                body = (self.body, ("description", None)),
                refresh = (self.refresh, (900,)),
            )
            self.setup_folder_visibility_and_opacity(output)
            self.setup_clustering(output)
            if self.style:
                output["style"] = self.style
            else:
                # No style: fall back to the Marker
                self.marker.add_attributes_to_output(output)
            return output
# -----------------------------------------------------------------------------
class LayerOSM(Layer):
    """
        OpenStreetMap Layers from Catalogue

        @ToDo: Provide a catalogue of standard layers which are fully-defined
               in static & can just have name over-ridden, as well as
               fully-custom layers.
    """
    tablename = "gis_layer_openstreetmap"
    dictname = "layers_osm"
    style = False

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """
                Return the sublayer as a dict of attributes for the client
            """
            if Projection().epsg != 900913:
                # Cannot display OpenStreetMap layers unless we're using
                # the Spherical Mercator Projection
                return {}
            # Mandatory attributes
            ldict = {"id": self.layer_id,
                     "name": self.safe_name,
                     "url1": self.url1,
                     }
            # Attributes which are defaulted client-side if not set
            self.add_attributes_if_not_default(
                ldict,
                base = (self.base, (True,)),
                _base = (self._base, (False,)),
                url2 = (self.url2, ("",)),
                url3 = (self.url3, ("",)),
                zoomLevels = (self.zoom_levels, (9,)),
                attribution = (self.attribution, (None,)),
            )
            self.setup_folder_and_visibility(ldict)
            return ldict
# -----------------------------------------------------------------------------
class LayerOpenWeatherMap(Layer):
    """
        OpenWeatherMap Layers from Catalogue
    """
    tablename = "gis_layer_openweathermap"
    dictname = "OWM"
    style = False

    # -------------------------------------------------------------------------
    def as_dict(self, options=None):
        """
            Return the layers as a dict (or insert into the options dict)
        """
        sublayers = self.sublayers
        if not sublayers:
            return
        # Load the client-side OWM support
        if current.response.s3.debug:
            self.scripts.append("gis/OWM.OpenLayers.js")
        else:
            self.scripts.append("gis/OWM.OpenLayers.min.js")
        # Default layer names per sublayer type
        default_names = {"station": "Weather Stations",
                         "city": "Current Weather",
                         }
        ldict = {}
        for sublayer in sublayers:
            stype = sublayer.type
            if stype in default_names:
                ldict[stype] = {"name": sublayer.name or default_names[stype],
                                "id": sublayer.layer_id,
                                "dir": sublayer.dir,
                                "visibility": sublayer.visible
                                }
        if options:
            # Used by Map._setup()
            options[self.dictname] = ldict
            return
        # Used by as_json() and hence as_javascript()
        return ldict
# -----------------------------------------------------------------------------
class LayerShapefile(Layer):
    """
        Shapefile Layers from Catalogue
    """
    tablename = "gis_layer_shapefile"
    dictname = "layers_shapefile"
    style = True

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """
                Return the sublayer as a dict of attributes for the client
            """
            # Data is read as GeoJSON from the layer's own controller
            url = "%s/%s/data.geojson" % \
                  (URL(c="gis", f="layer_shapefile"), self.id)
            if self.filter:
                # Apply the layer's filter to the data URL
                url = "%s?layer_shapefile_%s.%s" % (url, self.id, self.filter)
            # Mandatory attributes
            ldict = {"id": self.layer_id,
                     "type": "shapefile",
                     "name": self.safe_name,
                     "url": url,
                     # Shapefile layers don't alter their contents, so don't refresh
                     "refresh": 0,
                     }
            # Attributes which are defaulted client-side if not set
            self.add_attributes_if_not_default(
                ldict,
                desc = (self.description, (None, "")),
                src = (self.source_name, (None, "")),
                src_url = (self.source_url, (None, "")),
            )
            # We convert on-upload to have BBOX handling work properly
            #projection = self.projection
            #if projection.epsg != 4326:
            #    ldict["projection"] = projection.epsg
            self.setup_folder_visibility_and_opacity(ldict)
            self.setup_clustering(ldict)
            if self.style:
                ldict["style"] = self.style
            else:
                # No style: fall back to the Marker
                self.marker.add_attributes_to_output(ldict)
            return ldict
# -----------------------------------------------------------------------------
class LayerTheme(Layer):
    """
        Theme Layers from Catalogue
    """
    tablename = "gis_layer_theme"
    dictname = "layers_theme"
    style = True

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """
                Return the sublayer as a dict of attributes for the client
            """
            # Data is read from the theme_data controller, as Polygons
            url = "%s.geojson?theme_data.layer_theme_id=%i&polygons=1&maxdepth=0" % \
                  (URL(c="gis", f="theme_data"), self.id)
            # Mandatory attributes
            ldict = {"id": self.layer_id,
                     "type": "theme",
                     "name": self.safe_name,
                     "url": url,
                     }
            # Attributes which are defaulted client-side if not set
            self.setup_folder_visibility_and_opacity(ldict)
            self.setup_clustering(ldict)
            if self.style:
                ldict["style"] = self.style
            return ldict
# -----------------------------------------------------------------------------
class LayerTMS(Layer):
    """
        TMS Layers from Catalogue
    """

    tablename = "gis_layer_tms"
    dictname = "layers_tms"
    style = False

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            # Mandatory attributes
            tms = dict(id = self.layer_id,
                       type = "tms",
                       name = self.safe_name,
                       url = self.url,
                       layername = self.layername,
                       )
            # Attributes which are defaulted client-side if not set
            self.add_attributes_if_not_default(
                tms,
                attribution = (self.attribution, (None,)),
                format = (self.img_format, ("png", None)),
                url2 = (self.url2, (None,)),
                url3 = (self.url3, (None,)),
                zoomLevels = (self.zoom_levels, (19,)),
                _base = (self._base, (False,)),
            )
            self.setup_folder(tms)
            return tms
# -----------------------------------------------------------------------------
class LayerWFS(Layer):
    """
        WFS Layers from Catalogue
    """

    tablename = "gis_layer_wfs"
    dictname = "layers_wfs"
    style = True

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            # Mandatory attributes
            wfs = {"id": self.layer_id,
                   "name": self.safe_name,
                   "url": self.url,
                   "title": self.title,
                   "featureType": self.featureType,
                   }
            # Attributes which are defaulted client-side if not set
            self.add_attributes_if_not_default(
                wfs,
                version = (self.version, ("1.1.0",)),
                featureNS = (self.featureNS, (None, "")),
                geometryName = (self.geometryName, ("the_geom",)),
                schema = (self.wfs_schema, (None, "")),
                username = (self.username, (None, "")),
                password = (self.password, (None, "")),
                projection = (self.projection.epsg, (4326,)),
                desc = (self.description, (None, "")),
                src = (self.source_name, (None, "")),
                src_url = (self.source_url, (None, "")),
                refresh = (self.refresh, (0,)),
                #editable
            )
            self.setup_folder_visibility_and_opacity(wfs)
            self.setup_clustering(wfs)

            style = self.style
            if style:
                wfs["style"] = style
            else:
                self.marker.add_attributes_to_output(wfs)

            return wfs
# -----------------------------------------------------------------------------
class LayerWMS(Layer):
    """
        WMS Layers from Catalogue
    """

    tablename = "gis_layer_wms"
    dictname = "layers_wms"
    style = False

    # -------------------------------------------------------------------------
    def __init__(self, all_layers):
        super(LayerWMS, self).__init__(all_layers)
        if self.sublayers:
            # WMS layers need the GetFeatureInfo plugin loaded client-side
            if current.response.s3.debug:
                script = "gis/gxp/plugins/WMSGetFeatureInfo.js"
            else:
                script = "gis/gxp/plugins/WMSGetFeatureInfo.min.js"
            self.scripts.append(script)

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            if self.queryable:
                current.response.s3.gis.get_feature_info = True
            # Mandatory attributes
            wms = dict(id = self.layer_id,
                       name = self.safe_name,
                       url = self.url,
                       layers = self.layers,
                       )
            # Attributes which are defaulted client-side if not set
            settings = current.deployment_settings
            legend_url = self.legend_url
            if legend_url and not legend_url.startswith("http"):
                # Convert a relative legend URL into an absolute one
                legend_url = "%s/%s%s" % (settings.get_base_public_url(),
                                          current.request.application,
                                          legend_url)
            defaults = dict(transparent = (self.transparent, (True,)),
                            version = (self.version, ("1.1.1",)),
                            format = (self.img_format, ("image/png",)),
                            map = (self.map, (None, "")),
                            username = (self.username, (None, "")),
                            password = (self.password, (None, "")),
                            buffer = (self.buffer, (0,)),
                            base = (self.base, (False,)),
                            _base = (self._base, (False,)),
                            style = (self.style, (None, "")),
                            bgcolor = (self.bgcolor, (None, "")),
                            tiled = (self.tiled, (False,)),
                            legendURL = (legend_url, (None, "")),
                            queryable = (self.queryable, (False,)),
                            desc = (self.description, (None, "")),
                            )
            if settings.get_gis_layer_metadata():
                # Use CMS to add info about sources
                defaults["post_id"] = (self.post_id, (None, ""))
            else:
                # Link direct to sources
                defaults["src"] = (self.source_name, (None, ""))
                defaults["src_url"] = (self.source_url, (None, ""))
            self.add_attributes_if_not_default(wms, **defaults)
            self.setup_folder_visibility_and_opacity(wms)

            return wms
# -----------------------------------------------------------------------------
class LayerXYZ(Layer):
    """
        XYZ Layers from Catalogue
    """

    tablename = "gis_layer_xyz"
    dictname = "layers_xyz"
    style = False

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            # Mandatory attributes
            xyz = dict(id = self.layer_id,
                       name = self.safe_name,
                       url = self.url,
                       )
            # Attributes which are defaulted client-side if not set
            self.add_attributes_if_not_default(
                xyz,
                attribution = (self.attribution, (None,)),
                format = (self.img_format, ("png", None)),
                url2 = (self.url2, (None,)),
                url3 = (self.url3, (None,)),
                zoomLevels = (self.zoom_levels, (19,)),
                _base = (self._base, (False,)),
            )
            self.setup_folder(xyz)
            return xyz
# =============================================================================
class Marker(object):
    """
        Represents a Map Marker

        Resolution order for the marker image: an explicit Storage passed
        in by the caller, then a gis_marker record ID, then the layer's
        gis_style, then the active config's default marker (suppressed for
        non-Point shapefile layers).

        @ToDo: Support Markers in Themes
    """

    def __init__(self,
                 marker=None,
                 marker_id=None,
                 layer_id=None,
                 tablename=None):
        """
            @param marker: Storage object with image/height/width (looked-up in bulk)
            @param marker_id: id of record in gis_marker
            @param layer_id: layer_id to lookup marker in gis_style (unused)
            @param tablename: used to identify whether to provide a default marker as fallback
        """
        no_default = False
        if not marker:
            db = current.db
            s3db = current.s3db
            mtable = s3db.gis_marker
            config = None
            if marker_id:
                # Lookup the Marker details from it's ID
                marker = db(mtable.id == marker_id).select(mtable.image,
                                                           mtable.height,
                                                           mtable.width,
                                                           limitby=(0, 1),
                                                           cache=s3db.cache
                                                           ).first()
            elif layer_id:
                # Check if we have a Marker defined for this Layer
                # - match either the active config's style or a
                #   config-independent one, but never a per-record style
                config = GIS.get_config()
                stable = s3db.gis_style
                query = (stable.layer_id == layer_id) & \
                        ((stable.config_id == config.id) | \
                         (stable.config_id == None)) & \
                        (stable.marker_id == mtable.id) & \
                        (stable.record_id == None)
                marker = db(query).select(mtable.image,
                                          mtable.height,
                                          mtable.width,
                                          limitby=(0, 1)).first()
                if not marker:
                    # Check to see if we're a Polygon/LineString
                    # (& hence shouldn't use a default marker)
                    # gis_feature_type 1 = Point per the check below
                    if tablename == "gis_layer_shapefile":
                        table = db.gis_layer_shapefile
                        query = (table.layer_id == layer_id)
                        layer = db(query).select(table.gis_feature_type,
                                                 limitby=(0, 1)).first()
                        if layer and layer.gis_feature_type != 1:
                            no_default = True
                    #elif tablename == "gis_layer_feature":
                    #    table = db.gis_layer_feature
                    #    query = (table.layer_id == layer_id)
                    #    layer = db(query).select(table.polygons,
                    #                             limitby=(0, 1)).first()
                    #    if layer and layer.polygons:
                    #       no_default = True

        if marker:
            self.image = marker["image"]
            self.height = marker["height"]
            self.width = marker["width"]
        elif no_default:
            # NOTE: only image is set in this branch; height/width are
            # undefined, so callers must check self.image first (the
            # methods below do)
            self.image = None
        else:
            # Default Marker from the active config
            if not config:
                config = GIS.get_config()
            self.image = config.marker_image
            self.height = config.marker_height
            self.width = config.marker_width

    # -------------------------------------------------------------------------
    def add_attributes_to_output(self, output):
        """
            Called by Layer.as_dict()

            Adds the compact marker dict to the layer's output dict,
            if an image is available.
        """
        if self.image:
            output["marker"] = self.as_json_dict()

    # -------------------------------------------------------------------------
    def as_dict(self):
        """
            Called by gis.get_marker(), feature_resources & s3profile

            @return: Storage with image/height/width, or None if no image
        """
        if self.image:
            marker = Storage(image = self.image,
                             height = self.height,
                             width = self.width,
                             )
        else:
            marker = None
        return marker

    # -------------------------------------------------------------------------
    #def as_json(self):
    #    """
    #        Called by nothing
    #    """
    #    output = dict(i = self.image,
    #                  h = self.height,
    #                  w = self.width,
    #                  )
    #    return json.dumps(output, separators=SEPARATORS)

    # -------------------------------------------------------------------------
    def as_json_dict(self):
        """
            Called by Style.as_dict() and add_attributes_to_output()

            @return: compact dict (i/h/w keys) for JSON serialisation,
                     or None if no image
        """
        if self.image:
            marker = dict(i = self.image,
                          h = self.height,
                          w = self.width,
                          )
        else:
            marker = None
        return marker
# =============================================================================
class Projection(object):
    """
        Represents a Map Projection
    """

    def __init__(self, projection_id=None):
        """
            @param projection_id: gis_projection record ID; falls back
                                  to the active config's projection
        """
        if projection_id:
            # Look up the EPSG code for this projection record
            s3db = current.s3db
            ptable = s3db.gis_projection
            row = current.db(ptable.id == projection_id).select(ptable.epsg,
                                                                limitby=(0, 1),
                                                                cache=s3db.cache
                                                                ).first()
            self.epsg = row.epsg
        else:
            # Default projection from the active gis_config
            self.epsg = GIS.get_config().epsg
# =============================================================================
class Style(object):
    """
        Represents a Map Style

        Looks up a gis_style record either by ID, by layer, or as the
        active config's default, and stores the resulting row (or None)
        as self.style.
    """

    def __init__(self,
                 style_id=None,
                 layer_id=None,
                 aggregate=None):
        """
            @param style_id: gis_style record ID (takes precedence)
            @param layer_id: lookup the style for this layer instead
            @param aggregate: filter on the style's aggregate flag
                              (True also switches url_format to gis/location)
        """

        db = current.db
        s3db = current.s3db
        table = s3db.gis_style
        fields = [table.marker_id,
                  table.opacity,
                  table.popup_format,
                  # @ToDo: if-required
                  #table.url_format,
                  table.cluster_distance,
                  table.cluster_threshold,
                  table.style,
                  ]

        if style_id:
            # Direct lookup by record ID
            query = (table.id == style_id)
            limitby = (0, 1)
        elif layer_id:
            # Style for this layer: either specific to the active config
            # or config-independent; never a per-record style
            config = GIS.get_config()
            # @ToDo: if record_id:
            query = (table.layer_id == layer_id) & \
                    (table.record_id == None) & \
                    ((table.config_id == config.id) | \
                     (table.config_id == None))
            if aggregate is not None:
                query &= (table.aggregate == aggregate)
            # config_id needed below to disambiguate if both match
            fields.append(table.config_id)
            limitby = (0, 2)
        else:
            # Default style for this config
            # - falling back to Default config
            config = GIS.get_config()
            ctable = db.gis_config
            query = (table.config_id == ctable.id) & \
                    ((ctable.id == config.id) | \
                     (ctable.uuid == "SITE_DEFAULT")) & \
                    (table.layer_id == None)
            # uuid needed below to drop the SITE_DEFAULT row if both match
            fields.append(ctable.uuid)
            limitby = (0, 2)

        styles = db(query).select(*fields,
                                  limitby=limitby)

        if len(styles) > 1:
            # Both the specific and the fallback style matched:
            # keep only the more specific one
            if layer_id:
                # Remove the general one
                _filter = lambda row: row.config_id == None
            else:
                # Remove the Site Default
                _filter = lambda row: row["gis_config.uuid"] == "SITE_DEFAULT"
            styles.exclude(_filter)

        if styles:
            style = styles.first()
            # Joined selects nest the row under the table name
            if not layer_id and "gis_style" in style:
                style = style["gis_style"]
        else:
            current.log.error("Style not found!")
            style = None

        if style:
            if style.marker_id:
                style.marker = Marker(marker_id=style.marker_id)
            if aggregate is True:
                # Use gis/location controller in all reports
                style.url_format = "%s/{id}.plain" % URL(c="gis", f="location")
            elif layer_id:
                # Build from controller/function
                ftable = s3db.gis_layer_feature
                layer = db(ftable.layer_id == layer_id).select(ftable.controller,
                                                               ftable.function,
                                                               limitby=(0, 1)
                                                               ).first()
                if layer:
                    style.url_format = "%s/{id}.plain" % \
                        URL(c=layer.controller, f=layer.function)

        self.style = style

    # -------------------------------------------------------------------------
    def as_dict(self):
        """
            Serialise the style as a JSON-safe Storage, omitting
            attributes that match the client-side defaults.
        """

        # Not JSON-serializable
        #return self.style
        style = self.style
        output = Storage()
        if not style:
            return output
        if hasattr(style, "marker"):
            output.marker = style.marker.as_json_dict()
        opacity = style.opacity
        if opacity and opacity not in (1, 1.0):
            output.opacity = style.opacity
        if style.popup_format:
            output.popup_format = style.popup_format
        if style.url_format:
            output.url_format = style.url_format
        cluster_distance = style.cluster_distance
        if cluster_distance is not None and \
           cluster_distance != CLUSTER_DISTANCE:
            output.cluster_distance = cluster_distance
        cluster_threshold = style.cluster_threshold
        if cluster_threshold is not None and \
           cluster_threshold != CLUSTER_THRESHOLD:
            output.cluster_threshold = cluster_threshold
        if style.style:
            if isinstance(style.style, basestring):
                # Native JSON
                try:
                    style.style = json.loads(style.style)
                except:
                    current.log.error("Unable to decode Style: %s" % style.style)
                    style.style = None
            output.style = style.style
        return output
# =============================================================================
class S3Map(S3Method):
    """
        Class to generate a Map linked to Search filters
    """

    # -------------------------------------------------------------------------
    def apply_method(self, r, **attr):
        """
            Entry point to apply map method to S3Requests
            - produces a full page with S3FilterWidgets above a Map

            @param r: the S3Request instance
            @param attr: controller attributes for the request

            @return: output object to send to the view
                     (None for GET with a non-html representation)
        """

        if r.http == "GET":
            representation = r.representation
            if representation == "html":
                return self.page(r, **attr)
        else:
            r.error(405, current.ERROR.BAD_METHOD)

    # -------------------------------------------------------------------------
    def page(self, r, **attr):
        """
            Map page

            @param r: the S3Request instance
            @param attr: controller attributes for the request
        """

        if r.representation in ("html", "iframe"):

            response = current.response
            resource = self.resource
            get_config = resource.get_config
            tablename = resource.tablename

            widget_id = "default_map"

            output = {}

            title = response.s3.crud_strings[tablename].get("title_map",
                                                            current.T("Map"))
            output["title"] = title

            # Filter widgets
            filter_widgets = get_config("filter_widgets", None)
            if filter_widgets and not self.hide_filter:
                # Advanced-search mode only if any widget is hidden
                advanced = False
                for widget in filter_widgets:
                    if "hidden" in widget.opts and widget.opts.hidden:
                        advanced = get_config("map_advanced", True)
                        break

                request = self.request
                from s3filter import S3FilterForm
                # Apply filter defaults (before rendering the data!)
                S3FilterForm.apply_filter_defaults(r, resource)
                filter_formstyle = get_config("filter_formstyle", None)
                submit = get_config("map_submit", True)
                filter_form = S3FilterForm(filter_widgets,
                                           formstyle=filter_formstyle,
                                           advanced=advanced,
                                           submit=submit,
                                           ajax=True,
                                           # URL to update the Filter Widget Status
                                           ajaxurl=r.url(method="filter",
                                                         vars={},
                                                         representation="options"),
                                           _class="filter-form",
                                           _id="%s-filter-form" % widget_id,
                                           )
                get_vars = request.get_vars
                filter_form = filter_form.html(resource, get_vars=get_vars, target=widget_id)
            else:
                # Render as empty string to avoid the exception in the view
                filter_form = ""

            output["form"] = filter_form

            # Map
            output["map"] = self.widget(r, widget_id=widget_id,
                                        callback='''S3.search.s3map()''', **attr)

            # View
            response.view = self._view(r, "map.html")

            return output

        else:
            r.error(501, current.ERROR.BAD_FORMAT)

    # -------------------------------------------------------------------------
    def widget(self,
               r,
               method="map",
               widget_id=None,
               visible=True,
               callback=None,
               **attr):
        """
            Render a Map widget suitable for use in an S3Filter-based page
            such as S3Summary

            @param r: the S3Request
            @param method: the widget method
            @param widget_id: the widget ID
            @param visible: whether the widget is initially visible
            @param callback: None by default in case DIV is hidden
            @param attr: controller attributes
        """

        if not widget_id:
            widget_id = "default_map"

        gis = current.gis
        tablename = self.tablename

        ftable = current.s3db.gis_layer_feature

        def lookup_layer(prefix, name):
            # Find the single feature layer for this controller/function,
            # preferring the one flagged as style_default
            query = (ftable.controller == prefix) & \
                    (ftable.function == name)
            layers = current.db(query).select(ftable.layer_id,
                                              ftable.style_default,
                                              )
            if len(layers) > 1:
                layers.exclude(lambda row: row.style_default == False)
            if len(layers) == 1:
                layer_id = layers.first().layer_id
            else:
                # We can't distinguish
                layer_id = None
            return layer_id

        prefix = r.controller
        name = r.function
        layer_id = lookup_layer(prefix, name)
        if not layer_id:
            # Try the tablename
            prefix, name = tablename.split("_", 1)
            layer_id = lookup_layer(prefix, name)

        url = URL(extension="geojson", args=None)

        # @ToDo: Support maps with multiple layers (Dashboards)
        #_id = "search_results_%s" % widget_id
        _id = "search_results"
        feature_resources = [{"name"      : current.T("Search Results"),
                              "id"        : _id,
                              "layer_id"  : layer_id,
                              "tablename" : tablename,
                              "url"       : url,
                              # We activate in callback after ensuring URL is updated for current filter status
                              "active"    : False,
                              }]
        settings = current.deployment_settings
        catalogue_layers = settings.get_gis_widget_catalogue_layers()
        legend = settings.get_gis_legend()
        search = settings.get_gis_search_geonames()
        toolbar = settings.get_gis_toolbar()
        wms_browser = settings.get_gis_widget_wms_browser()
        if wms_browser:
            config = gis.get_config()
            if config.wmsbrowser_url:
                # Fixed: was an accidental double assignment
                # (wms_browser = wms_browser = {...})
                wms_browser = {"name" : config.wmsbrowser_name,
                               "url" : config.wmsbrowser_url,
                               }
            else:
                wms_browser = None

        # Renamed local from 'map' to avoid shadowing the builtin
        map_widget = gis.show_map(id = widget_id,
                                  feature_resources = feature_resources,
                                  catalogue_layers = catalogue_layers,
                                  collapsed = True,
                                  legend = legend,
                                  toolbar = toolbar,
                                  save = False,
                                  search = search,
                                  wms_browser = wms_browser,
                                  callback = callback,
                                  )
        return map_widget
# =============================================================================
class S3ExportPOI(S3Method):
    """ Export point-of-interest resources for a location """

    # -------------------------------------------------------------------------
    def apply_method(self, r, **attr):
        """
            Apply method.

            @param r: the S3Request
            @param attr: controller options for this request
        """

        output = dict()

        if r.http == "GET":
            output = self.export(r, **attr)
        else:
            r.error(405, current.ERROR.BAD_METHOD)

        return output

    # -------------------------------------------------------------------------
    def export(self, r, **attr):
        """
            Export POI resources.

            URL options:

                - "resources"   list of tablenames to export records from

                - "msince"      datetime in ISO format, "auto" to use the
                                feed's last update

                - "update_feed" 0 to skip the update of the feed's last
                                update datetime, useful for trial exports

            Supported formats:

                .xml            S3XML
                .osm            OSM XML Format
                .kml            Google KML

            (other formats can be requested, but may give unexpected results)

            @param r: the S3Request
            @param attr: controller options for this request
        """

        # Determine request Lx
        current_lx = r.record
        if not current_lx: # or not current_lx.level:
            # Must have a location
            r.error(400, current.ERROR.BAD_REQUEST)
        else:
            self.lx = current_lx.id

        tables = []
        # Parse the ?resources= parameter
        if "resources" in r.get_vars:
            resources = r.get_vars["resources"]
        else:
            # Fallback to deployment_setting
            resources = current.deployment_settings.get_gis_poi_export_resources()
        if not isinstance(resources, list):
            resources = [resources]
        # Each entry may itself be a comma-separated list of tablenames
        # (fixed: was a list comprehension used only for its side effect)
        for item in resources:
            tables.extend(item.split(","))

        # Parse the ?update_feed= parameter
        update_feed = True
        if "update_feed" in r.get_vars:
            _update_feed = r.get_vars["update_feed"]
            if _update_feed == "0":
                update_feed = False

        # Parse the ?msince= parameter
        msince = None
        if "msince" in r.get_vars:
            msince = r.get_vars["msince"]
            if msince.lower() == "auto":
                # Normalise to the "auto" sentinel handled downstream
                msince = "auto"
            else:
                msince = s3_parse_datetime(msince)

        # Export a combined tree
        tree = self.export_combined_tree(tables,
                                         msince=msince,
                                         update_feed=update_feed)

        xml = current.xml

        # Set response headers
        response = current.response
        s3 = response.s3
        headers = response.headers
        representation = r.representation
        if representation in s3.json_formats:
            as_json = True
            default = "application/json"
        else:
            as_json = False
            default = "text/xml"

        headers["Content-Type"] = s3.content_type.get(representation,
                                                      default)

        # Find XSLT stylesheet and transform
        stylesheet = r.stylesheet()
        if tree and stylesheet is not None:
            args = Storage(domain=xml.domain,
                           base_url=s3.base_url,
                           utcnow=s3_format_datetime())
            tree = xml.transform(tree, stylesheet, **args)
        if tree:
            if as_json:
                output = xml.tree2json(tree, pretty_print=True)
            else:
                output = xml.tostring(tree, pretty_print=True)

        return output

    # -------------------------------------------------------------------------
    def export_combined_tree(self, tables, msince=None, update_feed=True):
        """
            Export a combined tree of all records in tables, which
            are in Lx, and have been updated since msince.

            @param tables: list of table names
            @param msince: minimum modified_on datetime, "auto" for
                           automatic from feed data, None to turn it off
            @param update_feed: update the last_update datetime in the feed
        """

        db = current.db
        s3db = current.s3db
        ftable = s3db.gis_poi_feed

        lx = self.lx

        elements = []
        for tablename in tables:

            # Define the resource
            try:
                resource = s3db.resource(tablename, components=[])
            except AttributeError:
                # Table not defined (module deactivated?)
                continue

            # Check
            if "location_id" not in resource.fields:
                # Hardly a POI resource without location_id
                continue

            # Add Lx filter
            self._add_lx_filter(resource, lx)

            # Get the feed data
            query = (ftable.tablename == tablename) & \
                    (ftable.location_id == lx)
            feed = db(query).select(limitby=(0, 1)).first()
            if msince == "auto":
                # Use this feed's last export time as the threshold
                if feed is None:
                    _msince = None
                else:
                    _msince = feed.last_update
            else:
                _msince = msince

            # Export the tree and append its element to the element list
            tree = resource.export_tree(msince=_msince,
                                        references=["location_id"])

            # Update the feed data
            if update_feed:
                muntil = resource.muntil
                if feed is None:
                    ftable.insert(location_id = lx,
                                  tablename = tablename,
                                  last_update = muntil)
                else:
                    feed.update_record(last_update = muntil)

            elements.extend([c for c in tree.getroot()])

        # Combine all elements in one tree and return it
        tree = current.xml.tree(elements, results=len(elements))
        return tree

    # -------------------------------------------------------------------------
    @staticmethod
    def _add_lx_filter(resource, lx):
        """
            Add a Lx filter for the current location to this
            resource.

            @param resource: the resource
            @param lx: the location record ID to filter on
        """

        from s3query import FS
        # Match locations whose path contains lx (descendants) or
        # starts with lx (lx itself and its subtree)
        query = (FS("location_id$path").contains("/%s/" % lx)) | \
                (FS("location_id$path").like("%s/%%" % lx))
        resource.add_filter(query)
# =============================================================================
class S3ImportPOI(S3Method):
    """
        Import point-of-interest resources for a location

        Reads PoIs either from an uploaded .osm file or from a
        PostgreSQL OSM mirror via Osmosis, then imports them into the
        selected resources through the OSM XSLT stylesheet.
    """

    # -------------------------------------------------------------------------
    @staticmethod
    def apply_method(r, **attr):
        """
            Apply method.

            @param r: the S3Request
            @param attr: controller options for this request
        """

        if r.representation == "html":

            T = current.T
            s3db = current.s3db
            request = current.request
            response = current.response
            settings = current.deployment_settings
            s3 = current.response.s3

            title = T("Import from OpenStreetMap")

            resources_list = settings.get_gis_poi_export_resources()

            uploadpath = os.path.join(request.folder,"uploads/")
            from s3utils import s3_yes_no_represent

            fields = [Field("text1", # Dummy Field to add text inside the Form
                            label = "",
                            default = T("Can read PoIs either from an OpenStreetMap file (.osm) or mirror."),
                            writable = False),
                      Field("file", "upload",
                            uploadfolder = uploadpath,
                            label = T("File")),
                      Field("text2", # Dummy Field to add text inside the Form
                            label = "",
                            default = "Or",
                            writable = False),
                      Field("host",
                            default = "localhost",
                            label = T("Host")),
                      Field("database",
                            default = "osm",
                            label = T("Database")),
                      Field("user",
                            default = "osm",
                            label = T("User")),
                      Field("password", "string",
                            default = "planet",
                            label = T("Password")),
                      Field("ignore_errors", "boolean",
                            label = T("Ignore Errors?"),
                            represent = s3_yes_no_represent),
                      Field("resources",
                            label = T("Select resources to import"),
                            requires = IS_IN_SET(resources_list, multiple=True),
                            default = resources_list,
                            widget = SQLFORM.widgets.checkboxes.widget)
                      ]

            if not r.id:
                # No location in the URL, so ask for one in the form
                from s3validators import IS_LOCATION
                from s3widgets import S3LocationAutocompleteWidget
                # dummy field
                field = s3db.org_office.location_id
                field.requires = IS_EMPTY_OR(IS_LOCATION())
                field.widget = S3LocationAutocompleteWidget()
                fields.insert(3, field)

            from s3utils import s3_mark_required
            labels, required = s3_mark_required(fields, ["file", "location_id"])
            s3.has_required = True

            form = SQLFORM.factory(*fields,
                                   formstyle = settings.get_ui_formstyle(),
                                   submit_button = T("Import"),
                                   labels = labels,
                                   separator = "",
                                   table_name = "import_poi" # Dummy table name
                                   )

            response.view = "create.html"
            output = dict(title=title,
                          form=form)

            if form.accepts(request.vars, current.session):

                form_vars = form.vars
                if form_vars.file != "":
                    # Use the uploaded .osm file directly
                    File = open(uploadpath + form_vars.file, "r")
                else:
                    # Create .poly file
                    if r.record:
                        record = r.record
                    elif not form_vars.location_id:
                        form.errors["location_id"] = T("Location is Required!")
                        return output
                    else:
                        gtable = s3db.gis_location
                        record = current.db(gtable.id == form_vars.location_id).select(gtable.name,
                                                                                      gtable.wkt,
                                                                                      limitby=(0, 1)
                                                                                      ).first()
                        if record.wkt is None:
                            form.errors["location_id"] = T("Location needs to have WKT!")
                            return output
                    error = GIS.create_poly(record)
                    if error:
                        current.session.error = error
                        redirect(URL(args=r.id))
                    # Use Osmosis to extract an .osm file using this .poly
                    name = record.name
                    if os.path.exists(os.path.join(os.getcwd(), "temp")): # use web2py/temp
                        TEMP = os.path.join(os.getcwd(), "temp")
                    else:
                        import tempfile
                        TEMP = tempfile.gettempdir()
                    filename = os.path.join(TEMP, "%s.osm" % name)
                    cmd = ["/home/osm/osmosis/bin/osmosis", # @ToDo: deployment_setting
                           "--read-pgsql",
                           "host=%s" % form_vars.host,
                           "database=%s" % form_vars.database,
                           "user=%s" % form_vars.user,
                           "password=%s" % form_vars.password,
                           "--dataset-dump",
                           "--bounding-polygon",
                           "file=%s" % os.path.join(TEMP, "%s.poly" % name),
                           "--write-xml",
                           "file=%s" % filename,
                           ]
                    import subprocess
                    try:
                        #result = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
                        subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
                    except subprocess.CalledProcessError, e:
                        current.session.error = T("OSM file generation failed: %s") % e.output
                        redirect(URL(args=r.id))
                    except AttributeError:
                        # Python < 2.7
                        error = subprocess.call(cmd, shell=True)
                        if error:
                            current.log.debug(cmd)
                            current.session.error = T("OSM file generation failed!")
                            redirect(URL(args=r.id))
                    try:
                        File = open(filename, "r")
                    except:
                        current.session.error = T("Cannot open created OSM file!")
                        redirect(URL(args=r.id))

                stylesheet = os.path.join(request.folder, "static", "formats",
                                          "osm", "import.xsl")
                ignore_errors = form_vars.get("ignore_errors", None)
                xml = current.xml
                tree = xml.parse(File)
                # Fixed: close the file handle once parsed
                # (was leaked on every import)
                File.close()
                define_resource = s3db.resource
                response.error = ""
                import_count = 0
                # Only import the resources both selected and configured
                import_res = list(set(form_vars["resources"]) & \
                                  set(resources_list))
                for tablename in import_res:
                    try:
                        s3db[tablename]
                    except:
                        # Module disabled
                        continue
                    resource = define_resource(tablename)
                    s3xml = xml.transform(tree, stylesheet_path=stylesheet,
                                          name=resource.name)
                    try:
                        resource.import_xml(s3xml,
                                            ignore_errors=ignore_errors)
                        import_count += resource.import_count
                    except:
                        response.error += str(sys.exc_info()[1])
                if import_count:
                    response.confirmation = "%s %s" % \
                        (import_count,
                         T("PoIs successfully imported."))
                else:
                    response.information = T("No PoIs available.")

            return output

        else:
            raise HTTP(501, current.ERROR.BAD_METHOD)
# END =========================================================================
| mit |
basicthinker/ThyNVM | ext/ply/test/yacc_error4.py | 174 | 1562 | # -----------------------------------------------------------------------------
# yacc_error4.py
#
# Attempt to define a rule named 'error'
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules

# Operator precedence, lowest to highest; UMINUS is a fictitious token
# used only to give unary minus a higher precedence than binary MINUS
precedence = (
    ('left','PLUS','MINUS'),
    ('left','TIMES','DIVIDE'),
    ('right','UMINUS'),
    )

# dictionary of names (symbol table for assigned variables)
names = { }
def p_statement_assign(t):
    'statement : NAME EQUALS expression'
    # Bind the evaluated RHS expression to the name in the symbol table
    names[t[1]] = t[3]
def p_statement_expr(t):
    'statement : expression'
    # A bare expression statement prints its value
    print(t[1])
def p_expression_binop(t):
    '''expression : expression PLUS expression
                  | expression MINUS expression
                  | expression TIMES expression
                  | expression DIVIDE expression'''
    # The docstring above is the grammar spec (PLY convention) -- keep as-is.
    # Evaluate by dispatching on the operator token.
    ops = {'+': lambda a, b: a + b,
           '-': lambda a, b: a - b,
           '*': lambda a, b: a * b,
           '/': lambda a, b: a / b}
    apply_op = ops.get(t[2])
    if apply_op is not None:
        t[0] = apply_op(t[1], t[3])
def p_expression_uminus(t):
    'expression : MINUS expression %prec UMINUS'
    # %prec UMINUS gives unary minus higher precedence than binary MINUS
    t[0] = -t[2]
def p_expression_group(t):
    'expression : LPAREN expression RPAREN'
    # Parentheses only group; the value is the inner expression's value
    t[0] = t[2]
def p_expression_number(t):
    'expression : NUMBER'
    # A numeric literal evaluates to its token value
    t[0] = t[1]
def p_expression_name(t):
    'expression : NAME'
    # Look the name up in the symbol table; undefined names report an
    # error and evaluate to 0
    try:
        t[0] = names[t[1]]
    except LookupError:
        print("Undefined name '%s'" % t[1])
        t[0] = 0
def p_error_handler(t):
    'error : NAME'
    # Deliberately defines a grammar rule named 'error' (reserved by
    # yacc) -- per the file header, that is the point of this test file
    pass
def p_error(t):
    # Parser error callback: silently ignore syntax errors
    pass

# Build the parser tables (this file tests yacc's reaction to the
# user-defined 'error' rule above)
yacc.yacc()
| bsd-3-clause |
LethusTI/supportcenter | vendor/django/django/contrib/localflavor/sk/sk_districts.py | 543 | 2453 | """
Slovak districts according to http://sk.wikipedia.org/wiki/Administrat%C3%ADvne_%C4%8Dlenenie_Slovenska
"""
from django.utils.translation import ugettext_lazy as _
# (code, localized name) pairs for a district selection field.
# Bratislava and Kosice are split into their numbered city districts
# (BA1-BA5, KE1-KE4 plus KEO for the surrounding district).
DISTRICT_CHOICES = (
    ('BB', _('Banska Bystrica')),
    ('BS', _('Banska Stiavnica')),
    ('BJ', _('Bardejov')),
    ('BN', _('Banovce nad Bebravou')),
    ('BR', _('Brezno')),
    ('BA1', _('Bratislava I')),
    ('BA2', _('Bratislava II')),
    ('BA3', _('Bratislava III')),
    ('BA4', _('Bratislava IV')),
    ('BA5', _('Bratislava V')),
    ('BY', _('Bytca')),
    ('CA', _('Cadca')),
    ('DT', _('Detva')),
    ('DK', _('Dolny Kubin')),
    ('DS', _('Dunajska Streda')),
    ('GA', _('Galanta')),
    ('GL', _('Gelnica')),
    ('HC', _('Hlohovec')),
    ('HE', _('Humenne')),
    ('IL', _('Ilava')),
    ('KK', _('Kezmarok')),
    ('KN', _('Komarno')),
    ('KE1', _('Kosice I')),
    ('KE2', _('Kosice II')),
    ('KE3', _('Kosice III')),
    ('KE4', _('Kosice IV')),
    ('KEO', _('Kosice - okolie')),
    ('KA', _('Krupina')),
    ('KM', _('Kysucke Nove Mesto')),
    ('LV', _('Levice')),
    ('LE', _('Levoca')),
    ('LM', _('Liptovsky Mikulas')),
    ('LC', _('Lucenec')),
    ('MA', _('Malacky')),
    ('MT', _('Martin')),
    ('ML', _('Medzilaborce')),
    ('MI', _('Michalovce')),
    ('MY', _('Myjava')),
    ('NO', _('Namestovo')),
    ('NR', _('Nitra')),
    ('NM', _('Nove Mesto nad Vahom')),
    ('NZ', _('Nove Zamky')),
    ('PE', _('Partizanske')),
    ('PK', _('Pezinok')),
    ('PN', _('Piestany')),
    ('PT', _('Poltar')),
    ('PP', _('Poprad')),
    ('PB', _('Povazska Bystrica')),
    ('PO', _('Presov')),
    ('PD', _('Prievidza')),
    ('PU', _('Puchov')),
    ('RA', _('Revuca')),
    ('RS', _('Rimavska Sobota')),
    ('RV', _('Roznava')),
    ('RK', _('Ruzomberok')),
    ('SB', _('Sabinov')),
    ('SC', _('Senec')),
    ('SE', _('Senica')),
    ('SI', _('Skalica')),
    ('SV', _('Snina')),
    ('SO', _('Sobrance')),
    ('SN', _('Spisska Nova Ves')),
    ('SL', _('Stara Lubovna')),
    ('SP', _('Stropkov')),
    ('SK', _('Svidnik')),
    ('SA', _('Sala')),
    ('TO', _('Topolcany')),
    ('TV', _('Trebisov')),
    ('TN', _('Trencin')),
    ('TT', _('Trnava')),
    ('TR', _('Turcianske Teplice')),
    ('TS', _('Tvrdosin')),
    ('VK', _('Velky Krtis')),
    ('VT', _('Vranov nad Toplou')),
    ('ZM', _('Zlate Moravce')),
    ('ZV', _('Zvolen')),
    ('ZC', _('Zarnovica')),
    ('ZH', _('Ziar nad Hronom')),
    ('ZA', _('Zilina')),
)
| gpl-3.0 |
JeroenMathon/trackma | trackma/accounts.py | 1 | 2993 | import utils
import cPickle
class AccountManager(object):
"""
This is the account manager.
It provides a generic way for the user interface to query for the
available registered accounts, and add or delete accounts.
This class returns an Account Dictionary used by
the :class:`Engine` to start.
"""
accounts = {'default': None, 'next': 1, 'accounts': dict()}
def __init__(self):
utils.make_dir('')
self.filename = utils.get_root_filename('accounts.dict')
self._load()
def _load(self):
if utils.file_exists(self.filename):
with open(self.filename, 'rb') as f:
self.accounts = cPickle.load(f)
def _save(self):
with open(self.filename, 'wb') as f:
cPickle.dump(self.accounts, f)
def add_account(self, username, password, api):
"""
Registers a new account with the specified
*username*, *password* and *api*.
The *api* must be one of the available APIs
found in the utils.available_libs dict.
"""
available_libs = utils.available_libs.keys()
if not username:
raise utils.AccountError('Empty username.')
if not password:
raise utils.AccountError('Empty password.')
if api not in available_libs:
raise utils.AccountError('That API doesn\'t exist.')
account = {'username': username,
'password': password,
'api': api,
}
nextnum = self.accounts['next']
self.accounts['accounts'][nextnum] = account
self.accounts['next'] += 1
self._save()
def delete_account(self, num):
"""
Deletes the account number **num**.
"""
self.accounts['default'] = None
del self.accounts['accounts'][num]
# Reset index if there are no accounts left
if not self.accounts['accounts']:
self.accounts['next'] = 1
self._save()
def get_account(self, num):
"""
Returns the account dict **num**.
"""
return self.accounts['accounts'][num]
def get_accounts(self):
"""
Returns an iterator of available accounts.
"""
return self.accounts['accounts'].iteritems()
def get_default(self):
"""
Returns the default account number, if set.
Otherwise returns None.
"""
num = self.accounts['default']
if num is not None:
try:
return self.accounts['accounts'][num]
except KeyError:
return None
else:
return None
def set_default(self, val):
"""
Sets a new default account number.
"""
self.accounts['default'] = val
self._save()
def unset_default(self):
    """Clear the default account selection and persist the change."""
    self.accounts['default'] = None
    self._save()
| gpl-3.0 |
moio/spacewalk | client/tools/rhn-virtualization/virtualization/domain_config.py | 7 | 13054 | #
# Copyright (c) 2008--2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
from xml.dom.minidom import parse
import string
import os
import sys
###############################################################################
# Exceptions
###############################################################################
class DomainConfigError(Exception):
    """Raised when a domain XML config cannot be read, parsed or edited."""
    pass
###############################################################################
# Classes
###############################################################################
class DataType:
    """Enumerates how a DomainConfigItem is stored in the XML tree."""
    # Value held as an XML attribute on an element.
    ATTRIBUTE = "attribute"
    # Value held as the text content of an element.
    VALUE = "value"
class DomainConfigItem:
    """Addresses one piece of data inside a domain XML document.

    path      -- slash-separated element path, e.g. "domain/os/kernel";
                 for attributes the last component is the attribute name.
    data_type -- DataType.VALUE or DataType.ATTRIBUTE.
    """

    def __init__(self, path, data_type):
        self.path = path
        self.data_type = data_type
class DomainConfig:
    """Read/write access to a libvirt/Xen domain XML definition on disk.

    The file is located as <config_path>/<uuid>.xml (or *uuid* may itself
    be a path to an .xml file). Individual elements and attributes are
    addressed through DomainConfigItem paths via getConfigItem(),
    setConfigItem() and removeConfigItem().

    NOTE: this module uses Python 2 syntax (three-expression raise,
    ``except Exception, e``, ``has_key``).
    """

    ###########################################################################
    # Constants
    ###########################################################################

    NAME = DomainConfigItem("domain/name", DataType.VALUE)
    UUID = DomainConfigItem("domain/uuid", DataType.VALUE)
    BOOTLOADER = DomainConfigItem("domain/bootloader", DataType.VALUE)
    MEMORY = DomainConfigItem("domain/memory", DataType.VALUE)
    VCPU = DomainConfigItem("domain/vcpu", DataType.VALUE)
    OS = DomainConfigItem("domain/os", DataType.VALUE)
    OS_TYPE = DomainConfigItem("domain/os/type", DataType.VALUE)
    ROOT_DEVICE = DomainConfigItem("domain/os/root", DataType.VALUE)
    COMMAND_LINE = DomainConfigItem("domain/os/cmdline", DataType.VALUE)
    KERNEL_PATH = DomainConfigItem("domain/os/kernel", DataType.VALUE)
    RAMDISK_PATH = DomainConfigItem("domain/os/initrd", DataType.VALUE)
    DISK_IMAGE_PATH = DomainConfigItem("domain/devices/disk/source/file",
                                       DataType.ATTRIBUTE)
    DOMAIN_ID = DomainConfigItem("domain/id", DataType.ATTRIBUTE)

    ###########################################################################
    # Public Interface
    ###########################################################################

    def __init__(self, config_path, uuid):
        """Locate and parse the domain's XML file.

        Raises DomainConfigError (with the original traceback) if the
        file cannot be read or parsed.
        """
        # Prepare the file name and parse the XML file.
        # *uuid* may already be a full path to an existing .xml file.
        if string.find(uuid, ".xml") > 1 and os.path.exists(uuid):
            self.__file_name = uuid
        else:
            self.__file_name = "%s/%s.xml" % (config_path, uuid)
        self.__dom_tree = None
        try:
            # Keep only the document element; the Document node is still
            # reachable via parentNode when new nodes must be created.
            self.__dom_tree = parse(self.__file_name).documentElement
        except Exception, e:
            raise DomainConfigError("Error reading config file '%s': %s" % \
                (self.__file_name, str(e))), None, sys.exc_info()[2]

    def save(self):
        """Saves any changes made to this configuration."""
        file = None
        try:
            try:
                file = open(self.__file_name, "w")
                file.write(self.__dom_tree.toxml())
            except IOError, ioe:
                raise DomainConfigError("Error saving config file '%s': %s" % \
                    (self.__file_name, str(ioe))), None, sys.exc_info()[2]
        finally:
            if file is not None:
                file.close()

    def getFileName(self):
        """
        Returns the path to the configuration file represented by this
        object.
        """
        return self.__file_name

    def toXML(self):
        """Returns the XML representation of this configuration."""
        return self.__dom_tree.toxml()

    def getConfigItem(self, config_item):
        """Return the value (text or attribute) addressed by *config_item*.

        Raises DomainConfigError if the item is missing or its data type
        is unknown.
        """
        if config_item.data_type == DataType.ATTRIBUTE:
            return self.__getElementAttribute(
                self.__dom_tree,
                *config_item.path.split("/"))
        elif config_item.data_type == DataType.VALUE:
            return self.__getElementValue(
                self.__dom_tree,
                *config_item.path.split("/"))

        raise DomainConfigError("Unknown config item data type '%s'" % \
            str(config_item.data_type))

    def hasConfigItem(self, config_item):
        """Return 1 if *config_item* exists in the document, else 0."""
        try:
            self.getConfigItem(config_item)
        except DomainConfigError:
            return 0
        return 1

    def removeConfigItem(self, config_item):
        """Remove the element value or attribute addressed by *config_item*.

        Raises DomainConfigError if the item is missing or its data type
        is unknown.
        """
        if config_item.data_type == DataType.ATTRIBUTE:
            return self.__removeElementAttribute(
                self.__dom_tree,
                *config_item.path.split("/"))
        elif config_item.data_type == DataType.VALUE:
            return self.__removeElementValue(
                self.__dom_tree,
                *config_item.path.split("/"))

        raise DomainConfigError("Unknown config item data type '%s'" % \
            str(config_item.data_type))

    def setConfigItem(self, config_item, value):
        """
        Sets the value of an item in the tree. If the item does not yet exist,
        it will be created.
        """
        if config_item.data_type == DataType.ATTRIBUTE:
            return self.__setElementAttribute(
                self.__dom_tree,
                value,
                *config_item.path.split("/"))
        elif config_item.data_type == DataType.VALUE:
            return self.__setElementValue(
                self.__dom_tree,
                value,
                *config_item.path.split("/"))

        raise DomainConfigError("Unknown config item data type '%s'" % \
            str(config_item.data_type))

    def isInstallerConfig(self):
        """
        Returns true if this configuration indicates that the domain was
        started in a method that would put it into the installer.
        """
        result = 0
        if self.hasConfigItem(DomainConfig.COMMAND_LINE):
            # Convert the command line to a dict for easy parsability.
            command_line = self.getConfigItem(DomainConfig.COMMAND_LINE)
            command_line_parts = command_line.strip().split(" ")
            command_line_dict = {}
            for part in command_line_parts:
                # Split "key=value"; values may themselves contain '='.
                command_line_args = part.split("=")
                key = command_line_args[0]
                command_line_dict[key] = None
                if len(command_line_args) >= 2:
                    command_line_dict[key] = '='.join(command_line_args[1:])

            # Look for the "method" argument.  This is a good indication that
            # the instance is in the installer.
            if (command_line_dict.has_key("method") or
                command_line_dict.has_key("ks") or
                command_line_dict.has_key("autoyast")):
                result = 1
        return result

    ###########################################################################
    # Helpers
    ###########################################################################

    def __getElementValue(self, start_tree, *tag_path):
        """Return the text content of the element at *tag_path*."""
        found = self.__extractElement(start_tree, *tag_path)
        if len(found.childNodes) == 0:
            raise DomainConfigError, \
                "Unable to find config value: " + "/".join(tag_path)
        return found.childNodes[0].data

    def __getElementAttribute(self, start_tree, *tag_path):
        """
        Returns the value of the requested XML attribute. The attribute name
        is the last value in the tag_path.
        """
        attribute_name = tag_path[-1]
        found = self.__extractElement(start_tree, *tag_path[:-1])

        # Dig out the value of the requested attribute.
        if not found.hasAttribute(attribute_name):
            raise DomainConfigError, \
                "Unable to find config attribute: " + "/".join(tag_path)
        return found.getAttribute(attribute_name)

    def __removeElementValue(self, start_tree, *tag_path):
        """Remove the element at *tag_path* (must have a text child)."""
        found = self.__extractElement(start_tree, *tag_path)
        if len(found.childNodes) == 0:
            raise DomainConfigError, \
                "Unable to find config value: " + "/".join(tag_path)
        found.parentNode.removeChild(found)

    def __removeElementAttribute(self, start_tree, *tag_path):
        """Remove the attribute named by the last component of *tag_path*."""
        attribute_name = tag_path[-1]
        found = self.__extractElement(start_tree, *tag_path[:-1])
        if not found.hasAttribute(attribute_name):
            raise DomainConfigError, \
                "Unable to find config attribute: " + "/".join(tag_path)
        found.removeAttribute(attribute_name)

    def __setElementValue(self, start_tree, value, *tag_path):
        """Set the text content of the element at *tag_path*, creating
        the element (and a text node) when absent."""
        try:
            found = self.__extractElement(start_tree, *tag_path)
        except DomainConfigError:
            # If an exception was thrown, the element did not exist. We'll
            # add it.
            found = self.__makeElement(start_tree, *tag_path)

        if len(found.childNodes) == 0:
            # Element exists but has no text node yet; create an empty one
            # via the owning Document so it can be assigned below.
            document = self.__dom_tree.parentNode
            element_text = document.createTextNode('')
            found.appendChild(element_text)

        try:
            found.childNodes[0].data = str(value)
        except IndexError, ie:
            raise DomainConfigError(
                "Error writing %s tag in '%s'." % \
                (string.join(tag_path, '/'), self.__file_name)), None, sys.exc_info()[2]

    def __setElementAttribute(self, start_tree, value, *tag_path):
        """Set the attribute named by the last path component; the element
        itself must already exist."""
        attribute_name = tag_path[-1]
        found = self.__extractElement(start_tree, *tag_path[:-1])
        found.setAttribute(attribute_name, str(value))

    def __makeElement(self, start_tree, *tag_path):
        """Recursively create (or reuse) the chain of elements along
        *tag_path* and return the innermost one."""
        # If there are no more tags left in the path, there's nothing more to
        # add.
        if len(tag_path) == 0:
            return start_tree

        # Look for the first part of the tag.
        tag = tag_path[0]
        try:
            element = self.__extractElement(start_tree, tag)
        except DomainConfigError:
            # No matching tag found. Create one.
            document = self.__dom_tree.parentNode
            element = document.createElement(tag)
            start_tree.appendChild(element)

        tag_path = tag_path[1:]
        return self.__makeElement(element, *tag_path)

    def __extractElement(self, start_tree, *tag_path):
        """Walk *tag_path* from *start_tree* and return the first matching
        element; raises DomainConfigError when any component is missing."""
        # If there are no more tags left in the path, we're done.
        if len(tag_path) == 0:
            return start_tree

        # Extract the first matching child from this tree.
        tag = tag_path[0]

        if start_tree == self.__dom_tree:
            # If this is the root node, ensure that the first part of the path
            # matches. This is a special case because the getElementsByTagName
            # only applies to elements below the root node.
            if start_tree.nodeName != tag:
                # First part of the tag path didn't match. Raise exception.
                raise DomainConfigError, "Could not locate tag <%s>." % tag
            else:
                # First part matched; adjust the tag pointer, if there's any
                # thing left.
                tag_path = tag_path[1:]
                if len(tag_path) == 0:
                    return start_tree
                else:
                    tag = tag_path[0]

        node_list = start_tree.getElementsByTagName(tag)
        if node_list is not None and len(node_list) > 0:
            tag_node = node_list[0]
            return self.__extractElement(tag_node, *tag_path[1:])

        # If we got here, we couldn't find the tag in question. Raise an
        # exception
        raise DomainConfigError, "Could not locate tag " + str(tag)
###############################################################################
# Test Method
###############################################################################
if __name__ == "__main__":
    # Ad-hoc smoke test: load the config for the uuid given on the command
    # line, read a few items, mutate several others and dump the result.
    # Nothing is written back to disk (save() is never called).
    import sys
    uuid = sys.argv[1]
    f = DomainConfig("/etc/sysconfig/rhn/virt", uuid)
    print "name=", f.getConfigItem(DomainConfig.NAME)
    print "memory=", f.getConfigItem(DomainConfig.MEMORY)
    print "domain_id=", f.getConfigItem(DomainConfig.DOMAIN_ID)
    f.setConfigItem(DomainConfig.DOMAIN_ID, 22322)
    f.setConfigItem(DomainConfigItem("domain/argh", DataType.ATTRIBUTE), 22322)
    f.setConfigItem(DomainConfigItem("domain/pete", DataType.VALUE), "hello")
    f.setConfigItem(DomainConfigItem("domain/vcpu", DataType.VALUE), "22")
    f.setConfigItem(DomainConfig.BOOTLOADER, "/usr/pete/bin/pygrub")
    f.removeConfigItem(DomainConfigItem("domain/os", DataType.VALUE))
    print f.toXML()
| gpl-2.0 |
TylerL-uxai/SamsBday | cocos2d/tools/project-creator/module/core.py | 13 | 12398 | #!/usr/bin/python
#coding=utf-8
"""****************************************************************************
Copyright (c) 2013 cocos2d-x.org
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************"""
import sys
import os, os.path
import json
import shutil
def replaceString(filepath, src_string, dst_string):
    """ From file's content replace specified string

    Reads the file as UTF-8 binary, replaces every occurrence of
    *src_string* with *dst_string* and rewrites the file in place.

    Arg:
        filepath: Specify a file contains the path
        src_string: old string
        dst_string: new string
    """
    # Context managers guarantee both handles are closed even if the
    # read, decode or write fails (the old code leaked them on error).
    with open(filepath, "rb") as infile:
        content = infile.read().decode('utf8')
    # A single whole-text replace is equivalent to the old per-line
    # replace: UTF-8 multi-byte sequences never contain a '\n' byte.
    content = content.replace(src_string, dst_string)
    with open(filepath, "wb") as outfile:
        outfile.write(content.encode('utf8'))
#end of replaceString
class CocosProject:
    """Generates a new cocos2d-x project from the bundled templates.

    Copies the engine files listed in cocos_files.json into the new
    project directory, then specializes each platform sub-project
    (renames, deletions, package/project-name substitutions) according
    to the per-platform JSON descriptors next to this script.
    """

    def __init__(self):
        """Initialize supported platforms, template context and counters."""
        # Platforms generated for each supported language.
        self.platforms = {
            "cpp": ["ios_mac", "android", "win32", "linux"],
            "lua": ["ios_mac", "android", "win32", "linux"],
            "javascript": ["ios_mac", "android", "win32"]
        }
        # Filled in by createPlatformProjects().
        self.context = {
            "language": None,
            "src_project_name": None,
            "src_package_name": None,
            "dst_project_name": None,
            "dst_package_name": None,
            "src_project_path": None,
            "dst_project_path": None,
            "cocos_file_list": None,
            "script_dir": None
        }
        self.platforms_list = []
        # Engine root: three directories above this script.
        self.cocos_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
        self.callbackfun = None
        # Progress accounting for the optional callback.
        self.totalStep = 1
        self.step = 0

    def checkParams(self):
        """Parse and validate the command line; return (name, package,
        language, path). Exits with a usage error if any is missing."""
        from optparse import OptionParser

        # set the parser to parse input params
        # the correspond variable name of "-x, --xxx" is parser.xxx
        parser = OptionParser(
            usage="Usage: %prog -n <PROJECT_NAME> -k <PACKAGE_NAME> -l <cpp|lua|javascript> -p <PROJECT_PATH>\n\
            Sample: %prog -n MyGame -k com.MyCompany.AwesomeGame -l javascript -p c:/mycompany"
        )
        parser.add_option("-n", "--name", metavar="PROJECT_NAME", help="Set a project name")
        parser.add_option("-k", "--package", metavar="PACKAGE_NAME", help="Set a package name for project")
        parser.add_option("-l", "--language", metavar="PROGRAMMING_NAME",
                          type="choice",
                          choices=["cpp", "lua", "javascript"],
                          help="Major programming language you want to use, should be [cpp | lua | javascript]")
        parser.add_option("-p", "--path", metavar="PROJECT_PATH", help="Set generate project path for project")

        # parse the params
        (opts, args) = parser.parse_args()
        if not opts.name:
            parser.error("-n or --name is not specified")
        if not opts.package:
            parser.error("-k or --package is not specified")
        if not opts.language:
            parser.error("-l or --language is not specified")
        if not opts.path:
            parser.error("-p or --path is not specified")
        return opts.name, opts.package, opts.language, opts.path

    def createPlatformProjects(self, projectName, packageName, language, projectPath, callbackfun=None):
        """ Create a plantform project.
        Arg:
            projectName: Project name, like this: "helloworld".
            packageName: It's used for android platform,like this:"com.cocos2dx.helloworld".
            language: There have three languages can be choice: [cpp | lua | javascript], like this:"javascript".
            projectPath: The path of generate project.
            callbackfun: It's new project callback function.There have four Params.
                As follow:
                def newProjectCallBack(step, totalStep, showMsg):
                    #step: processing step,at present
                    #totalStep: all the steps
                    #showMsg: message about the progress
                    pass
        Returns True on success, False on invalid input or an existing
        destination directory.
        """
        self.callbackfun = callbackfun
        # init our internal params
        self.context["dst_project_name"] = projectName
        self.context["dst_package_name"] = packageName
        self.context["language"] = language
        self.context["dst_project_path"] = os.path.join(projectPath, projectName)
        self.context["script_dir"] = os.path.abspath(os.path.dirname(__file__))
        self.context["cocos_file_list"] = os.path.join(self.context["script_dir"], "cocos_files.json")

        # fill in src_project_name and src_package_name according to "language"
        template_dir = os.path.abspath(os.path.join(self.cocos_root, "template"))
        if ("cpp" == self.context["language"]):
            self.context["src_project_name"] = "HelloCpp"
            self.context["src_package_name"] = "org.cocos2dx.hellocpp"
            self.context["src_project_path"] = os.path.join(template_dir, "multi-platform-cpp")
        elif ("lua" == self.context["language"]):
            self.context["src_project_name"] = "HelloLua"
            self.context["src_package_name"] = "org.cocos2dx.hellolua"
            self.context["src_project_path"] = os.path.join(template_dir, "multi-platform-lua")
        elif ("javascript" == self.context["language"]):
            self.context["src_project_name"] = "HelloJavascript"
            self.context["src_package_name"] = "org.cocos2dx.hellojavascript"
            self.context["src_project_path"] = os.path.join(template_dir, "multi-platform-js")
        else:
            print ("Your language parameter doesn\'t exist." \
                   "Check correct language option\'s parameter")
            return False

        # copy "lauguage"(cpp/lua/javascript) platform.proj into cocos2d-x/projects/<project_name>/folder
        if os.path.exists(self.context["dst_project_path"]):
            print ("Error:" + self.context["dst_project_path"] + " folder is already existing")
            print ("Please remove the old project or choose a new PROJECT_NAME in -project parameter")
            return False
        else:
            shutil.copytree(self.context["src_project_path"], self.context["dst_project_path"], True)

        # check cocos engine exist
        if not os.path.exists(self.context["cocos_file_list"]):
            print ("cocos_file_list.json doesn\'t exist." \
                   "generate it, please")
            return False

        with open(self.context["cocos_file_list"]) as f:
            fileList = json.load(f)

        self.platforms_list = self.platforms.get(self.context["language"], [])
        self.totalStep = len(self.platforms_list) + len(fileList)
        self.step = 0

        # Report progress roughly every 2% of the work; max(..., 1)
        # avoids a modulo-by-zero crash when there are fewer than 50
        # total steps (the old code raised ZeroDivisionError then).
        progress_stride = max(int(self.totalStep / 50), 1)

        # begin copy engine
        print("###begin copy engine")
        print("waitting copy cocos2d ...")
        dstPath = os.path.join(self.context["dst_project_path"], "cocos2d")
        for index in range(len(fileList)):
            srcfile = os.path.join(self.cocos_root, fileList[index])
            dstfile = os.path.join(dstPath, fileList[index])
            if not os.path.exists(os.path.dirname(dstfile)):
                os.makedirs(os.path.dirname(dstfile))

            # copy file or folder, replacing any stale destination
            if os.path.exists(srcfile):
                if os.path.isdir(srcfile):
                    if os.path.exists(dstfile):
                        shutil.rmtree(dstfile)
                    shutil.copytree(srcfile, dstfile)
                else:
                    if os.path.exists(dstfile):
                        os.remove(dstfile)
                    shutil.copy(srcfile, dstfile)

            self.step = self.step + 1
            if self.callbackfun and self.step % progress_stride == 0:
                self.callbackfun(self.step, self.totalStep, fileList[index])
        print("cocos2d\t\t: Done!")

        # call process_proj from each platform's script folder
        for platform in self.platforms_list:
            self.__processPlatformProjects(platform)

        print ("###New project has been created in this path: ")
        print (self.context["dst_project_path"].replace("\\", "/"))
        print ("Have Fun!")
        return True

    def __processPlatformProjects(self, platform):
        """ Process each platform project.
        Arg:
            platform: "ios_mac", "android", "win32", "linux"
        """
        # determine proj_path
        proj_path = os.path.join(self.context["dst_project_path"], "proj." + platform)
        java_package_path = ""

        # read json config file for the current platform
        # (the old code opened this file and never closed it)
        conf_path = os.path.join(self.context["script_dir"], "%s.json" % platform)
        with open(conf_path) as f:
            data = json.load(f)

        # rename package path, like "org.cocos2dx.hello" to "com.company.game". This is a special process for android
        if platform == "android":
            src_pkg = self.context["src_package_name"].split('.')
            dst_pkg = self.context["dst_package_name"].split('.')
            java_package_path = os.path.join(*dst_pkg)

        # rename files and folders
        for item in data["rename"]:
            tmp = item.replace("PACKAGE_PATH", java_package_path)
            src = tmp.replace("PROJECT_NAME", self.context["src_project_name"])
            dst = tmp.replace("PROJECT_NAME", self.context["dst_project_name"])
            if os.path.exists(os.path.join(proj_path, src)):
                os.rename(os.path.join(proj_path, src), os.path.join(proj_path, dst))

        # remove useless files and folders
        for item in data["remove"]:
            dst = item.replace("PROJECT_NAME", self.context["dst_project_name"])
            if os.path.exists(os.path.join(proj_path, dst)):
                shutil.rmtree(os.path.join(proj_path, dst))

        # rename package_name. This should be replaced at first. Don't change this sequence
        for item in data["replace_package_name"]:
            tmp = item.replace("PACKAGE_PATH", java_package_path)
            dst = tmp.replace("PROJECT_NAME", self.context["dst_project_name"])
            if os.path.exists(os.path.join(proj_path, dst)):
                replaceString(os.path.join(proj_path, dst), self.context["src_package_name"], self.context["dst_package_name"])

        # rename project_name
        for item in data["replace_project_name"]:
            tmp = item.replace("PACKAGE_PATH", java_package_path)
            dst = tmp.replace("PROJECT_NAME", self.context["dst_project_name"])
            if os.path.exists(os.path.join(proj_path, dst)):
                replaceString(os.path.join(proj_path, dst), self.context["src_project_name"], self.context["dst_project_name"])

        # done!
        showMsg = "proj.%s\t\t: Done!" % platform
        self.step += 1
        if self.callbackfun:
            self.callbackfun(self.step, self.totalStep, showMsg)
        print (showMsg)
    # end of processPlatformProjects
# -------------- main --------------
# dump argvs
# print sys.argv
if __name__ == '__main__':
    # Command-line entry point: parse -n/-k/-l/-p and generate the project.
    project = CocosProject()
    name, package, language, path = project.checkParams()
    project.createPlatformProjects(name, package, language, path)
| mit |
sirex/Misago | misago/users/tests/test_lists_views.py | 8 | 3258 | from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from misago.acl.testutils import override_acl
from misago.users.models import Rank
from misago.users.testutils import AuthenticatedUserTestCase
class UsersListTestCase(AuthenticatedUserTestCase):
    """Base case for user-list views: grants the browse permission.

    Subclasses override the ACL again when they need to test its absence.
    """
    def setUp(self):
        super(UsersListTestCase, self).setUp()

        # Every list view requires at least this permission.
        override_acl(self.user, {
            'can_browse_users_list': 1,
        })
class UsersListLanderTests(UsersListTestCase):
    """Tests for the users-list landing view (permission gate + redirect)."""
    def test_lander_no_permission(self):
        """lander returns 403 if user has no permission"""
        override_acl(self.user, {
            'can_browse_users_list': 0,
        })

        response = self.client.get(reverse('misago:users'))
        self.assertEqual(response.status_code, 403)

    def test_lander_redirect(self):
        """lander returns redirect to valid page if user has permission"""
        response = self.client.get(reverse('misago:users'))
        self.assertEqual(response.status_code, 302)
        # The lander should forward to the active-posters list.
        self.assertTrue(response['location'].endswith(
            reverse('misago:users_active_posters')))
class ActivePostersTests(UsersListTestCase):
    """Tests for the active-posters list, including pagination."""
    def test_active_posters_list(self):
        """active posters page has no showstoppers"""
        view_link = reverse('misago:users_active_posters')

        response = self.client.get(view_link)
        self.assertEqual(response.status_code, 200)

        # Create 200 test users and see if errors appeared
        # (Py2 xrange; each user gets a high post count so all of them
        # qualify for the ranking.)
        User = get_user_model()
        for i in xrange(200):
            User.objects.create_user('Bob%s' % i, 'm%s@te.com' % i, 'Pass.123',
                                     posts=12345)

        response = self.client.get(view_link)
        self.assertEqual(response.status_code, 200)

        # Walk the paginated pages to smoke-test pagination.
        for page in xrange(2, 6):
            response = self.client.get(reverse('misago:users_active_posters',
                                               kwargs={'page': page}))
            self.assertEqual(response.status_code, 200)
class OnlineUsersTests(UsersListTestCase):
    """Tests for the online-users list permission gate."""
    def test_no_permission(self):
        """online list returns 403 if user has no permission"""
        override_acl(self.user, {
            'can_browse_users_list': 1,
            'can_see_users_online_list': 0,
        })

        response = self.client.get(reverse('misago:users_online'))
        self.assertEqual(response.status_code, 403)

    def test_with_permission(self):
        """online list returns 200 if user has permission"""
        override_acl(self.user, {
            'can_browse_users_list': 1,
            'can_see_users_online_list': 1,
        })

        response = self.client.get(reverse('misago:users_online'))
        self.assertEqual(response.status_code, 200)
class UsersRankTests(UsersListTestCase):
    """Tests that rank pages are served only for tab-enabled ranks."""
    def test_ranks(self):
        """ranks lists are handled correctly"""
        for rank in Rank.objects.iterator():
            rank_link = reverse('misago:users_rank',
                                kwargs={'rank_slug': rank.slug})
            response = self.client.get(rank_link)

            # Only ranks flagged as tabs have a public listing page.
            if rank.is_tab:
                self.assertEqual(response.status_code, 200)
            else:
                self.assertEqual(response.status_code, 404)
dednal/chromium.src | third_party/pycoverage/coverage/execfile.py | 209 | 5865 | """Execute files of Python code."""
import imp, marshal, os, sys
from coverage.backward import exec_code_object, open_source
from coverage.misc import ExceptionDuringRun, NoCode, NoSource
try:
    # In Py 2.x, the builtins were in __builtin__
    BUILTINS = sys.modules['__builtin__']
except KeyError:
    # In Py 3.x, they're in builtins
    # (looked up via sys.modules so neither name has to be imported)
    BUILTINS = sys.modules['builtins']
def rsplit1(s, sep):
    """The same as s.rsplit(sep, 1), but works in 2.3

    Note: when *sep* does not occur in *s* this returns ("", s),
    whereas str.rsplit would yield a single element.
    """
    pieces = s.split(sep)
    head = sep.join(pieces[:-1])
    return head, pieces[-1]
def run_python_module(modulename, args):
    """Run a python module, as though with ``python -m name args...``.

    `modulename` is the name of the module, possibly a dot-separated name.
    `args` is the argument array to present as sys.argv, including the first
    element naming the module being executed.

    Raises NoSource when the module cannot be located or has no backing
    file; execution itself is delegated to run_python_file.
    """
    openfile = None
    glo, loc = globals(), locals()
    try:
        try:
            # Search for the module - inside its parent package, if any - using
            # standard import mechanics.
            if '.' in modulename:
                packagename, name = rsplit1(modulename, '.')
                package = __import__(packagename, glo, loc, ['__path__'])
                searchpath = package.__path__
            else:
                packagename, name = None, modulename
                searchpath = None  # "top-level search" in imp.find_module()
            openfile, pathname, _ = imp.find_module(name, searchpath)

            # Complain if this is a magic non-file module.
            if openfile is None and pathname is None:
                raise NoSource(
                    "module does not live in a file: %r" % modulename
                )

            # If `modulename` is actually a package, not a mere module, then we
            # pretend to be Python 2.7 and try running its __main__.py script.
            if openfile is None:
                packagename = modulename
                name = '__main__'
                package = __import__(packagename, glo, loc, ['__path__'])
                searchpath = package.__path__
                openfile, pathname, _ = imp.find_module(name, searchpath)
        except ImportError:
            # Old-Python-compatible way of capturing the exception object.
            _, err, _ = sys.exc_info()
            raise NoSource(str(err))
    finally:
        # imp.find_module returns an open file handle; always close it.
        if openfile:
            openfile.close()

    # Finally, hand the file off to run_python_file for execution.
    pathname = os.path.abspath(pathname)
    args[0] = pathname
    run_python_file(pathname, args, package=packagename)
def run_python_file(filename, args, package=None):
    """Run a python file as if it were the main program on the command line.

    `filename` is the path to the file to execute, it need not be a .py file.
    `args` is the argument array to present as sys.argv, including the first
    element naming the file being executed. `package` is the name of the
    enclosing package, if any.

    The real __main__ module and sys.argv are saved and restored in the
    finally block, so the host process is left unchanged even if the user
    code raises.
    """
    # Create a module to serve as __main__
    old_main_mod = sys.modules['__main__']
    main_mod = imp.new_module('__main__')
    sys.modules['__main__'] = main_mod
    main_mod.__file__ = filename
    if package:
        main_mod.__package__ = package
    main_mod.__builtins__ = BUILTINS

    # Set sys.argv properly.
    old_argv = sys.argv
    sys.argv = args

    try:
        # Make a code object somehow.
        if filename.endswith(".pyc") or filename.endswith(".pyo"):
            code = make_code_from_pyc(filename)
        else:
            code = make_code_from_py(filename)

        # Execute the code object.
        try:
            exec_code_object(code, main_mod.__dict__)
        except SystemExit:
            # The user called sys.exit(). Just pass it along to the upper
            # layers, where it will be handled.
            raise
        except:
            # Something went wrong while executing the user code.
            # Get the exc_info, and pack them into an exception that we can
            # throw up to the outer loop. We peel two layers off the traceback
            # so that the coverage.py code doesn't appear in the final printed
            # traceback.
            typ, err, tb = sys.exc_info()
            raise ExceptionDuringRun(typ, err, tb.tb_next.tb_next)
    finally:
        # Restore the old __main__
        sys.modules['__main__'] = old_main_mod

        # Restore the old argv and path
        sys.argv = old_argv
def make_code_from_py(filename):
    """Get source from `filename` and make a code object of it."""
    # Open the source file; a missing/unreadable file becomes NoSource.
    try:
        source_file = open_source(filename)
    except IOError:
        raise NoSource("No file to run: %r" % filename)

    try:
        text = source_file.read()
    finally:
        source_file.close()

    # `compile` requires the source to end with a newline; append one
    # when it is missing (or the file is empty).
    if not text or text[-1] != '\n':
        text += '\n'
    return compile(text, filename, "exec")
def make_code_from_pyc(filename):
    """Get a code object from a .pyc file.

    Raises NoCode when the file is missing or was compiled by a
    different interpreter version (magic number mismatch).
    """
    try:
        fpyc = open(filename, "rb")
    except IOError:
        raise NoCode("No file to run: %r" % filename)

    try:
        # First four bytes are a version-specific magic number. It has to
        # match or we won't run the file.
        magic = fpyc.read(4)
        if magic != imp.get_magic():
            raise NoCode("Bad magic number in .pyc file")

        # Skip the junk in the header that we don't need.
        fpyc.read(4)            # Skip the moddate.
        if sys.version_info >= (3, 3):
            # 3.3 added another long to the header (size), skip it.
            fpyc.read(4)

        # The rest of the file is the code object we want.
        code = marshal.load(fpyc)
    finally:
        fpyc.close()

    return code
| bsd-3-clause |
somic/paasta | paasta_tools/cli/cmds/local_run.py | 1 | 37744 | #!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import os
import socket
import sys
import time
import uuid
from os import execlp
from random import randint
from urllib.parse import urlparse
import requests
from docker import errors
from paasta_tools.adhoc_tools import get_default_interactive_config
from paasta_tools.chronos_tools import parse_time_variables
from paasta_tools.cli.cmds.check import makefile_responds_to
from paasta_tools.cli.cmds.cook_image import paasta_cook_image
from paasta_tools.cli.utils import figure_out_service_name
from paasta_tools.cli.utils import get_instance_config
from paasta_tools.cli.utils import lazy_choices_completer
from paasta_tools.cli.utils import list_instances
from paasta_tools.cli.utils import list_services
from paasta_tools.cli.utils import pick_random_port
from paasta_tools.long_running_service_tools import get_healthcheck_for_instance
from paasta_tools.paasta_execute_docker_command import execute_in_container
from paasta_tools.secret_tools import get_secret_provider
from paasta_tools.secret_tools import is_secret_ref
from paasta_tools.utils import _run
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import get_docker_client
from paasta_tools.utils import get_username
from paasta_tools.utils import list_clusters
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import NoDeploymentsAvailable
from paasta_tools.utils import NoDockerImageError
from paasta_tools.utils import paasta_print
from paasta_tools.utils import PaastaColors
from paasta_tools.utils import PaastaNotConfiguredError
from paasta_tools.utils import SystemPaastaConfig
from paasta_tools.utils import timed_flock
from paasta_tools.utils import Timeout
from paasta_tools.utils import TimeoutError
from paasta_tools.utils import validate_service_instance
def perform_http_healthcheck(url, timeout):
    """Returns true if healthcheck on url succeeds, false otherwise

    :param url: the healthcheck url
    :param timeout: timeout in seconds
    :returns: tuple of (bool, reason string); True if healthcheck succeeds
              within the number of seconds specified by timeout
    """
    try:
        with Timeout(seconds=timeout):
            try:
                # verify=False: local containers use self-signed certs.
                res = requests.get(url, verify=False)
            except requests.ConnectionError:
                return (False, "http request failed: connection failed")
    except TimeoutError:
        return (False, "http request timed out after %d seconds" % timeout)

    if 'content-type' in res.headers and ',' in res.headers['content-type']:
        paasta_print(PaastaColors.yellow(
            "Multiple content-type headers detected in response."
            " The Mesos healthcheck system will treat this as a failure!",
        ))
        # NOTE(review): returns False here even though the message says
        # "succeeded" — this mirrors Mesos treating multiple content-type
        # headers as a failed check; confirm the wording is intentional.
        return (False, "http request succeeded, code %d" % res.status_code)
    # check if response code is valid per https://mesosphere.github.io/marathon/docs/health-checks.html
    elif res.status_code >= 200 and res.status_code < 400:
        return (True, "http request succeeded, code %d" % res.status_code)
    else:
        return (False, "http request failed, code %s" % str(res.status_code))
def perform_tcp_healthcheck(url, timeout):
    """Attempt a TCP connection to the host/port named by *url*.

    :param url: the healthcheck url (in the form tcp://host:port)
    :param timeout: timeout in seconds
    :returns: tuple of (passed: bool, human-readable detail string)
    """
    parsed = urlparse(url)
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    probe.settimeout(timeout)
    # connect_ex returns 0 on success, otherwise an errno value.
    errno_code = probe.connect_ex((parsed.hostname, parsed.port))
    probe.close()
    if errno_code != 0:
        return (False, "%s (timeout %d seconds)" % (os.strerror(errno_code), timeout))
    return (True, "tcp connection succeeded")
def perform_cmd_healthcheck(docker_client, container_id, command, timeout):
    """Run *command* inside the container and treat exit code 0 as healthy.

    :param docker_client: Docker client object
    :param container_id: Docker container id
    :param command: command to execute
    :param timeout: timeout in seconds
    :returns: tuple of (passed: bool, command output)
    """
    output, exit_code = execute_in_container(docker_client, container_id, command, timeout)
    return (exit_code == 0, output)
def run_healthcheck_on_container(
    docker_client,
    container_id,
    healthcheck_mode,
    healthcheck_data,
    timeout,
):
    """Dispatch a single healthcheck against a container.

    :param container_id: Docker container id
    :param healthcheck_mode: one of 'http', 'https', 'tcp', or 'cmd'
    :param healthcheck_data: a URL when healthcheck_mode is 'http[s]' or 'tcp', a command if healthcheck_mode is 'cmd'
    :param timeout: timeout in seconds for individual check
    :returns: a tuple of (bool, output string)
    """
    if healthcheck_mode == 'cmd':
        return perform_cmd_healthcheck(docker_client, container_id, healthcheck_data, timeout)
    if healthcheck_mode in ('http', 'https'):
        return perform_http_healthcheck(healthcheck_data, timeout)
    if healthcheck_mode == 'tcp':
        return perform_tcp_healthcheck(healthcheck_data, timeout)
    # Unknown mode: warn and abort, matching the CLI's fail-fast behavior.
    paasta_print(PaastaColors.yellow(
        "Healthcheck mode '%s' is not currently supported!" % healthcheck_mode,
    ))
    sys.exit(1)
def simulate_healthcheck_on_service(
    instance_config,
    docker_client,
    container_id,
    healthcheck_mode,
    healthcheck_data,
    healthcheck_enabled,
):
    """Simulates Marathon-style healthcheck on given service if healthcheck is enabled

    Repeatedly runs the configured healthcheck against the container until it
    passes, the container exits, or max consecutive failures are reached after
    the grace period.

    :param instance_config: service manifest
    :param docker_client: Docker client object
    :param container_id: Docker container id
    :param healthcheck_mode: one of 'http', 'https', 'tcp', or 'cmd'
    :param healthcheck_data: tuple url to healthcheck
    :param healthcheck_enabled: boolean
    :returns: healthcheck_passed: boolean
    """
    healthcheck_link = PaastaColors.cyan(healthcheck_data)
    if healthcheck_enabled:
        grace_period = instance_config.get_healthcheck_grace_period_seconds()
        timeout = instance_config.get_healthcheck_timeout_seconds()
        interval = instance_config.get_healthcheck_interval_seconds()
        max_failures = instance_config.get_healthcheck_max_consecutive_failures()

        paasta_print('\nStarting health check via %s (waiting %s seconds before '
                     'considering failures due to grace period):' % (healthcheck_link, grace_period))

        # silently start performing health checks until grace period ends or first check succeeds
        graceperiod_end_time = time.time() + grace_period
        after_grace_period_attempts = 0
        while True:
            # First inspect the container for early exits
            container_state = docker_client.inspect_container(container_id)
            if not container_state['State']['Running']:
                paasta_print(
                    PaastaColors.red('Container exited with code {}'.format(
                        container_state['State']['ExitCode'],
                    )),
                )
                healthcheck_passed = False
                break

            healthcheck_passed, healthcheck_output = run_healthcheck_on_container(
                docker_client, container_id, healthcheck_mode, healthcheck_data, timeout,
            )

            # Yay, we passed the healthcheck
            if healthcheck_passed:
                paasta_print("{}'{}' (via {})".format(
                    PaastaColors.green("Healthcheck succeeded!: "),
                    healthcheck_output,
                    healthcheck_link,
                ))
                break

            # Otherwise, print why we failed
            if time.time() < graceperiod_end_time:
                # Failures during the grace period are reported but don't
                # count toward max_failures.
                color = PaastaColors.grey
                msg = '(disregarded due to grace period)'
                extra_msg = ' (via: {}. Output: {})'.format(healthcheck_link, healthcheck_output)
            else:
                # If we've exceeded the grace period, we start incrementing attempts
                after_grace_period_attempts += 1
                color = PaastaColors.red
                msg = '(Attempt {} of {})'.format(
                    after_grace_period_attempts, max_failures,
                )
                extra_msg = ' (via: {}. Output: {})'.format(healthcheck_link, healthcheck_output)

            paasta_print('{}{}'.format(
                color('Healthcheck failed! {}'.format(msg)),
                extra_msg,
            ))

            if after_grace_period_attempts == max_failures:
                break

            time.sleep(interval)
    else:
        # Healthchecks disabled: report what would have been checked and
        # consider the service healthy.
        paasta_print('\nPaaSTA would have healthchecked your service via\n%s' % healthcheck_link)
        healthcheck_passed = True
    return healthcheck_passed
def read_local_dockerfile_lines():
    """Return the lines of the Dockerfile in the current working directory.

    :returns: list of str, one entry per line (trailing newlines preserved)
    :raises IOError: if no Dockerfile exists in the current directory
    """
    dockerfile = os.path.join(os.getcwd(), 'Dockerfile')
    # Use a context manager so the file handle is closed deterministically
    # instead of leaking until garbage collection.
    with open(dockerfile) as f:
        return f.readlines()
def add_subparser(subparsers):
    """Register the 'local-run' subcommand and its arguments on *subparsers*.

    :param subparsers: the argparse subparsers object from the top-level CLI
    """
    list_parser = subparsers.add_parser(
        'local-run',
        help="Run service's Docker image locally",
        description=(
            "'paasta local-run' is useful for simulating how a PaaSTA service would be "
            "executed on a real cluster. It analyzes the local soa-configs and constructs "
            "a 'docker run' invocation to match. This is useful as a type of end-to-end "
            "test, ensuring that a service will work inside the docker container as expected. "
            "Additionally, 'local-run' can healthcheck a service per the configured healthcheck.\n\n"
            "Alternatively, 'local-run' can be used with --pull, which will pull the currently "
            "deployed docker image and use it, instead of building one."
        ),
        epilog=(
            "Note: 'paasta local-run' uses docker commands, which may require elevated privileges "
            "to run (sudo)."
        ),
    )
    list_parser.add_argument(
        '-s', '--service',
        help='The name of the service you wish to inspect',
    ).completer = lazy_choices_completer(list_services)
    list_parser.add_argument(
        '-c', '--cluster',
        help=(
            "The name of the cluster you wish to simulate. "
            "If omitted, uses the default cluster defined in the paasta local-run configs"
        ),
    ).completer = lazy_choices_completer(list_clusters)
    list_parser.add_argument(
        '-y', '--yelpsoa-config-root',
        dest='yelpsoa_config_root',
        help='A directory from which yelpsoa-configs should be read from',
        default=DEFAULT_SOA_DIR,
    )
    # --build / --pull / --dry-run are mutually exclusive ways of obtaining
    # (or not obtaining) the docker image; all of them store into args.action.
    build_pull_group = list_parser.add_mutually_exclusive_group()
    build_pull_group.add_argument(
        '-b', '--build',
        help=(
            "Build the docker image to run from scratch using the local Makefile's "
            "'cook-image' target. Defaults to try to use the local Makefile if present."
        ),
        action='store_const',
        const='build',
        dest='action',
    )
    build_pull_group.add_argument(
        '-p', '--pull',
        help=(
            "Pull the docker image marked for deployment from the Docker registry and "
            "use that for the local-run. This is the opposite of --build."
        ),
        action='store_const',
        const='pull',
        dest='action',
    )
    build_pull_group.add_argument(
        '-d', '--dry-run',
        help='Shows the arguments supplied to docker as json.',
        action='store_const',
        const='dry_run',
        dest='action',
    )
    build_pull_group.set_defaults(action='build')
    list_parser.add_argument(
        '--json-dict',
        help='When running dry run, output the arguments as a json dict',
        action='store_true',
        dest='dry_run_json_dict',
    )
    list_parser.add_argument(
        '-C', '--cmd',
        help=(
            'Run Docker container with particular command, '
            'for example: "bash". By default will use the command or args specified by the '
            'soa-configs or what was specified in the Dockerfile'
        ),
        required=False,
        default=None,
    )
    list_parser.add_argument(
        '-i', '--instance',
        help=("Simulate a docker run for a particular instance of the service, like 'main' or 'canary'"),
        required=False,
        default=None,
    ).completer = lazy_choices_completer(list_instances)
    # NOTE(review): action='store_true' with default=True means args.verbose
    # is always True whether or not -v is passed — confirm whether this flag
    # was meant to default to False.
    list_parser.add_argument(
        '-v', '--verbose',
        help='Show Docker commands output',
        action='store_true',
        required=False,
        default=True,
    )
    list_parser.add_argument(
        '-I', '--interactive',
        help=(
            'Run container in interactive mode. If interactive is set the default command will be "bash" '
            'unless otherwise set by the "--cmd" flag'
        ),
        action='store_true',
        required=False,
        default=False,
    )
    list_parser.add_argument(
        '-k', '--no-healthcheck',
        help='Disable simulated healthcheck',
        dest='healthcheck',
        action='store_false',
        required=False,
        default=True,
    )
    list_parser.add_argument(
        '-t', '--healthcheck-only',
        help='Terminates container after healthcheck (exits with status code 0 on success, 1 otherwise)',
        dest='healthcheck_only',
        action='store_true',
        required=False,
        default=False,
    )
    # default=False doubles as a falsy "not set" sentinel for the int port
    # (run_docker_container tests `if user_port:`).
    list_parser.add_argument(
        '-o', '--port',
        help='Specify a port number to use. If not set, a random non-conflicting port will be found.',
        type=int,
        dest='user_port',
        required=False,
        default=False,
    )
    list_parser.add_argument(
        '--vault-auth-method',
        help='Override how we auth with vault, defaults to token if not present',
        type=str,
        dest='vault_auth_method',
        required=False,
        default='token',
        choices=['token', 'ldap'],
    )
    list_parser.add_argument(
        '--vault-token-file',
        help='Override vault token file, defaults to /root/.vault-token',
        type=str,
        dest='vault_token_file',
        required=False,
        default='/var/spool/.paasta_vault_token',
    )
    list_parser.add_argument(
        '--skip-secrets',
        help='Skip decrypting secrets, useful if running non-interactively',
        dest='skip_secrets',
        required=False,
        action='store_true',
        default=False,
    )
    list_parser.set_defaults(command=paasta_local_run)
def get_container_name():
    """Generate a unique, user-scoped name for the local-run container."""
    suffix = randint(1, 999999)
    return 'paasta_local_run_%s_%s' % (get_username(), suffix)
def get_docker_run_cmd(
    memory, chosen_port, container_port, container_name, volumes, env, interactive,
    docker_hash, command, net, docker_params, detach,
):
    """Assemble the argv list used to launch the container via the docker wrapper.

    :param memory: memory limit in MB
    :param chosen_port: host port to publish (bridge networking only)
    :param container_port: container port to publish, or None to skip publishing
    :param container_name: name assigned to the container
    :param volumes: iterable of "host:container:mode" bind strings
    :param env: mapping of environment variables to inject
    :param interactive: attach stdin (and a tty when stdout is a tty)
    :param docker_hash: image identifier to run
    :param command: command to run inside the container (str runs via sh -c)
    :param net: docker network mode ('bridge' or 'host')
    :param docker_params: iterable of {'key': ..., 'value': ...} extra flags
    :param detach: run detached (only applies when not interactive)
    :returns: list of argv strings
    """
    cmd = ['paasta_docker_wrapper', 'run']
    for name, value in env.items():
        cmd.extend(['--env', '%s=%s' % (name, value)])
    cmd.append('--memory=%dm' % memory)
    for param in docker_params:
        cmd.append('--%s=%s' % (param['key'], param['value']))
    if net == 'bridge' and container_port is not None:
        cmd.append('--publish=%d:%d' % (chosen_port, container_port))
    elif net == 'host':
        cmd.append('--net=host')
    cmd.append('--name=%s' % container_name)
    cmd.extend('--volume=%s' % volume for volume in volumes)
    if interactive:
        cmd.append('--interactive=true')
        if sys.stdout.isatty():
            cmd.append('--tty=true')
    elif detach:
        cmd.append('--detach=true')
    cmd.append('%s' % docker_hash)
    if command:
        if isinstance(command, str):
            # A string command runs through a shell, matching docker's CMD
            # string form.
            cmd.extend(('sh', '-c', command))
        else:
            cmd.extend(command)
    return cmd
class LostContainerException(Exception):
    """Raised when the container we just launched cannot be found by name."""
    pass
def docker_pull_image(docker_url):
    """Pull an image via ``docker pull``. Uses the actual pull command instead of the python
    bindings due to the docker auth/registry transition. Once we are past Docker 1.6
    we can use better credential management, but for now this function assumes the
    user running the command has already been authorized for the registry.

    :param docker_url: the image reference to pull
    Exits the process with the pull's return code on failure.
    """
    paasta_print("Please wait while the image (%s) is pulled (times out after 30m)..." % docker_url, file=sys.stderr)
    # Both file handles were previously leaked; context managers close them
    # even if the pull fails. /dev/null is fed to the pull as stdin.
    with open(os.devnull, 'wb') as devnull:
        with open('/tmp/paasta-local-run-pull.lock', 'w') as f:
            # Serialize concurrent local-run pulls on this host.
            with timed_flock(f, seconds=1800):
                ret, output = _run('docker pull %s' % docker_url, stream=True, stdin=devnull)
    if ret != 0:
        paasta_print(
            "\nPull failed. Are you authorized to run docker commands?",
            file=sys.stderr,
        )
        sys.exit(ret)
def get_container_id(docker_client, container_name):
    """Use 'docker_client' to find the container we started, identifiable by
    its 'container_name'. If we can't find the id, raise
    LostContainerException.
    """
    containers = docker_client.containers(all=False)
    # Docker prefixes container names with a slash in its listing.
    wanted = '/%s' % container_name
    for container in containers:
        if wanted in container.get('Names', []):
            return container.get('Id')
    raise LostContainerException(
        "Can't find the container I just launched so I can't do anything else.\n"
        "Try docker 'ps --all | grep %s' to see where it went.\n"
        "Here were all the containers:\n"
        "%s" % (container_name, containers),
    )
def _cleanup_container(docker_client, container_id):
    """Stop and remove *container_id*, warning about OOM kills and cleanup failures."""
    state = docker_client.inspect_container(container_id)['State']
    if state.get('OOMKilled', False):
        paasta_print(
            PaastaColors.red(
                "Your service was killed by the OOM Killer!\n"
                "You've exceeded the memory limit, try increasing the mem parameter in your soa_configs",
            ),
            file=sys.stderr,
        )
    paasta_print("\nStopping and removing the old container %s..." % container_id)
    paasta_print("(Please wait or you may leave an orphaned container.)")
    try:
        docker_client.stop(container_id)
        docker_client.remove_container(container_id)
    except errors.APIError:
        paasta_print(PaastaColors.yellow(
            "Could not clean up container! You should stop and remove container '%s' manually." % container_id,
        ))
    else:
        paasta_print("...done")
def get_local_run_environment_vars(instance_config, port0, framework):
    """Returns a dictionary of environment variables to simulate what would be available to
    a paasta service running in a container.

    :param instance_config: service manifest (supplies image/resource getters)
    :param port0: the host port the container will be reachable on
    :param framework: 'marathon' or 'chronos'; selects the simulated variables
    :returns: dict of environment variable name -> value
    """
    hostname = socket.getfqdn()
    docker_image = instance_config.get_docker_image()
    if docker_image == '':
        # In a local_run environment, the docker_image may not be available
        # so we can fall-back to the injected DOCKER_TAG per the paasta contract
        docker_image = os.environ['DOCKER_TAG']
    fake_taskid = uuid.uuid4()
    env = {
        'HOST': hostname,
        'MESOS_SANDBOX': '/mnt/mesos/sandbox',
        'MESOS_CONTAINER_NAME': 'localrun-%s' % fake_taskid,
        'MESOS_TASK_ID': str(fake_taskid),
        'PAASTA_DOCKER_IMAGE': docker_image,
    }
    if framework == 'marathon':
        env.update({
            'MARATHON_PORT': str(port0),
            'MARATHON_PORT0': str(port0),
            'MARATHON_PORTS': str(port0),
            'MARATHON_PORT_%d' % instance_config.get_container_port(): str(port0),
            'MARATHON_APP_VERSION': 'simulated_marathon_app_version',
            'MARATHON_APP_RESOURCE_CPUS': str(instance_config.get_cpus()),
            'MARATHON_APP_DOCKER_IMAGE': docker_image,
            'MARATHON_APP_RESOURCE_MEM': str(instance_config.get_mem()),
            'MARATHON_APP_RESOURCE_DISK': str(instance_config.get_disk()),
            'MARATHON_APP_LABELS': "",
            'MARATHON_APP_ID': '/simulated_marathon_app_id',
            'MARATHON_HOST': hostname,
        })
    elif framework == 'chronos':
        env.update({
            'CHRONOS_RESOURCE_DISK': str(instance_config.get_disk()),
            'CHRONOS_RESOURCE_CPU': str(instance_config.get_cpus()),
            'CHRONOS_RESOURCE_MEM': str(instance_config.get_mem()),
            'CHRONOS_JOB_OWNER': 'simulated-owner',
            'CHRONOS_JOB_RUN_TIME': str(int(time.time())),
            'CHRONOS_JOB_NAME': "%s %s" % (instance_config.get_service(), instance_config.get_instance()),
            'CHRONOS_JOB_RUN_ATTEMPT': str(0),
            'mesos_task_id': 'ct:simulated-task-id',
        })
    return env
def check_if_port_free(port):
    """Return True if nothing is bound to *port* (checked by binding 127.0.0.1)."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.bind(("127.0.0.1", port))
    except socket.error:
        # Bind failed: something else already owns the port.
        return False
    finally:
        probe.close()
    return True
def decrypt_secret_environment_variables(
    secret_provider_name,
    environment,
    soa_dir,
    service_name,
    cluster_name,
    secret_provider_kwargs,
):
    """Resolve any secret references found in *environment*.

    :param secret_provider_name: name of the secret provider implementation
    :param environment: mapping of env var name -> value (possibly secret refs)
    :param soa_dir: soa-configs directory
    :param service_name: service whose secrets are being decrypted
    :param cluster_name: cluster the secrets belong to
    :param secret_provider_kwargs: extra kwargs for the provider
    :returns: dict of decrypted env vars (empty if no secret refs were found)
    """
    secret_refs = {
        name: value
        for name, value in environment.items()
        if is_secret_ref(value)
    }
    if not secret_refs:
        return {}
    provider = get_secret_provider(
        secret_provider_name=secret_provider_name,
        soa_dir=soa_dir,
        service_name=service_name,
        cluster_names=[cluster_name],
        secret_provider_kwargs=secret_provider_kwargs,
    )
    return provider.decrypt_environment(secret_refs)
def run_docker_container(
    docker_client,
    service,
    instance,
    docker_hash,
    volumes,
    interactive,
    command,
    healthcheck,
    healthcheck_only,
    user_port,
    instance_config,
    secret_provider_name,
    soa_dir=DEFAULT_SOA_DIR,
    dry_run=False,
    json_dict=False,
    framework=None,
    # NOTE(review): mutable default ({}) — it is only read here, but a
    # None-sentinel would be safer if any callee ever mutates it.
    secret_provider_kwargs={},
    skip_secrets=False,
):
    """docker-py has issues running a container with a TTY attached, so for
    consistency we execute 'docker run' directly in both interactive and
    non-interactive modes.

    In non-interactive mode when the run is complete, stop the container and
    remove it (with docker-py).

    :returns: 0 for dry-run/interactive paths, otherwise the container's exit
        code (3 on KeyboardInterrupt or if the container stopped unexpectedly);
        may call sys.exit() directly on startup/healthcheck-only paths.
    """
    # Pick the host port: honor --port if free, otherwise bail; else random.
    if user_port:
        if check_if_port_free(user_port):
            chosen_port = user_port
        else:
            paasta_print(
                PaastaColors.red(
                    "The chosen port is already in use!\n"
                    "Try specifying another one, or omit (--port|-o) and paasta will find a free one for you",
                ),
                file=sys.stderr,
            )
            sys.exit(1)
    else:
        chosen_port = pick_random_port(service)
    environment = instance_config.get_env_dictionary()
    if not skip_secrets:
        secret_environment = decrypt_secret_environment_variables(
            secret_provider_name=secret_provider_name,
            environment=environment,
            soa_dir=soa_dir,
            service_name=service,
            cluster_name=instance_config.cluster,
            secret_provider_kwargs=secret_provider_kwargs,
        )
        environment.update(secret_environment)
    local_run_environment = get_local_run_environment_vars(
        instance_config=instance_config,
        port0=chosen_port,
        framework=framework,
    )
    environment.update(local_run_environment)
    net = instance_config.get_net()
    memory = instance_config.get_mem()
    container_name = get_container_name()
    docker_params = instance_config.format_docker_parameters()
    healthcheck_mode, healthcheck_data = get_healthcheck_for_instance(
        service, instance, instance_config, chosen_port, soa_dir=soa_dir,
    )
    # Decide whether to publish a container port: skip when there is no
    # healthcheck (forcing interactive), or when the user asked for neither
    # a port nor a healthcheck.
    if healthcheck_mode is None:
        container_port = None
        interactive = True
    elif not user_port and not healthcheck and not healthcheck_only:
        container_port = None
    else:
        try:
            container_port = instance_config.get_container_port()
        except AttributeError:
            container_port = None
    simulate_healthcheck = (healthcheck_only or healthcheck) and healthcheck_mode is not None
    docker_run_args = dict(
        memory=memory,
        chosen_port=chosen_port,
        container_port=container_port,
        container_name=container_name,
        volumes=volumes,
        env=environment,
        interactive=interactive,
        detach=simulate_healthcheck,
        docker_hash=docker_hash,
        command=command,
        net=net,
        docker_params=docker_params,
    )
    docker_run_cmd = get_docker_run_cmd(**docker_run_args)
    joined_docker_run_cmd = ' '.join(docker_run_cmd)

    if dry_run:
        if json_dict:
            paasta_print(json.dumps(docker_run_args))
        else:
            paasta_print(json.dumps(docker_run_cmd))
        return 0
    else:
        paasta_print('Running docker command:\n%s' % PaastaColors.grey(joined_docker_run_cmd))

    if interactive or not simulate_healthcheck:
        # NOTE: This immediately replaces us with the docker run cmd. Docker
        # run knows how to clean up the running container in this situation.
        execlp('paasta_docker_wrapper', *docker_run_cmd)
        # For testing, when execlp is patched out and doesn't replace us, we
        # still want to bail out.
        return 0

    container_started = False
    container_id = None
    try:
        (returncode, output) = _run(docker_run_cmd)
        if returncode != 0:
            paasta_print(
                'Failure trying to start your container!'
                'Returncode: %d'
                'Output:'
                '%s'
                ''
                'Fix that problem and try again.'
                'http://y/paasta-troubleshooting'
                % (returncode, output),
                sep='\n',
            )
            # Container failed to start so no need to cleanup; just bail.
            sys.exit(1)
        container_started = True
        container_id = get_container_id(docker_client, container_name)
        paasta_print('Found our container running with CID %s' % container_id)

        if simulate_healthcheck:
            healthcheck_result = simulate_healthcheck_on_service(
                instance_config=instance_config,
                docker_client=docker_client,
                container_id=container_id,
                healthcheck_mode=healthcheck_mode,
                healthcheck_data=healthcheck_data,
                healthcheck_enabled=healthcheck,
            )

        def _output_stdout_and_exit_code():
            # Dump the container's exit code and captured output for the user.
            returncode = docker_client.inspect_container(container_id)['State']['ExitCode']
            # NOTE(review): the trailing ')' in this message looks like a typo
            # in the format string — confirm before changing user-facing output.
            paasta_print('Container exited: %d)' % returncode)
            paasta_print('Here is the stdout and stderr:\n\n')
            paasta_print(
                docker_client.attach(container_id, stderr=True, stream=False, logs=True),
            )

        if healthcheck_only:
            if container_started:
                _output_stdout_and_exit_code()
                _cleanup_container(docker_client, container_id)
            if healthcheck_mode is None:
                paasta_print('--healthcheck-only, but no healthcheck is defined for this instance!')
                sys.exit(1)
            elif healthcheck_result is True:
                sys.exit(0)
            else:
                sys.exit(1)

        running = docker_client.inspect_container(container_id)['State']['Running']
        if running:
            paasta_print('Your service is now running! Tailing stdout and stderr:')
            for line in docker_client.attach(container_id, stderr=True, stream=True, logs=True):
                paasta_print(line)
        else:
            _output_stdout_and_exit_code()
            returncode = 3
    except KeyboardInterrupt:
        returncode = 3

    # Cleanup if the container exits on its own or interrupted.
    if container_started:
        returncode = docker_client.inspect_container(container_id)['State']['ExitCode']
        _cleanup_container(docker_client, container_id)
    return returncode
def command_function_for_framework(framework):
    """
    Given a framework, return a function that appropriately formats
    the command to be run.

    :param framework: one of 'marathon', 'chronos', or 'adhoc'
    :raises ValueError: for any other framework name
    """
    def passthrough(cmd):
        # marathon and adhoc commands run verbatim.
        return cmd

    def format_chronos_command(cmd):
        # chronos commands may embed time variables that must be interpolated.
        return parse_time_variables(cmd, datetime.datetime.now())

    formatters = {
        'chronos': format_chronos_command,
        'marathon': passthrough,
        'adhoc': passthrough,
    }
    try:
        return formatters[framework]
    except KeyError:
        raise ValueError("Invalid Framework")
def configure_and_run_docker_container(
    docker_client,
    docker_hash,
    service,
    instance,
    cluster,
    system_paasta_config,
    args,
    pull_image=False,
    dry_run=False,
):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.

    Loads the instance config (or a default interactive one when no instance
    is given), resolves the image, volumes, command and secret settings, then
    delegates to run_docker_container.

    :returns: 0 on success, 1 on configuration errors, otherwise the value
        returned by run_docker_container
    """
    # Guard clauses: healthcheck-only and non-tty runs need an explicit instance.
    if instance is None and args.healthcheck_only:
        paasta_print(
            "With --healthcheck-only, --instance MUST be provided!",
            file=sys.stderr,
        )
        return 1
    if instance is None and not sys.stdin.isatty():
        paasta_print(
            "--instance and --cluster must be specified when using paasta local-run without a tty!",
            file=sys.stderr,
        )
        return 1

    soa_dir = args.yelpsoa_config_root
    volumes = list()
    # Deployments are only needed when we must resolve the image ourselves.
    load_deployments = docker_hash is None or pull_image
    interactive = args.interactive

    try:
        if instance is None:
            # No instance given: fall back to an interactive adhoc session.
            instance_type = 'adhoc'
            instance = 'interactive'
            instance_config = get_default_interactive_config(
                service=service,
                cluster=cluster,
                soa_dir=soa_dir,
                load_deployments=load_deployments,
            )
            interactive = True
        else:
            instance_type = validate_service_instance(service, instance, cluster, soa_dir)
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=load_deployments,
                soa_dir=soa_dir,
            )
    except NoConfigurationForServiceError as e:
        paasta_print(str(e), file=sys.stderr)
        return 1
    except NoDeploymentsAvailable:
        paasta_print(
            PaastaColors.red(
                "Error: No deployments.json found in %(soa_dir)s/%(service)s."
                "You can generate this by running:"
                "generate_deployments_for_service -d %(soa_dir)s -s %(service)s" % {
                    'soa_dir': soa_dir,
                    'service': service,
                },
            ),
            sep='\n',
            file=sys.stderr,
        )
        return 1

    if docker_hash is None:
        try:
            docker_url = instance_config.get_docker_url()
        except NoDockerImageError:
            paasta_print(
                PaastaColors.red(
                    "Error: No sha has been marked for deployment for the %s deploy group.\n"
                    "Please ensure this service has either run through a jenkins pipeline "
                    "or paasta mark-for-deployment has been run for %s\n" % (
                        instance_config.get_deploy_group(), service,
                    ),
                ),
                sep='',
                file=sys.stderr,
            )
            return 1
        docker_hash = docker_url

    if pull_image:
        docker_pull_image(docker_url)

    # Only bind host paths that actually exist; warn about the rest.
    for volume in instance_config.get_volumes(system_paasta_config.get_volumes()):
        if os.path.exists(volume['hostPath']):
            volumes.append('%s:%s:%s' % (volume['hostPath'], volume['containerPath'], volume['mode'].lower()))
        else:
            paasta_print(
                PaastaColors.yellow(
                    "Warning: Path %s does not exist on this host. Skipping this binding." % volume['hostPath'],
                ),
            )

    # Command precedence: interactive default 'bash' > --cmd > soa-configs cmd
    # (framework-formatted) > soa-configs args.
    if interactive is True and args.cmd is None:
        command = 'bash'
    elif args.cmd:
        command = args.cmd
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            command_modifier = command_function_for_framework(instance_type)
            command = command_modifier(command_from_config)
        else:
            command = instance_config.get_args()

    secret_provider_kwargs = {
        'vault_cluster_config': system_paasta_config.get_vault_cluster_config(),
        'vault_auth_method': args.vault_auth_method,
        'vault_token_file': args.vault_token_file,
    }

    return run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_hash=docker_hash,
        volumes=volumes,
        interactive=interactive,
        command=command,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        user_port=args.user_port,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
        json_dict=args.dry_run_json_dict,
        framework=instance_type,
        secret_provider_name=system_paasta_config.get_secret_provider_name(),
        secret_provider_kwargs=secret_provider_kwargs,
        skip_secrets=args.skip_secrets,
    )
def docker_config_available():
    """Return True if a readable docker credentials file exists for this user."""
    home = os.path.expanduser('~')
    candidates = (
        os.path.join(home, ".dockercfg"),               # legacy location
        os.path.join(home, ".docker", "config.json"),   # modern location
    )
    return any(
        os.path.isfile(path) and os.access(path, os.R_OK)
        for path in candidates
    )
def paasta_local_run(args):
    """Entry point for 'paasta local-run'.

    Resolves the cluster/service/tag per *args*, builds or pulls the image as
    requested, then configures and runs the container.

    :param args: parsed argparse namespace from add_subparser
    :returns: 0 on success, non-zero on failure (may also re-exec under sudo
        for --pull without docker credentials)
    """
    # --pull generally needs registry credentials; re-exec under sudo when
    # running unprivileged without a docker config file.
    if args.action == 'pull' and os.geteuid() != 0 and not docker_config_available():
        paasta_print("Re-executing paasta local-run --pull with sudo..")
        os.execvp("sudo", ["sudo", "-H"] + sys.argv)
    if args.action == 'build' and not makefile_responds_to('cook-image'):
        paasta_print("A local Makefile with a 'cook-image' target is required for --build", file=sys.stderr)
        paasta_print("If you meant to pull the docker image from the registry, explicitly pass --pull", file=sys.stderr)
        return 1
    try:
        system_paasta_config = load_system_paasta_config()
    except PaastaNotConfiguredError:
        # Fall back to an empty config so local-run still works on
        # non-PaaSTA-configured hosts.
        paasta_print(
            PaastaColors.yellow(
                "Warning: Couldn't load config files from '/etc/paasta'. This indicates"
                "PaaSTA is not configured locally on this host, and local-run may not behave"
                "the same way it would behave on a server configured for PaaSTA.",
            ),
            sep='\n',
        )
        system_paasta_config = SystemPaastaConfig({"volumes": []}, '/etc/paasta')
    local_run_config = system_paasta_config.get_local_run_config()
    service = figure_out_service_name(args, soa_dir=args.yelpsoa_config_root)
    if args.cluster:
        cluster = args.cluster
    else:
        try:
            cluster = local_run_config['default_cluster']
        except KeyError:
            paasta_print(
                PaastaColors.red(
                    "PaaSTA on this machine has not been configured with a default cluster."
                    "Please pass one to local-run using '-c'.",
                ),
                sep='\n',
                file=sys.stderr,
            )
            return 1
    instance = args.instance
    docker_client = get_docker_client()
    if args.action == 'build':
        # Cook a local image and expose its tag via DOCKER_TAG for the run.
        default_tag = 'paasta-local-run-%s-%s' % (service, get_username())
        tag = os.environ.get('DOCKER_TAG', default_tag)
        os.environ['DOCKER_TAG'] = tag
        pull_image = False
        cook_return = paasta_cook_image(args=None, service=service, soa_dir=args.yelpsoa_config_root)
        if cook_return != 0:
            return cook_return
    elif args.action == 'dry_run':
        pull_image = False
        tag = None
    else:
        pull_image = True
        tag = None
    try:
        return configure_and_run_docker_container(
            docker_client=docker_client,
            docker_hash=tag,
            service=service,
            instance=instance,
            cluster=cluster,
            args=args,
            pull_image=pull_image,
            system_paasta_config=system_paasta_config,
            dry_run=args.action == 'dry_run',
        )
    except errors.APIError as e:
        paasta_print(
            'Can\'t run Docker container. Error: %s' % str(e),
            file=sys.stderr,
        )
        return 1
| apache-2.0 |
defcello/PyYAUL.Base | pyyaul/base/timing.py | 1 | 1204 | #! /usr/bin/env python3.7
from enum import Enum
from calendar import timegm
import time
class TIMESTAMPFORMATS(Enum):
    """Supported timestamp string layouts."""
    DATETIMESEC_DOTS = 1   # "YYYY.mm.dd.HH.MM.SS"
    MMDDYYYY_SLASHES = 2   # "mm/dd/YYYY"
def timestamp(t=None, f=TIMESTAMPFORMATS.DATETIMESEC_DOTS):
    """
    Format *t* as a timestamp string.

    @param t `float` seconds since epoch as returned by `time.time`. If `None`, will use the
        current time.
    @param f `TIMESTAMPFORMATS` member selecting the output layout.
    @return `str` timestamp, e.g. "YYYY.mm.dd.HH.MM.SS" for DATETIMESEC_DOTS.
    @raise ValueError if `f` is not a recognized format.
    """
    if t is None:
        t = time.time()
    # Bug fix: `t` was previously ignored because time.strftime() was called
    # without a time tuple, so it always formatted the current time. Convert
    # `t` explicitly with time.gmtime() (UTC) so the result round-trips
    # through timestampParse(), which interprets the parsed fields as UTC via
    # calendar.timegm().
    if f is TIMESTAMPFORMATS.DATETIMESEC_DOTS:
        tStr = time.strftime('%Y.%m.%d.%H.%M.%S', time.gmtime(t))
    elif f is TIMESTAMPFORMATS.MMDDYYYY_SLASHES:
        tStr = time.strftime('%m/%d/%Y', time.gmtime(t))
    else:
        raise ValueError(f'Unexpected format: f={f!r}')
    return tStr
def timestampParse(tStr, f=TIMESTAMPFORMATS.DATETIMESEC_DOTS):
    """
    Parse a timestamp string produced by `timestamp` back into epoch seconds.

    @param tStr `str` timestamp, e.g. "YYYY.mm.dd.HH.MM.SS" for
        DATETIMESEC_DOTS.
    @param f `TIMESTAMPFORMATS` member describing the layout of `tStr`.
    @return `int` seconds since epoch; the parsed fields are interpreted as
        UTC via `calendar.timegm`.
    @raise ValueError if `f` is not a recognized format, or if `tStr` does not
        match the expected layout (from `time.strptime`).
    """
    if f is TIMESTAMPFORMATS.DATETIMESEC_DOTS:
        t = timegm(time.strptime(tStr, '%Y.%m.%d.%H.%M.%S'))
    elif f is TIMESTAMPFORMATS.MMDDYYYY_SLASHES:
        t = timegm(time.strptime(tStr, '%m/%d/%Y'))
    else:
        raise ValueError(f'Unexpected format: f={f!r}')
    return t
| mit |
karulis/lightblue-0.4 | build/lib/lightblue/_lightbluecommon.py | 179 | 10831 | # Copyright (c) 2009 Bea Lam. All rights reserved.
#
# This file is part of LightBlue.
#
# LightBlue is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LightBlue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with LightBlue. If not, see <http://www.gnu.org/licenses/>.
# Defines attributes with common implementations across the different
# platforms.
# public attributes exported by ``from lightblue._lightbluecommon import *``
__all__ = ("L2CAP", "RFCOMM", "OBEX", "BluetoothError", "splitclass")

# Protocol/service class types, used for sockets and advertising services
# NOTE(review): 10/11/12 appear to be library-internal identifiers rather
# than Bluetooth SIG values — confirm before documenting them as such.
L2CAP, RFCOMM, OBEX = (10, 11, 12)
# Subclasses IOError so callers can catch IO-style failures generically.
class BluetoothError(IOError):
    """
    Generic exception raised for Bluetooth errors. This is not raised for
    socket-related errors; socket objects raise the socket.error and
    socket.timeout exceptions from the standard library socket module.

    Note that error codes are currently platform-independent. In particular,
    the Mac OS X implementation returns IOReturn error values from the IOKit
    framework, and OBEXError codes from <IOBluetooth/OBEX.h> for OBEX operations.
    """
    pass
def splitclass(classofdevice):
    """
    Splits the given class of device to return a 3-item tuple with the
    major service class, major device class and minor device class values.

    These values indicate the device's major services and the type of the
    device (e.g. mobile phone, laptop, etc.). If you google for
    "assigned numbers bluetooth baseband" you might find some documents
    that discuss how to extract this information from the class of device.

    Example:
        >>> splitclass(1057036)
        (129, 1, 3)
        >>>
    """
    if not isinstance(classofdevice, int):
        try:
            classofdevice = int(classofdevice)
        except (TypeError, ValueError):
            raise TypeError("Given device class '%s' cannot be split" % \
                str(classofdevice))

    bits = classofdevice >> 2  # skip over the 2 "format" bits
    # Field layout (after dropping format bits): service is the top 11 bits,
    # major the next 5, minor the low 6.
    return (bits >> 11, (bits >> 6) & 0x1F, bits & 0x3F)
# Lazily-compiled validity regex, cached at module level.
_validbtaddr = None


def _isbtaddr(address):
    """
    Returns whether the given address is a valid bluetooth address.
    For example, "00:0e:6d:7b:a2:0a" is a valid address.

    Returns False if the argument is None or is not a string.
    """
    # Define validity regex. Accept either ":" or "-" as separators.
    global _validbtaddr
    if _validbtaddr is None:
        import re
        # Bug fixes: use a raw string so "\d" is not an invalid string
        # escape, and anchor the pattern with "$" — re.match() only anchors
        # the start, so without "$" an address with trailing garbage (e.g.
        # "00:0e:6d:7b:a2:0aff") was accepted as valid.
        _validbtaddr = re.compile(r"((\d|[a-f]){2}(:|-)){5}(\d|[a-f]){2}$",
            re.IGNORECASE)
    import types
    # types.StringTypes covers both str and unicode on Python 2, which this
    # module targets.
    if not isinstance(address, types.StringTypes):
        return False
    return _validbtaddr.match(address) is not None
# --------- other attributes ---------
def _joinclass(codtuple):
    """
    The opposite of splitclass(): join a (service, major, minor)
    class-of-device tuple back into a whole class-of-device value.
    """
    if not isinstance(codtuple, tuple):
        raise TypeError("argument must be tuple, was %s" % type(codtuple))
    if len(codtuple) != 3:
        raise ValueError("tuple must have 3 items, has %d" % len(codtuple))
    service, major, minor = codtuple
    # Layout (after the 2 low "format" bits): minor is bits 2-7,
    # major is bits 8-12, service is bits 13+.
    return (service << 13) | (major << 8) | (minor << 2)
# Docstrings for socket objects.
# Based on std lib socket docs.
_socketdocs = {
"accept":
"""
accept() -> (socket object, address info)
Wait for an incoming connection. Return a new socket representing the
connection, and the address of the client. For RFCOMM sockets, the address
info is a pair (hostaddr, channel).
The socket must be bound and listening before calling this method.
""",
"bind":
"""
bind(address)
Bind the socket to a local address. For RFCOMM sockets, the address is a
pair (host, channel); the host must refer to the local host.
A port value of 0 binds the socket to a dynamically assigned port.
(Note that on Mac OS X, the port value must always be 0.)
The socket must not already be bound.
""",
"close":
"""
close()
Close the socket. It cannot be used after this call.
""",
"connect":
"""
connect(address)
Connect the socket to a remote address. The address should be a
(host, channel) pair for RFCOMM sockets, and a (host, PSM) pair for L2CAP
sockets.
The socket must not be already connected.
""",
"connect_ex":
"""
connect_ex(address) -> errno
This is like connect(address), but returns an error code instead of raising
an exception when an error occurs.
""",
"dup":
"""
dup() -> socket object
Returns a new socket object connected to the same system resource.
""",
"fileno":
"""
fileno() -> integer
Return the integer file descriptor of the socket.
Raises NotImplementedError on Mac OS X and Python For Series 60.
""",
"getpeername":
"""
getpeername() -> address info
Return the address of the remote endpoint. The address info is a
(host, channel) pair for RFCOMM sockets, and a (host, PSM) pair for L2CAP
sockets.
If the socket has not been connected, socket.error will be raised.
""",
"getsockname":
"""
getsockname() -> address info
Return the address of the local endpoint. The address info is a
(host, channel) pair for RFCOMM sockets, and a (host, PSM) pair for L2CAP
sockets.
If the socket has not been connected nor bound, this returns the tuple
("00:00:00:00:00:00", 0).
""",
"getsockopt":
"""
getsockopt(level, option[, bufsize]) -> value
Get a socket option. See the Unix manual for level and option.
If a nonzero buffersize argument is given, the return value is a
string of that length; otherwise it is an integer.
Currently support for socket options are platform independent -- i.e.
depends on the underlying Series 60 or BlueZ socket options support.
The Mac OS X implementation currently does not support any options at
all and automatically raises socket.error.
""",
"gettimeout":
"""
gettimeout() -> timeout
Returns the timeout in floating seconds associated with socket
operations. A timeout of None indicates that timeouts on socket
operations are disabled.
Currently not supported on Python For Series 60 implementation, which
will always return None.
""",
"listen":
"""
listen(backlog)
Enable a server to accept connections. The backlog argument must be at
least 1; it specifies the number of unaccepted connection that the system
will allow before refusing new connections.
The socket must not be already listening.
Currently not implemented on Mac OS X.
""",
"makefile":
"""
makefile([mode[, bufsize]]) -> file object
Returns a regular file object corresponding to the socket. The mode
and bufsize arguments are as for the built-in open() function.
""",
"recv":
"""
recv(bufsize[, flags]) -> data
Receive up to bufsize bytes from the socket. For the optional flags
argument, see the Unix manual. When no data is available, block until
at least one byte is available or until the remote end is closed. When
the remote end is closed and all data is read, return the empty string.
Currently the flags argument has no effect on Mac OS X.
""",
"recvfrom":
"""
recvfrom(bufsize[, flags]) -> (data, address info)
Like recv(buffersize, flags) but also return the sender's address info.
""",
"send":
"""
send(data[, flags]) -> count
Send a data string to the socket. For the optional flags
argument, see the Unix manual. Return the number of bytes
sent.
The socket must be connected to a remote socket.
Currently the flags argument has no effect on Mac OS X.
""",
"sendall":
"""
sendall(data[, flags])
Send a data string to the socket. For the optional flags
argument, see the Unix manual. This calls send() repeatedly
until all data is sent. If an error occurs, it's impossible
to tell how much data has been sent.
""",
"sendto":
"""
sendto(data[, flags], address) -> count
Like send(data, flags) but allows specifying the destination address.
For RFCOMM sockets, the address is a pair (hostaddr, channel).
""",
"setblocking":
"""
setblocking(flag)
Set the socket to blocking (flag is true) or non-blocking (false).
setblocking(True) is equivalent to settimeout(None);
setblocking(False) is equivalent to settimeout(0.0).
Initially a socket is in blocking mode. In non-blocking mode, if a
socket operation cannot be performed immediately, socket.error is raised.
The underlying implementation on Python for Series 60 only supports
non-blocking mode for send() and recv(), and ignores it for connect() and
accept().
""",
"setsockopt":
"""
setsockopt(level, option, value)
Set a socket option. See the Unix manual for level and option.
The value argument can either be an integer or a string.
Currently support for socket options are platform independent -- i.e.
depends on the underlying Series 60 or BlueZ socket options support.
The Mac OS X implementation currently does not support any options at
all and automatically raise socket.error.
""",
"settimeout":
"""
settimeout(timeout)
Set a timeout on socket operations. 'timeout' can be a float,
giving in seconds, or None. Setting a timeout of None disables
the timeout feature and is equivalent to setblocking(1).
Setting a timeout of zero is the same as setblocking(0).
If a timeout is set, the connect, accept, send and receive operations will
raise socket.timeout if a timeout occurs.
Raises NotImplementedError on Python For Series 60.
""",
"shutdown":
"""
shutdown(how)
Shut down the reading side of the socket (flag == socket.SHUT_RD), the
writing side of the socket (flag == socket.SHUT_WR), or both ends
(flag == socket.SHUT_RDWR).
"""
}
| gpl-3.0 |
# Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict that transparently creates nested dicts on access."""
    return defaultdict(autodict)

# Per-event / per-field lookup tables, filled in by the define_* callbacks
# below and consumed by flag_str() / symbol_str().
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    # Remember the delimiter used when joining multiple flag names.
    flag_fields[event_name][field_name]['delim'] = delim

def define_flag_value(event_name, field_name, value, field_str):
    # Map one bit of a flag-type field to its human-readable name.
    flag_fields[event_name][field_name]['values'][value] = field_str

def define_symbolic_field(event_name, field_name):
    # nothing to do, really
    pass

def define_symbolic_value(event_name, field_name, value, field_str):
    # Map one exact value of a symbolic field to its human-readable name.
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Render *value* as a delimiter-joined list of flag names for the field."""
    field = flag_fields[event_name][field_name]
    string = ""
    if field:
        first = True
        for bit in sorted(field['values'].keys()):
            # A zero value with a registered name for 0 is special-cased.
            if not value and not bit:
                string += field['values'][bit]
                break
            if bit and (value & bit) == bit:
                if not first and field['delim']:
                    string += " " + field['delim'] + " "
                string += field['values'][bit]
                first = False
                value &= ~bit
    return string
def symbol_str(event_name, field_name, value):
    """Translate an exact field value into its symbolic name, or ""."""
    field = symbolic_fields[event_name][field_name]
    if not field:
        return ""
    for key in sorted(field['values'].keys()):
        # A zero value with a registered name for 0 is special-cased.
        if not value and not key:
            return field['values'][key]
        if value == key:
            return field['values'][key]
    return ""
# Bit -> name mapping for the common trace flags field.
trace_flags = {
    0x00: "NONE",
    0x01: "IRQS_OFF",
    0x02: "IRQS_NOSUPPORT",
    0x04: "NEED_RESCHED",
    0x08: "HARDIRQ",
    0x10: "SOFTIRQ",
}

def trace_flag_str(value):
    """Decode *value* into a " | "-separated list of trace flag names."""
    parts = []
    for bit in trace_flags.keys():
        if not value and not bit:
            parts.append("NONE")
            break
        if bit and (value & bit) == bit:
            parts.append(trace_flags[bit])
            value &= ~bit
    return " | ".join(parts)
def taskState(state):
    """Map a numeric scheduler task state to its one-letter code."""
    states = {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD",
    }
    return states.get(state, "Unknown")
class EventHeaders:
    """Common per-event header fields shared by every trace event."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm

    def ts(self):
        """Return the full timestamp in nanoseconds."""
        return self.secs * 1000000000 + self.nsecs

    def ts_format(self):
        """Return the timestamp as a "seconds.microseconds" string."""
        return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from learnbot_dsl.Clients.Client import *
from learnbot_dsl.Clients.Devices import *
import os, Ice, numpy as np, PIL.Image as Image, io, cv2, paho.mqtt.client
import learnbot_dsl.Clients.Devices as Devices
from learnbot_dsl.functions import getFuntions
from learnbot_dsl import PATHINTERFACES
# Locate the RoboComp installation root from the environment, falling back
# to the standard system-wide install location.
ROBOCOMP = ''
try:
    ROBOCOMP = os.environ['ROBOCOMP']
except KeyError:
    print('$ROBOCOMP environment variable not set, using the default value /opt/robocomp')
    # Bug fix: os.path.join('opt', 'robocomp') produced the *relative* path
    # 'opt/robocomp', not the absolute /opt/robocomp advertised above.
    ROBOCOMP = os.path.join('/opt', 'robocomp')
# Slice interface files that must be loaded before the RoboComp proxy
# modules below can be imported.
ICEs = ["Laser.ice", "DifferentialRobot.ice", "JointMotor.ice", "EmotionalMotor.ice", "GenericBase.ice"]
icePaths = []
# Bug fix: the original line read "icePaths.append()PATHINTERFACES", which is
# a SyntaxError; the interfaces directory must be passed to append().
icePaths.append(PATHINTERFACES)
for ice in ICEs:
    for p in icePaths:
        if os.path.isfile(os.path.join(p, ice)):
            wholeStr = ' -I' + p + " --all " + os.path.join(p, ice)
            Ice.loadSlice(wholeStr)
            break
import RoboCompLaser, RoboCompDifferentialRobot, RoboCompJointMotor, RoboCompGenericBase, RoboCompEmotionalMotor
class Robot(Client):
    """learnbot client for the EBO robot.

    Wraps the robot's Ice proxies (laser, differential base, camera joint,
    emotional display) and subscribes to the camera stream the robot
    publishes over MQTT.
    """

    # Device names this client exposes to the learnbot DSL.
    devicesAvailables = ["base", "camera", "display", "distancesensors", "jointmotor"]

    def __init__(self):
        Client.__init__(self)
        try:
            # self.client = paho.mqtt.client.Client(client_id='learnbotClient', clean_session=False)
            self.client = paho.mqtt.client.Client()
            self.client.on_message = self.on_message
            # NOTE(review): broker address/port are hard-coded to the robot's
            # own access-point network (192.168.16.1:50000) -- confirm for
            # other network setups.
            self.client.connect(host='192.168.16.1', port=50000)
            self.client.subscribe(topic='camara', qos=2)
            self.client.loop_start()
            print("Streamer iniciado correctamente")
        except Exception as e:
            print("Error connect Streamer\n", e)
        self.connectToRobot()
        # Last frame received over MQTT (240x320, 3 channels) plus a flag
        # marking whether it changed since the previous read.
        self.open_cv_image = np.zeros((240, 320, 3), np.uint8)
        self.newImage = False
        # Expose the hardware through the generic Devices abstractions.
        self.distanceSensors = Devices.DistanceSensors(_readFunction=self.deviceReadLaser)
        self.camera = Devices.Camera(_readFunction=self.deviceReadCamera)
        self.base = Devices.Base(_callFunction=self.deviceMove)
        self.display = Devices.Display(_setEmotion=self.deviceSendEmotion, _setImage=None)
        self.addJointMotor("CAMERA",
                           _JointMotor=Devices.JointMotor(_callDevice=self.deviceSendAngleHead, _readDevice=None))
        self.start()

    def on_message(self, client, userdata, message):
        """MQTT callback: decode the received frame bytes into a numpy image."""
        # NOTE(review): newImage is set True here but never reset after a
        # read in deviceReadCamera() -- verify that consumers expect that.
        self.newImage = True
        data = message.payload
        image_stream = io.BytesIO()
        image_stream.write(data)
        image = Image.open(image_stream)
        self.open_cv_image = np.array(image)

    def connectToRobot(self):
        """Create the Ice proxies for every robot component."""
        self.laser_proxy = connectComponent("laser:tcp -h 192.168.16.1 -p 10104", RoboCompLaser.LaserPrx)
        self.differentialrobot_proxy = connectComponent("differentialrobot:tcp -h 192.168.16.1 -p 10004",
                                                        RoboCompDifferentialRobot.DifferentialRobotPrx)
        self.jointmotor_proxy = connectComponent("jointmotor:tcp -h 192.168.16.1 -p 10067",
                                                 RoboCompJointMotor.JointMotorPrx)
        self.emotionalmotor_proxy = connectComponent("emotionalmotor:tcp -h 192.168.16.1 -p 30001",
                                                     RoboCompEmotionalMotor.EmotionalMotorPrx)

    def deviceReadLaser(self):
        """Read the laser and split the distances into front/left/right sets."""
        laserdata = self.laser_proxy.getLaserData()
        usList = [d.dist for d in laserdata]
        return {"front": usList[1:4],  # The values must be in mm
                "left": usList[:2],
                "right": usList[3:]}

    def deviceMove(self, _adv, _rot):
        """Command the differential base: advance speed and rotation speed."""
        self.differentialrobot_proxy.setSpeedBase(_adv, _rot)

    def deviceReadCamera(self, ):
        """Return the latest camera frame and whether it is new."""
        return self.open_cv_image, self.newImage

    def deviceSendEmotion(self, _emotion):
        """Map a Devices.Emotions value onto the matching emotional-motor call."""
        if _emotion is Emotions.Joy:
            self.emotionalmotor_proxy.expressJoy()
        elif _emotion is Emotions.Sadness:
            self.emotionalmotor_proxy.expressSadness()
        elif _emotion is Emotions.Surprise:
            self.emotionalmotor_proxy.expressSurprise()
        elif _emotion is Emotions.Disgust:
            self.emotionalmotor_proxy.expressDisgust()
        elif _emotion is Emotions.Anger:
            self.emotionalmotor_proxy.expressAnger()
        elif _emotion is Emotions.Fear:
            self.emotionalmotor_proxy.expressFear()
        elif _emotion is Emotions.Neutral:
            self.emotionalmotor_proxy.expressNeutral()

    def deviceSendAngleHead(self, _angle):
        """Point the camera servo at the given angle."""
        goal = RoboCompJointMotor.MotorGoalPosition()
        goal.name = 'servo'
        goal.position = _angle
        self.jointmotor_proxy.setPosition(goal)
if __name__ == '__main__':
    # Simple obstacle-avoidance demo loop.
    robot = Robot()
    print(robot.__dict__)
    while True:
        # NOTE(review): turning while the path is obstacle-free and driving
        # straight otherwise looks inverted -- verify the semantics of
        # obstacle_free() against the Client base class.
        if robot.obstacle_free(50):
            if not robot.am_I_turning():
                if robot.left_obstacle(50):
                    robot.turn_right()
                else:
                    robot.turn_left()
        else:
            robot.move_straight()
    # Bug fix: the original called ebo.join(), but no name 'ebo' exists in
    # this script; the client object is 'robot'. (Unreachable after the
    # infinite loop above, but at least no longer a NameError.)
    robot.join()
| gpl-3.0 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# to configure behavior, define $CQL_TEST_HOST to the destination address
# for Thrift connections, and $CQL_TEST_PORT to the associated port.
from .basecase import BaseTestCase
class TestCqlshInvocation(BaseTestCase):
    """Placeholder suite for cqlsh command-line invocation behaviour.

    Every test below is an intentionally empty stub: the case names map out
    the invocation scenarios to cover (connection options, colour handling,
    output encodings, interrupts, ...) but none are implemented yet.
    """
    def setUp(self):
        pass
    def tearDown(self):
        pass
    # Each method is a named scenario awaiting an implementation.
    def test_normal_run(self):
        pass
    def test_python_interpreter_location(self):
        pass
    def test_color_capability_detection(self):
        pass
    def test_colored_output(self):
        pass
    def test_color_cmdline_option(self):
        pass
    def test_debug_option(self):
        pass
    def test_connection_args(self):
        pass
    def test_connection_config(self):
        pass
    def test_connection_envvars(self):
        pass
    def test_command_history(self):
        pass
    def test_missing_dependencies(self):
        pass
    def test_completekey_config(self):
        pass
    def test_ctrl_c(self):
        pass
    def test_eof(self):
        pass
    def test_output_encoding_detection(self):
        pass
    def test_output_encoding(self):
        pass
    def test_retries(self):
        pass
| apache-2.0 |
import os
import sys
import gtk
class Indicator(object):
    """This class provides access to application indicators in Gnome environments."""

    def __init__(self, parent):
        self._parent = parent
        self._menu = gtk.Menu()
        self._create_menu_items()
        # Icon is resolved relative to the application root: <root>/images/
        base_path = os.path.dirname(os.path.dirname(sys.argv[0]))
        self._icon = 'sunflower_64.png'
        self._icon_path = os.path.abspath(os.path.join(base_path, 'images'))
        self._indicator = None
        # The status icon is only created when the main window is configured
        # to hide on close instead of quitting.
        if self._parent.window_options.section('main').get('hide_on_close'):
            self._indicator = gtk.StatusIcon()
            self._indicator.set_from_file(os.path.join(self._icon_path, self._icon))
            self._indicator.connect('activate', self._status_icon_activate)
            self._indicator.connect('popup-menu', self._status_icon_popup_menu)

    def _create_menu_items(self):
        """Create commonly used menu items in indicator"""
        # show window
        self._menu_show = self._parent.menu_manager.create_menu_item({
                'label': _('Sh_ow main window'),
                'callback': self._change_visibility,
                'data': True,
            })
        self._menu_show.hide()
        self._menu.append(self._menu_show)
        # hide window
        self._menu_hide = self._parent.menu_manager.create_menu_item({
                'label': _('_Hide main window'),
                'callback': self._change_visibility,
                'data': False,
            })
        self._menu.append(self._menu_hide)
        # close window option
        self._menu.append(self._parent.menu_manager.create_menu_item({'type': 'separator'}))
        self._menu.append(self._parent.menu_manager.create_menu_item({
                'label': _('_Quit'),
                'type': 'image',
                'callback': self._parent._destroy,
                'stock': gtk.STOCK_QUIT,
            }))
        # separator shown only once operations are added (see add_operation)
        self._separator = self._parent.menu_manager.create_menu_item({'type': 'separator'})
        self._menu.append(self._separator)
        self._separator.hide()

    def _status_icon_activate(self, widget, data=None):
        """Toggle visibility on status icon activate"""
        visible = not self._parent.get_visible()
        self._change_visibility(widget, visible)

    def _status_icon_popup_menu(self, widget, button, activate_time):
        """Show popup menu on right click"""
        self._menu.popup(None, None, None, button, activate_time)

    def _change_visibility(self, widget, visible):
        """Change main window visibility"""
        self._parent.set_visible(visible)
        self.adjust_visibility_items(visible)

    def adjust_visibility_items(self, visible):
        """Adjust show/hide menu items"""
        # Exactly one of the two items is visible at any time.
        self._menu_show.set_visible(not visible)
        self._menu_hide.set_visible(visible)

    def add_operation(self, widget, callback, data):
        """Add operation to operations menu"""
        menu_item = gtk.MenuItem()
        menu_item.add(widget)
        if callback is not None:
            menu_item.connect('activate', callback, data)
        menu_item.show()
        self._separator.show()
        self._menu.append(menu_item)
        # NOTE(review): plain gtk.StatusIcon has no set_menu(); this branch
        # presumably targets appindicator-backed builds -- confirm.
        if hasattr(self._indicator, 'set_menu'):
            self._indicator.set_menu(self._menu)
        return menu_item
| gpl-3.0 |
"""
Provide a mock lock platform.
Call init before using it in your tests to ensure clean test data.
"""
from homeassistant.components.lock import SUPPORT_OPEN, LockEntity
from tests.common import MockEntity
ENTITIES = {}
def init(empty=False):
    """Initialize the platform with entities."""
    global ENTITIES
    if empty:
        ENTITIES = {}
        return
    ENTITIES = {
        "support_open": MockLock(
            name="Support open Lock",
            is_locked=True,
            supported_features=SUPPORT_OPEN,
            unique_id="unique_support_open",
        ),
        "no_support_open": MockLock(
            name="No support open Lock",
            is_locked=True,
            supported_features=0,
            unique_id="unique_no_support_open",
        ),
    }
async def async_setup_platform(
    hass, config, async_add_entities_callback, discovery_info=None
):
    """Return mock entities."""
    # Hand every registered mock entity over to Home Assistant.
    async_add_entities_callback(list(ENTITIES.values()))
class MockLock(MockEntity, LockEntity):
    """Mock Lock class."""

    @property
    def is_locked(self):
        """Return true if the lock is locked."""
        return self._handle("is_locked")

    @property
    def supported_features(self):
        """Return the supported-features bitmask of this lock."""
        return self._handle("supported_features")
| apache-2.0 |
from os import path
import time
import smtplib
import logging
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# BASIC
# -----------------------------------------------------------------------------
# All application data (JSON db, logs) lives under the user's home.
data_dir = path.expanduser('~/data')
json_db_path = path.join(data_dir, 'bills_db.json')
# -----------------------------------------------------------------------------
# EMAIL
# -----------------------------------------------------------------------------
email_from_address = 'your@email.com'
email_to_addresses = [email_from_address]
# NOTE(review): this is the *string* 'None', not the None object --
# presumably a path to a password file; verify consumers.
email_pw_path = 'None'
email_title = 'User' # Name for email subject
# NOTE(review): smtplib.SMTP(host, port) connects to the server at
# construction time, i.e. on module import -- this will block or raise when
# offline. Consider constructing lazily (config_local.py may override it).
email_server = smtplib.SMTP("smtp.gmail.com", 587)
# -----------------------------------------------------------------------------
# Logging
# -----------------------------------------------------------------------------
def configure_logging(ext_logger, log_level=logging.WARNING,
                      file_level=logging.WARNING, console_level=logging.DEBUG,
                      logname=None):
    """Attach a date-stamped file handler and a console handler to *ext_logger*.

    ext_logger -- the logging.Logger to configure
    log_level -- overall level set on the logger itself
    file_level -- level for the file handler (written under <data_dir>/logs)
    console_level -- level for the console (stderr) handler
    logname -- base name of the log file; defaults to the logger's name

    NOTE(review): calling this twice on the same logger adds duplicate
    handlers -- callers are expected to configure each logger only once.
    """
    ext_logger.setLevel(log_level)
    # Setup file logging: <data_dir>/logs/YYYYMMDD_<logname>.log
    date = time.strftime('%Y%m%d')
    if not logname:
        logname = ext_logger.name
    logfile = '%s_%s.log' % (date, logname)
    log_dir = path.join(data_dir, 'logs')
    # Robustness fix: logging.FileHandler raises if the target directory
    # does not exist, so create it on first use.
    if not path.isdir(log_dir):
        import os  # local import: the module top only pulls in os.path
        os.makedirs(log_dir)
    logpath = path.join(log_dir, logfile)
    file_handler = logging.FileHandler(logpath)
    file_handler.setLevel(file_level)
    file_formatter = logging.Formatter(
        '%(asctime)s:%(name)s:%(levelname)s: %(message)s')
    file_handler.setFormatter(file_formatter)
    # Setup console logging
    console_handler = logging.StreamHandler()
    console_handler.setLevel(console_level)
    console_formatter = logging.Formatter('%(levelname)s: %(message)s')
    console_handler.setFormatter(console_formatter)
    ext_logger.addHandler(file_handler)
    ext_logger.addHandler(console_handler)
# Allow an optional sibling config_local.py to override any defaults above.
try:
    from config_local import *
except Exception:
    # Narrowed from a bare "except:": a bare clause also swallows
    # SystemExit/KeyboardInterrupt, which must never be hidden here.
    print('Could not load local configs. Program may not work.')
| mit |
# -*- coding: utf-8 -*-
from hachoir_core.compatibility import any, sorted
from hachoir_core.endian import endian_name
from hachoir_core.tools import makePrintable, makeUnicode
from hachoir_core.dict import Dict
from hachoir_core.error import error, HACHOIR_ERRORS
from hachoir_core.i18n import _
from hachoir_core.log import Logger
from hachoir_metadata.metadata_item import (
MIN_PRIORITY, MAX_PRIORITY, QUALITY_NORMAL)
from hachoir_metadata.register import registerAllItems
extractors = {}
class Metadata(Logger):
    """Base container for a document's extracted metadata (Python 2 code).

    Metadata items are registered by registerAllItems(); attribute
    assignment (meta.author = ...) *appends* a value to the matching item
    (see __setattr__), while reads go through get()/getText()/getValues().
    """
    header = u"Metadata"

    def __init__(self, parent, quality=QUALITY_NORMAL):
        assert isinstance(self.header, unicode)
        # Limit to 0.0 .. 1.0
        if parent:
            quality = parent.quality
        else:
            quality = min(max(0.0, quality), 1.0)
        object.__init__(self)
        # object.__setattr__ must be used here because __setattr__ below is
        # overloaded to store metadata values instead of plain attributes.
        object.__setattr__(self, "_Metadata__data", {})
        object.__setattr__(self, "quality", quality)
        header = self.__class__.header
        object.__setattr__(self, "_Metadata__header", header)
        registerAllItems(self)

    def _logger(self):
        # Logger hook: no extra per-instance logging context.
        pass

    def __setattr__(self, key, value):
        """
        Add a new value to data with name 'key'. Skip duplicates.
        """
        # Invalid key?
        if key not in self.__data:
            raise KeyError(_("%s has no metadata '%s'") % (self.__class__.__name__, key))
        # Skip duplicates
        self.__data[key].add(value)

    def setHeader(self, text):
        # Bypass the overloaded __setattr__ to set a plain attribute.
        object.__setattr__(self, "header", text)

    def getItems(self, key):
        """Return the Data item registered under *key*; raise ValueError if unknown."""
        try:
            return self.__data[key]
        except LookupError:
            raise ValueError("Metadata has no value '%s'" % key)

    def getItem(self, key, index):
        """Return the index-th value object of item *key*, or None."""
        try:
            return self.getItems(key)[index]
        except (LookupError, ValueError):
            return None

    def has(self, key):
        # True when at least one value has been stored for the key.
        return 1 <= len(self.getItems(key))

    def get(self, key, default=None, index=0):
        """
        Read first value of tag with name 'key'.
        >>> from datetime import timedelta
        >>> a = RootMetadata()
        >>> a.duration = timedelta(seconds=2300)
        >>> a.get('duration')
        datetime.timedelta(0, 2300)
        >>> a.get('author', u'Anonymous')
        u'Anonymous'
        """
        item = self.getItem(key, index)
        if item is None:
            if default is None:
                raise ValueError("Metadata has no value '%s' (index %s)" % (key, index))
            else:
                return default
        return item.value

    def getValues(self, key):
        """Return the list of raw values stored for item *key*."""
        try:
            data = self.__data[key]
        except LookupError:
            raise ValueError("Metadata has no value '%s'" % key)
        return [ item.value for item in data ]

    def getText(self, key, default=None, index=0):
        """
        Read first value, as unicode string, of tag with name 'key'.
        >>> from datetime import timedelta
        >>> a = RootMetadata()
        >>> a.duration = timedelta(seconds=2300)
        >>> a.getText('duration')
        u'38 min 20 sec'
        >>> a.getText('titre', u'Unknown')
        u'Unknown'
        """
        item = self.getItem(key, index)
        if item is not None:
            return item.text
        else:
            return default

    def register(self, data):
        """Register a new Data item (called by registerAllItems())."""
        assert data.key not in self.__data
        data.metadata = self
        self.__data[data.key] = data

    def __iter__(self):
        return self.__data.itervalues()

    def __str__(self):
        r"""
        Create a multi-line ASCII string (end of line is "\n") which
        represents all datas.
        >>> a = RootMetadata()
        >>> a.author = "haypo"
        >>> a.copyright = unicode("© Hachoir", "UTF-8")
        >>> print a
        Metadata:
        - Author: haypo
        - Copyright: \xa9 Hachoir
        @see __unicode__() and exportPlaintext()
        """
        text = self.exportPlaintext()
        return "\n".join( makePrintable(line, "ASCII") for line in text )

    def __unicode__(self):
        r"""
        Create a multi-line Unicode string (end of line is "\n") which
        represents all datas.
        >>> a = RootMetadata()
        >>> a.copyright = unicode("© Hachoir", "UTF-8")
        >>> print repr(unicode(a))
        u'Metadata:\n- Copyright: \xa9 Hachoir'
        @see __str__() and exportPlaintext()
        """
        return "\n".join(self.exportPlaintext())

    def exportPlaintext(self, priority=None, human=True, line_prefix=u"- ", title=None):
        r"""
        Convert metadata to multi-line Unicode string and skip datas
        with priority lower than specified priority.
        Default priority is Metadata.MAX_PRIORITY. If human flag is True, data
        key are translated to better human name (eg. "bit_rate" becomes
        "Bit rate") which may be translated using gettext.
        If priority is too small, metadata are empty and so None is returned.
        >>> print RootMetadata().exportPlaintext()
        None
        >>> meta = RootMetadata()
        >>> meta.copyright = unicode("© Hachoir", "UTF-8")
        >>> print repr(meta.exportPlaintext())
        [u'Metadata:', u'- Copyright: \xa9 Hachoir']
        @see __str__() and __unicode__()
        """
        # Clamp the requested priority to the legal range.
        if priority is not None:
            priority = max(priority, MIN_PRIORITY)
            priority = min(priority, MAX_PRIORITY)
        else:
            priority = MAX_PRIORITY
        if not title:
            title = self.header
        text = ["%s:" % title]
        # Items iterate in priority order, so we can stop at the first
        # item beyond the requested priority.
        for data in sorted(self):
            if priority < data.priority:
                break
            if not data.values:
                continue
            if human:
                title = data.description
            else:
                title = data.key
            for item in data.values:
                if human:
                    value = item.text
                else:
                    value = makeUnicode(item.value)
                text.append("%s%s: %s" % (line_prefix, title, value))
        # Only the header line means "no metadata at all".
        if 1 < len(text):
            return text
        else:
            return None

    def __nonzero__(self):
        return any(item for item in self.__data.itervalues())
class RootMetadata(Metadata):
    """Metadata of a top-level (parent-less) document."""
    def __init__(self, quality=QUALITY_NORMAL):
        Metadata.__init__(self, None, quality)
class MultipleMetadata(RootMetadata):
    """Metadata of a container document: common items plus named sub-groups,
    one Metadata per sub-document (Python 2 code)."""
    header = _("Common")

    def __init__(self, quality=QUALITY_NORMAL):
        RootMetadata.__init__(self, quality)
        # object.__setattr__ bypasses the metadata-storing __setattr__.
        object.__setattr__(self, "_MultipleMetadata__groups", Dict())
        object.__setattr__(self, "_MultipleMetadata__key_counter", {})

    def __contains__(self, key):
        return key in self.__groups

    def __getitem__(self, key):
        return self.__groups[key]

    def iterGroups(self):
        return self.__groups.itervalues()

    def __nonzero__(self):
        if RootMetadata.__nonzero__(self):
            return True
        return any(bool(group) for group in self.__groups)

    def addGroup(self, key, metadata, header=None):
        """
        Add a new group (metadata of a sub-document).
        Returns False if the group is skipped, True if it has been added.
        """
        if not metadata:
            self.warning("Skip empty group %s" % key)
            return False
        # A trailing "[]" requests auto-numbering: "audio[]" -> "audio[1]", ...
        if key.endswith("[]"):
            key = key[:-2]
            if key in self.__key_counter:
                self.__key_counter[key] += 1
            else:
                self.__key_counter[key] = 1
            key += "[%u]" % self.__key_counter[key]
        if header:
            metadata.setHeader(header)
        self.__groups.append(key, metadata)
        return True

    def exportPlaintext(self, priority=None, human=True, line_prefix=u"- "):
        # Start with the common items, then append every group's own export.
        common = Metadata.exportPlaintext(self, priority, human, line_prefix)
        if common:
            text = common
        else:
            text = []
        for key, metadata in self.__groups.iteritems():
            if not human:
                title = key
            else:
                title = None
            value = metadata.exportPlaintext(priority, human, line_prefix, title=title)
            if value:
                text.extend(value)
        if len(text):
            return text
        else:
            return None
def registerExtractor(parser, extractor):
    """Associate a parser class with its metadata extractor class."""
    # Each parser class gets at most one extractor, and the extractor must
    # produce a RootMetadata subclass.
    assert parser not in extractors
    assert issubclass(extractor, RootMetadata)
    extractors[parser] = extractor
def extractMetadata(parser, quality=QUALITY_NORMAL):
    """
    Create a Metadata class from a parser. Returns None if no metadata
    extractor does exist for the parser class.
    """
    try:
        extractor = extractors[parser.__class__]
    except KeyError:
        return None
    metadata = extractor(quality)
    try:
        metadata.extract(parser)
    # Python 2 except syntax; extraction errors are logged, and whatever was
    # extracted before the failure is still returned.
    except HACHOIR_ERRORS, err:
        error("Error during metadata extraction: %s" % unicode(err))
    if metadata:
        metadata.mime_type = parser.mime_type
        metadata.endian = endian_name[parser.endian]
    return metadata
| gpl-2.0 |
sysadminmatmoz/OCB | openerp/addons/base/res/res_config.py | 8 | 31381 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
import logging
from operator import attrgetter
import re
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools import ustr
from openerp.tools.translate import _
from openerp import exceptions
from lxml import etree
from openerp.exceptions import UserError
_logger = logging.getLogger(__name__)
class res_config_module_installation_mixin(object):
    """Mixin providing module installation for configuration wizards."""

    def _install_modules(self, cr, uid, modules, context):
        """Install the requested modules.
        return the next action to execute

        modules is a list of tuples
            (mod_name, browse_record | None)
        """
        ir_module = self.pool.get('ir.module.module')
        to_install_ids = []
        to_install_missing_names = []
        # Split requests into known-but-uninstalled modules and modules the
        # database does not know about at all.
        for name, module in modules:
            if not module:
                to_install_missing_names.append(name)
            elif module.state == 'uninstalled':
                to_install_ids.append(module.id)
        result = None
        if to_install_ids:
            result = ir_module.button_immediate_install(cr, uid, to_install_ids, context=context)
        #FIXME: if result is not none, the corresponding todo will be skipped because it was just marked done
        if to_install_missing_names:
            # Unknown modules: send the user to the Apps client action so
            # they can be fetched/installed interactively.
            return {
                'type': 'ir.actions.client',
                'tag': 'apps',
                'params': {'modules': to_install_missing_names},
            }
        return result
class res_config_configurable(osv.osv_memory):
    ''' Base classes for new-style configuration items

    Configuration items should inherit from this class, implement
    the execute method (and optionally the cancel one) and have
    their view inherit from the related res_config_view_base view.
    '''
    _name = 'res.config'

    def _next_action(self, cr, uid, context=None):
        """Return the next open automatic ir.actions.todo visible to the
        current user (group-filtered), or None."""
        Todos = self.pool['ir.actions.todo']
        _logger.info('getting next %s', Todos)
        active_todos = Todos.browse(cr, uid,
            Todos.search(cr, uid, ['&', ('type', '=', 'automatic'), ('state','=','open')]),
            context=context)
        # Keep only todos that are unrestricted or share a group with the user.
        user_groups = set(map(
            lambda g: g.id,
            self.pool['res.users'].browse(cr, uid, [uid], context=context)[0].groups_id))
        valid_todos_for_user = [
            todo for todo in active_todos
            if not todo.groups_id or bool(user_groups.intersection((
                group.id for group in todo.groups_id)))
        ]
        if valid_todos_for_user:
            return valid_todos_for_user[0]
        return None

    def _next(self, cr, uid, context=None):
        """Launch the next todo action, or reload the client if none remain."""
        _logger.info('getting next operation')
        next = self._next_action(cr, uid, context=context)
        _logger.info('next action is %s', next)
        if next:
            return next.action_launch(context=context)
        return {
            'type': 'ir.actions.client',
            'tag': 'reload',
        }

    def start(self, cr, uid, ids, context=None):
        # Entry point when the wizard is launched; same as pressing Next.
        return self.next(cr, uid, ids, context)

    def next(self, cr, uid, ids, context=None):
        """ Returns the next todo action to execute (using the default
        sort order)
        """
        return self._next(cr, uid, context=context)

    def execute(self, cr, uid, ids, context=None):
        """ Method called when the user clicks on the ``Next`` button.
        Execute *must* be overloaded unless ``action_next`` is overloaded
        (which is something you generally don't need to do).
        If ``execute`` returns an action dictionary, that action is executed
        rather than just going to the next configuration item.
        """
        raise NotImplementedError(
            'Configuration items need to implement execute')

    def cancel(self, cr, uid, ids, context=None):
        """ Method called when the user click on the ``Skip`` button.
        ``cancel`` should be overloaded instead of ``action_skip``. As with
        ``execute``, if it returns an action dictionary that action is
        executed in stead of the default (going to the next configuration item)
        The default implementation is a NOOP.
        ``cancel`` is also called by the default implementation of
        ``action_cancel``.
        """
        pass

    def action_next(self, cr, uid, ids, context=None):
        """ Action handler for the ``next`` event.
        Sets the status of the todo the event was sent from to
        ``done``, calls ``execute`` and -- unless ``execute`` returned
        an action dictionary -- executes the action provided by calling
        ``next``.
        """
        next = self.execute(cr, uid, ids, context=context)
        if next: return next
        return self.next(cr, uid, ids, context=context)

    def action_skip(self, cr, uid, ids, context=None):
        """ Action handler for the ``skip`` event.
        Sets the status of the todo the event was sent from to
        ``skip``, calls ``cancel`` and -- unless ``cancel`` returned
        an action dictionary -- executes the action provided by calling
        ``next``.
        """
        next = self.cancel(cr, uid, ids, context=context)
        if next: return next
        return self.next(cr, uid, ids, context=context)

    def action_cancel(self, cr, uid, ids, context=None):
        """ Action handler for the ``cancel`` event. That event isn't
        generated by the res.config.view.base inheritable view, the
        inherited view has to overload one of the buttons (or add one
        more).
        Sets the status of the todo the event was sent from to
        ``cancel``, calls ``cancel`` and -- unless ``cancel`` returned
        an action dictionary -- executes the action provided by calling
        ``next``.
        """
        next = self.cancel(cr, uid, ids, context=context)
        if next: return next
        return self.next(cr, uid, ids, context=context)
class res_config_installer(osv.osv_memory, res_config_module_installation_mixin):
    """ New-style configuration base specialized for addons selection
    and installation.
    Basic usage
    -----------
    Subclasses can simply define a number of _columns as
    fields.boolean objects. The keys (column names) should be the
    names of the addons to install (when selected). Upon action
    execution, selected boolean fields (and those only) will be
    interpreted as addons to install, and batch-installed.
    Additional addons
    -----------------
    It is also possible to require the installation of an additional
    addon set when a specific preset of addons has been marked for
    installation (in the basic usage only, additionals can't depend on
    one another).
    These additionals are defined through the ``_install_if``
    property. This property is a mapping of a collection of addons (by
    name) to a collection of addons (by name) [#]_, and if all the *key*
    addons are selected for installation, then the *value* ones will
    be selected as well. For example::
        _install_if = {
            ('sale','crm'): ['sale_crm'],
        }
    This will install the ``sale_crm`` addon if and only if both the
    ``sale`` and ``crm`` addons are selected for installation.
    You can define as many additionals as you wish, and additionals
    can overlap in key and value. For instance::
        _install_if = {
            ('sale','crm'): ['sale_crm'],
            ('sale','project'): ['sale_service'],
        }
    will install both ``sale_crm`` and ``sale_service`` if all of
    ``sale``, ``crm`` and ``project`` are selected for installation.
    Hook methods
    ------------
    Subclasses might also need to express dependencies more complex
    than that provided by additionals. In this case, it's possible to
    define methods of the form ``_if_%(name)s`` where ``name`` is the
    name of a boolean field. If the field is selected, then the
    corresponding module will be marked for installation *and* the
    hook method will be executed.
    Hook methods take the usual set of parameters (cr, uid, ids,
    context) and can return a collection of additional addons to
    install (if they return anything, otherwise they should not return
    anything, though returning any "falsy" value such as None or an
    empty collection will have the same effect).
    Complete control
    ----------------
    The last hook is to simply overload the ``modules_to_install``
    method, which implements all the mechanisms above. This method
    takes the usual set of parameters (cr, uid, ids, context) and
    returns a ``set`` of addons to install (addons selected by the
    above methods minus addons from the *basic* set which are already
    installed) [#]_ so an overloader can simply manipulate the ``set``
    returned by ``res_config_installer.modules_to_install`` to add or
    remove addons.
    Skipping the installer
    ----------------------
    Unless it is removed from the view, installers have a *skip*
    button which invokes ``action_skip`` (and the ``cancel`` hook from
    ``res.config``). Hooks and additionals *are not run* when skipping
    installation, even for already installed addons.
    Again, setup your hooks accordingly.
    .. [#] note that since a mapping key needs to be hashable, it's
           possible to use a tuple or a frozenset, but not a list or a
           regular set
    .. [#] because the already-installed modules are only pruned at
           the very end of ``modules_to_install``, additionals and
           hooks depending on them *are guaranteed to execute*. Setup
           your hooks accordingly.
    """
    _name = 'res.config.installer'
    _inherit = 'res.config'
    # Mapping {iterable-of-addon-names: iterable-of-addon-names}; see the
    # "Additional addons" section of the class docstring.
    _install_if = {}
    def already_installed(self, cr, uid, context=None):
        """ For each module, check if it's already installed and if it
        is return its name
        :returns: a list of the already installed modules in this
                  installer
        :rtype: [str]
        """
        return map(attrgetter('name'),
                   self._already_installed(cr, uid, context=context))
    def _already_installed(self, cr, uid, context=None):
        """ For each module (boolean fields in a res.config.installer),
        check if it's already installed (either 'to install', 'to upgrade'
        or 'installed') and if it is return the module's record
        :returns: a list of all installed modules in this installer
        :rtype: recordset (collection of Record)
        """
        modules = self.pool['ir.module.module']
        # Only boolean columns of the installer represent module choices.
        selectable = [field for field in self._columns
                      if type(self._columns[field]) is fields.boolean]
        return modules.browse(
            cr, uid,
            modules.search(cr, uid,
                           [('name','in',selectable),
                            ('state','in',['to install', 'installed', 'to upgrade'])],
                           context=context),
            context=context)
    def modules_to_install(self, cr, uid, ids, context=None):
        """ selects all modules to install:
        * checked boolean fields
        * return values of hook methods. Hook methods are of the form
          ``_if_%(addon_name)s``, and are called if the corresponding
          addon is marked for installation. They take the arguments
          cr, uid, ids and context, and return an iterable of addon
          names
        * additionals, additionals are setup through the ``_install_if``
          class variable. ``_install_if`` is a dict of {iterable:iterable}
          where key and value are iterables of addon names.
          If all the addons in the key are selected for installation
          (warning: addons added through hooks don't count), then the
          addons in the value are added to the set of modules to install
        * not already installed
        """
        # Base set: every checked boolean field of the wizard record(s),
        # skipping the technical 'id' key returned by read().
        base = set(module_name
                   for installer in self.read(cr, uid, ids, context=context)
                   for module_name, to_install in installer.iteritems()
                   if module_name != 'id'
                   if type(self._columns.get(module_name)) is fields.boolean
                   if to_install)
        hooks_results = set()
        for module in base:
            hook = getattr(self, '_if_%s'% module, None)
            if hook:
                # NOTE(review): the hook is invoked with context=None rather
                # than the caller's ``context`` — looks unintentional; confirm
                # before relying on context inside ``_if_*`` hooks.
                hooks_results.update(hook(cr, uid, ids, context=None) or set())
        # Additionals trigger only on the *base* selection (hook results
        # deliberately don't count — see docstring warning).
        additionals = set(
            module for requirements, consequences \
                       in self._install_if.iteritems()
            if base.issuperset(requirements)
            for module in consequences)
        # Prune already-installed modules only at the very end, so hooks and
        # additionals depending on them still ran.
        return (base | hooks_results | additionals).difference(
            self.already_installed(cr, uid, context))
    def default_get(self, cr, uid, fields_list, context=None):
        ''' If an addon is already installed, check it by default
        '''
        defaults = super(res_config_installer, self).default_get(
            cr, uid, fields_list, context=context)
        # Overlay True for every already-installed module field.
        return dict(defaults,
                    **dict.fromkeys(
                        self.already_installed(cr, uid, context=context),
                        True))
    def fields_get(self, cr, uid, fields=None, context=None, write_access=True, attributes=None):
        """ If an addon is already installed, set it to readonly as
        res.config.installer doesn't handle uninstallations of already
        installed addons
        """
        fields = super(res_config_installer, self).fields_get(
            cr, uid, fields, context, write_access, attributes)
        for name in self.already_installed(cr, uid, context=context):
            if name not in fields:
                continue
            # Lock the checkbox and explain why in the field's tooltip.
            fields[name].update(
                readonly=True,
                help= ustr(fields[name].get('help', '')) +
                     _('\n\nThis addon is already installed on your system'))
        return fields
    def execute(self, cr, uid, ids, context=None):
        """Resolve the final module selection and trigger its installation.

        Builds (name, browse_record_or_None) pairs expected by
        ``_install_modules`` from the mixin.
        """
        to_install = list(self.modules_to_install(
            cr, uid, ids, context=context))
        _logger.info('Selecting addons %s to install', to_install)
        ir_module = self.pool.get('ir.module.module')
        modules = []
        for name in to_install:
            mod_ids = ir_module.search(cr, uid, [('name', '=', name)])
            # record is None when no module of that name exists in ir.module.module.
            record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
            modules.append((name, record))
        return self._install_modules(cr, uid, modules, context=context)
class res_config_settings(osv.osv_memory, res_config_module_installation_mixin):
    """ Base configuration wizard for application settings. It provides support for setting
        default values, assigning groups to employee users, and installing modules.
        To make such a 'settings' wizard, define a model like::
            class my_config_wizard(osv.osv_memory):
                _name = 'my.settings'
                _inherit = 'res.config.settings'
                _columns = {
                    'default_foo': fields.type(..., default_model='my.model'),
                    'group_bar': fields.boolean(..., group='base.group_user', implied_group='my.group'),
                    'module_baz': fields.boolean(...),
                    'other_field': fields.type(...),
                }
        The method ``execute`` provides some support based on a naming convention:
        * For a field like 'default_XXX', ``execute`` sets the (global) default value of
          the field 'XXX' in the model named by ``default_model`` to the field's value.
        * For a boolean field like 'group_XXX', ``execute`` adds/removes 'implied_group'
          to/from the implied groups of 'group', depending on the field's value.
          By default 'group' is the group Employee. Groups are given by their xml id.
          The attribute 'group' may contain several xml ids, separated by commas.
        * For a selection field like 'group_XXX' composed of 2 integers values ('0' and '1'),
          ``execute`` adds/removes 'implied_group' to/from the implied groups of 'group',
          depending on the field's value.
          By default 'group' is the group Employee. Groups are given by their xml id.
          The attribute 'group' may contain several xml ids, separated by commas.
        * For a boolean field like 'module_XXX', ``execute`` triggers the immediate
          installation of the module named 'XXX' if the field has value ``True``.
        * For a selection field like 'module_XXX' composed of 2 integers values ('0' and '1'),
          ``execute`` triggers the immediate installation of the module named 'XXX'
          if the field has the integer value ``1``.
        * For the other fields, the method ``execute`` invokes all methods with a name
          that starts with 'set_'; such methods can be defined to implement the effect
          of those fields.
        The method ``default_get`` retrieves values that reflect the current status of the
        fields like 'default_XXX', 'group_XXX' and 'module_XXX'. It also invokes all methods
        with a name that starts with 'get_default_'; such methods can be defined to provide
        current values for other fields.
    """
    _name = 'res.config.settings'
    def copy(self, cr, uid, id, values, context=None):
        # Settings wizards are transient and singleton-like; duplicating one
        # makes no sense, so forbid it outright.
        raise UserError(_("Cannot duplicate configuration!"), "")
    def fields_view_get(self, cr, user, view_id=None, view_type='form',
                        context=None, toolbar=False, submenu=False):
        """Post-process the form view: make 'module_*' fields readonly for
        users without write access on ir.module.module, and wire an
        ``onchange_module`` handler on each of them."""
        ret_val = super(res_config_settings, self).fields_view_get(
            cr, user, view_id=view_id, view_type=view_type, context=context,
            toolbar=toolbar, submenu=submenu)
        can_install_modules = self.pool['ir.module.module'].check_access_rights(
            cr, user, 'write', raise_exception=False)
        doc = etree.XML(ret_val['arch'])
        for field in ret_val['fields']:
            if not field.startswith("module_"):
                continue
            for node in doc.xpath("//field[@name='%s']" % field):
                if not can_install_modules:
                    # Both the attribute and the client-side 'modifiers' JSON
                    # must be set for the web client to honor the readonly.
                    node.set("readonly", "1")
                    modifiers = json.loads(node.get("modifiers"))
                    modifiers['readonly'] = True
                    node.set("modifiers", json.dumps(modifiers))
                if 'on_change' not in node.attrib:
                    # Don't clobber an on_change the view already defines.
                    node.set("on_change",
                    "onchange_module(%s, '%s')" % (field, field))
        ret_val['arch'] = etree.tostring(doc)
        return ret_val
    def onchange_module(self, cr, uid, ids, field_value, module_name, context=None):
        """Warn the user when unchecking a 'module_*' field would uninstall
        the module and its downstream dependencies. Returns an onchange
        'warning' dict, or {} when nothing would be uninstalled."""
        module_pool = self.pool.get('ir.module.module')
        module_ids = module_pool.search(
            cr, SUPERUSER_ID, [('name', '=', module_name.replace("module_", '')),
                               ('state','in', ['to install', 'installed', 'to upgrade'])],
            context=context)
        if module_ids and not field_value:
            # Collect human-readable names of the module plus everything
            # depending on it, to show in the warning message.
            dep_ids = module_pool.downstream_dependencies(cr, SUPERUSER_ID, module_ids, context=context)
            dep_name = [x.shortdesc for x in module_pool.browse(
                cr, SUPERUSER_ID, dep_ids + module_ids, context=context)]
            message = '\n'.join(dep_name)
            return {
                'warning': {
                    'title': _('Warning!'),
                    'message': _('Disabling this option will also uninstall the following modules \n%s') % message,
                }
            }
        return {}
    def _get_classified_fields(self, cr, uid, context=None):
        """ return a dictionary with the fields classified by category::
                {   'default': [('default_foo', 'model', 'foo'), ...],
                    'group':   [('group_bar', [browse_group], browse_implied_group), ...],
                    'module':  [('module_baz', browse_module), ...],
                    'other':   ['other_field', ...],
                }
        """
        ir_model_data = self.pool['ir.model.data']
        ir_module = self.pool['ir.module.module']
        def ref(xml_id):
            # Resolve 'module.xml_id' to a browse record.
            mod, xml = xml_id.split('.', 1)
            return ir_model_data.get_object(cr, uid, mod, xml, context=context)
        defaults, groups, modules, others = [], [], [], []
        for name, field in self._columns.items():
            if name.startswith('default_') and hasattr(field, 'default_model'):
                # ('default_foo', target model, target field name)
                defaults.append((name, field.default_model, name[8:]))
            elif name.startswith('group_') and (isinstance(field, fields.boolean) or isinstance(field, fields.selection)) \
                    and hasattr(field, 'implied_group'):
                # 'group' attribute may list several comma-separated xml ids;
                # default container group is base.group_user (Employee).
                field_groups = getattr(field, 'group', 'base.group_user').split(',')
                groups.append((name, map(ref, field_groups), ref(field.implied_group)))
            elif name.startswith('module_') and (isinstance(field, fields.boolean) or isinstance(field, fields.selection)):
                mod_ids = ir_module.search(cr, SUPERUSER_ID, [('name', '=', name[7:])])
                # record is None when no such module exists in this database.
                record = ir_module.browse(cr, SUPERUSER_ID, mod_ids[0], context) if mod_ids else None
                modules.append((name, record))
            else:
                others.append(name)
        return {'default': defaults, 'group': groups, 'module': modules, 'other': others}
    def default_get(self, cr, uid, fields, context=None):
        """Populate the wizard with the *current* state of the system:
        ir.values defaults, group implications, module install states, and
        the results of every ``get_default_*`` method."""
        ir_values = self.pool['ir.values']
        classified = self._get_classified_fields(cr, uid, context)
        res = super(res_config_settings, self).default_get(cr, uid, fields, context)
        # defaults: take the corresponding default value they set
        for name, model, field in classified['default']:
            value = ir_values.get_default(cr, uid, model, field)
            if value is not None:
                res[name] = value
        # groups: which groups are implied by the group Employee
        for name, groups, implied_group in classified['group']:
            res[name] = all(implied_group in group.implied_ids for group in groups)
            if self._fields[name].type == 'selection':
                # Selection variants use integer-like values '0'/'1'.
                res[name] = int(res[name])
        # modules: which modules are installed/to install
        for name, module in classified['module']:
            res[name] = False if not module else module.state in ('installed', 'to install', 'to upgrade')
            if self._fields[name].type == 'selection':
                res[name] = int(res[name])
        # other fields: call all methods that start with 'get_default_'
        for method in dir(self):
            if method.startswith('get_default_'):
                res.update(getattr(self, method)(cr, uid, fields, context))
        return res
    def execute(self, cr, uid, ids, context=None):
        """Apply the wizard: write defaults, adjust implied groups, run
        ``set_*`` hooks, and install/uninstall modules. Returns the next
        configuration action or a client 'reload' action.

        Admin-only; raises AccessError otherwise.
        """
        if context is None:
            context = {}
        # active_test=False so archived records are seen by the operations below.
        context = dict(context, active_test=False)
        if not self.pool['res.users']._is_admin(cr, uid, [uid]):
            raise openerp.exceptions.AccessError(_("Only administrators can change the settings"))
        ir_values = self.pool['ir.values']
        ir_module = self.pool['ir.module.module']
        res_groups = self.pool['res.groups']
        classified = self._get_classified_fields(cr, uid, context=context)
        config = self.browse(cr, uid, ids[0], context)
        # default values fields
        for name, model, field in classified['default']:
            ir_values.set_default(cr, SUPERUSER_ID, model, field, config[name])
        # group fields: modify group / implied groups
        for name, groups, implied_group in classified['group']:
            gids = map(int, groups)
            if config[name]:
                # (4, id): link implied_group to each container group.
                res_groups.write(cr, uid, gids, {'implied_ids': [(4, implied_group.id)]}, context=context)
            else:
                # (3, id): unlink the implication, then also remove the
                # container groups' users from the implied group itself.
                res_groups.write(cr, uid, gids, {'implied_ids': [(3, implied_group.id)]}, context=context)
                uids = set()
                for group in groups:
                    uids.update(map(int, group.users))
                implied_group.write({'users': [(3, u) for u in uids]})
        # other fields: execute all methods that start with 'set_'
        for method in dir(self):
            if method.startswith('set_'):
                getattr(self, method)(cr, uid, ids, context)
        # module fields: install/uninstall the selected modules
        to_install = []
        to_uninstall_ids = []
        lm = len('module_')
        for name, module in classified['module']:
            if config[name]:
                to_install.append((name[lm:], module))
            else:
                if module and module.state in ('installed', 'to upgrade'):
                    to_uninstall_ids.append(module.id)
        if to_uninstall_ids:
            ir_module.button_immediate_uninstall(cr, uid, to_uninstall_ids, context=context)
        action = self._install_modules(cr, uid, to_install, context=context)
        if action:
            return action
        # After the uninstall/install calls, the self.pool is no longer valid.
        # So we reach into the RegistryManager directly.
        res_config = openerp.modules.registry.RegistryManager.get(cr.dbname)['res.config']
        config = res_config.next(cr, uid, [], context=context) or {}
        if config.get('type') not in ('ir.actions.act_window_close',):
            return config
        # force client-side reload (update user menu and current view)
        return {
            'type': 'ir.actions.client',
            'tag': 'reload',
        }
    def cancel(self, cr, uid, ids, context=None):
        # ignore the current record, and send the action to reopen the view
        act_window = self.pool['ir.actions.act_window']
        action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)])
        if action_ids:
            return act_window.read(cr, uid, action_ids[0], [], context=context)
        return {}
    def name_get(self, cr, uid, ids, context=None):
        """ Override name_get method to return an appropriate configuration wizard
        name, and not the generated name."""
        if not ids:
            return []
        # name_get may receive int id instead of an id list
        if isinstance(ids, (int, long)):
            ids = [ids]
        act_window = self.pool['ir.actions.act_window']
        action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)], context=context)
        name = self._name
        if action_ids:
            # Use the action's display name (e.g. "Configure Sales").
            name = act_window.read(cr, uid, action_ids[0], ['name'], context=context)['name']
        return [(record.id, name) for record in self.browse(cr, uid , ids, context=context)]
    def get_option_path(self, cr, uid, menu_xml_id, context=None):
        """
        Fetch the path to a specified configuration view and the action id to access it.
        :param string menu_xml_id: the xml id of the menuitem where the view is located,
            structured as follows: module_name.menuitem_xml_id (e.g.: "base.menu_sale_config")
        :return tuple:
            - t[0]: string: full path to the menuitem (e.g.: "Settings/Configuration/Sales")
            - t[1]: int or long: id of the menuitem's action
        """
        module_name, menu_xml_id = menu_xml_id.split('.')
        dummy, menu_id = self.pool['ir.model.data'].get_object_reference(cr, uid, module_name, menu_xml_id)
        ir_ui_menu = self.pool['ir.ui.menu'].browse(cr, uid, menu_id, context=context)
        return (ir_ui_menu.complete_name, ir_ui_menu.action.id)
    def get_option_name(self, cr, uid, full_field_name, context=None):
        """
        Fetch the human readable name of a specified configuration option.
        :param string full_field_name: the full name of the field, structured as follows:
            model_name.field_name (e.g.: "sale.config.settings.fetchmail_lead")
        :return string: human readable name of the field (e.g.: "Create leads from incoming mails")
        """
        model_name, field_name = full_field_name.rsplit('.', 1)
        return self.pool[model_name].fields_get(cr, uid, allfields=[field_name], context=context)[field_name]['string']
    def get_config_warning(self, cr, msg, context=None):
        """
        Helper: return a Warning exception with the given message where the %(field:xxx)s
        and/or %(menu:yyy)s are replaced by the human readable field's name and/or menuitem's
        full path.
        Usage:
        ------
        Just include in your error message %(field:model_name.field_name)s to obtain the human
        readable field's name, and/or %(menu:module_name.menuitem_xml_id)s to obtain the menuitem's
        full path.
        Example of use:
        ---------------
        from openerp.addons.base.res.res_config import get_warning_config
        raise get_warning_config(cr, _("Error: this action is prohibited. You should check the field %(field:sale.config.settings.fetchmail_lead)s in %(menu:base.menu_sale_config)s."), context=context)
        This will return an exception containing the following message:
            Error: this action is prohibited. You should check the field Create leads from incoming mails in Settings/Configuration/Sales.
        What if there is another substitution in the message already?
        -------------------------------------------------------------
        You could have a situation where the error message you want to upgrade already contains a substitution. Example:
            Cannot find any account journal of %s type for this company.\n\nYou can create one in the menu: \nConfiguration\Journals\Journals.
        What you want to do here is simply to replace the path by %menu:account.menu_account_config)s, and leave the rest alone.
        In order to do that, you can use the double percent (%%) to escape your new substitution, like so:
            Cannot find any account journal of %s type for this company.\n\nYou can create one in the %%(menu:account.menu_account_config)s.
        """
        res_config_obj = openerp.registry(cr.dbname)['res.config.settings']
        # Matches %(menu:xml.id)s and %(field:model.field)s placeholders.
        regex_path = r'%\(((?:menu|field):[a-z_\.]*)\)s'
        # Process the message
        # 1/ find the menu and/or field references, put them in a list
        references = re.findall(regex_path, msg, flags=re.I)
        # 2/ fetch the menu and/or field replacement values (full path and
        #    human readable field's name) and the action_id if any
        values = {}
        action_id = None
        for item in references:
            ref_type, ref = item.split(':')
            if ref_type == 'menu':
                values[item], action_id = res_config_obj.get_option_path(cr, SUPERUSER_ID, ref, context=context)
            elif ref_type == 'field':
                values[item] = res_config_obj.get_option_name(cr, SUPERUSER_ID, ref, context=context)
        # 3/ substitute and return the result
        if (action_id):
            # A menu reference was resolved: offer a redirect to it.
            return exceptions.RedirectWarning(msg % values, action_id, _('Go to the configuration panel'))
        return exceptions.UserError(msg % values)
| agpl-3.0 |
ryankuczka/xhtml2pdf | xhtml2pdf/wsgi.py | 79 | 2857 | # -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xhtml2pdf.pisa as pisa
import StringIO
import logging
log = logging.getLogger("xhtml2pdf.wsgi")
class Filter(object):
    """WSGI middleware base class.

    Wraps a downstream WSGI app. When ``should_filter`` accepts a response
    (based on its status and headers), the response body is buffered and
    passed to ``filter`` for rewriting before being sent to the client;
    otherwise the response passes through untouched.

    Subclasses implement ``should_filter`` and ``filter``.
    """
    def __init__(self, app):
        # ``app`` is the wrapped downstream WSGI application.
        self.app = app
    def __call__(self, environ, start_response):
        script_name = environ.get('SCRIPT_NAME', '')
        path_info = environ.get('PATH_INFO', '')
        sent = []  # captured [status, headers, exc_info] when buffering
        written_response = StringIO.StringIO()
        def replacement_start_response(status, headers, exc_info=None):
            if not self.should_filter(status, headers):
                # Pass-through: delegate to the real start_response.
                return start_response(status, headers, exc_info)
            else:
                # Buffer mode: remember the response metadata and collect
                # body writes into written_response instead of sending them.
                sent[:] = [status, headers, exc_info]
                return written_response.write
        app_iter = self.app(environ, replacement_start_response)
        if not sent:
            # should_filter declined: return the app's iterable unchanged.
            return app_iter
        status, headers, exc_info = sent
        try:
            for chunk in app_iter:
                written_response.write(chunk)
        finally:
            # WSGI requires close() on the app iterable when present.
            if hasattr(app_iter, 'close'):
                app_iter.close()
        body = written_response.getvalue()
        status, headers, body = self.filter(
            script_name, path_info, environ, status, headers, body)
        start_response(status, headers, exc_info)
        return [body]
    def should_filter(self, status, headers):
        """Return True when the response should be buffered and filtered.

        The base implementation never filters. (This replaces a Python 2
        debugging stub that printed the headers and implicitly returned
        None.)
        """
        return False
    def filter(self, script_name, path_info, environ, status, headers, body):
        """Rewrite a buffered response; return ``(status, headers, body)``.

        Abstract. The signature now matches the call site in ``__call__``
        (the previous stub declared ``(status, headers, body)`` and could
        never be invoked by this class).
        """
        raise NotImplementedError
class HTMLFilter(Filter):
    """Filter variant that triggers only on successful HTML responses."""
    def should_filter(self, status, headers):
        """Return True for a '200'-status response whose first
        Content-Type header declares 'text/html'; False otherwise."""
        # Non-200 responses are never candidates for filtering.
        if status.startswith('200'):
            for header_name, header_value in headers:
                # First Content-Type header decides the outcome.
                if header_name.lower() == 'content-type':
                    return header_value.startswith('text/html')
        return False
class PisaMiddleware(HTMLFilter):
    """Middleware that converts buffered HTML responses to PDF on demand.

    A downstream app opts in by setting ``environ["pisa.topdf"]`` to the
    desired download filename; without it the response is left unchanged.
    """
    def filter(self, script_name, path_info, environ, status, headers, body):
        pdf_filename = environ.get("pisa.topdf", "")
        if not pdf_filename:
            # No conversion requested: hand everything back untouched.
            return status, headers, body
        pdf_buffer = StringIO.StringIO()
        # Render the HTML into the buffer; rendering errors are reported
        # inside the generated PDF rather than raised.
        pisa.CreatePDF(body, pdf_buffer, show_error_as_pdf=True)
        pdf_headers = [
            ("content-type", "application/pdf"),
            ("content-disposition", "attachment; filename=" + pdf_filename)
        ]
        return status, pdf_headers, pdf_buffer.getvalue()
| apache-2.0 |
nanomolina/controlDeGastos | Icons/application_rc3.py | 6 | 36524 | # -*- coding: utf-8 -*-
# Resource object code
#
# Created: Wed Mar 20 13:44:05 2013
# by: The Resource Compiler for PyQt (Qt v4.8.4)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = b"\
\x00\x00\x03\x54\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xd6\xd8\xd4\x4f\x58\x32\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x02\xe6\x49\x44\x41\x54\x58\xc3\xd5\
\x97\xcd\x4e\x13\x61\x14\x86\xeb\x35\x94\x95\x7b\x71\xe1\xd2\xc4\
\xe0\x05\xb8\xe2\x0e\x5c\xb8\xf4\x02\x5c\xb1\x30\xea\x05\x18\x96\
\x26\x62\x58\xb8\xb0\x91\x58\x20\xd1\x9d\xbf\x89\xa4\x14\xb1\x52\
\xa4\x48\x45\x94\xfe\xd0\x02\x43\xff\xa6\x9d\x19\xa6\x65\x80\xe3\
\x79\x7b\xfa\x85\x51\x4a\x82\xc9\x21\x86\x49\xde\x9c\x33\xa7\xf3\
\xcd\xfb\x9c\xf3\x4d\x9b\x4e\x84\x88\x22\xff\x53\x91\x73\x01\xc0\
\xc7\xd5\x90\x6e\xff\xa5\xfb\xac\xc7\x3d\x3d\x64\x0d\xa9\x02\xf0\
\x31\x32\x3c\x3c\xbc\x6a\x34\x3a\x3a\xba\x19\x56\x3c\x1e\xaf\x26\
\x93\xc9\x56\x3a\x9d\x76\x13\x89\x44\x6b\x60\x60\x20\xcd\x6b\x6e\
\x68\x02\xa4\x38\xd2\xe1\xe1\x71\x99\xba\xef\xb7\xc9\xb2\x2c\xda\
\xdf\xdf\x27\x86\xf1\x78\xcd\x18\xeb\x8a\x1a\x40\x3f\xf3\xb0\x1c\
\xc7\xa5\x4c\x66\xb9\x0b\x14\x04\x01\xc5\x62\xb1\x3a\xaf\x7b\x70\
\x1a\x88\x53\x01\x1c\x1c\x10\x77\x77\xb2\x6c\xdb\xa1\xf9\xf9\xcf\
\x64\x0e\xd7\x75\xe9\xf9\xc4\x44\x17\x42\x05\x00\x26\x7b\xc1\xc9\
\xaa\x37\x1c\x4a\xce\xcd\x53\xf8\x70\x5d\x0f\x8b\x17\x54\x00\x82\
\x10\x40\x67\x4f\x14\xce\xed\xa6\x47\x1f\x67\x66\xe9\xf5\x9b\xb7\
\x14\x9f\x9c\xa4\xa9\xa9\x69\x7a\xf7\xfe\x03\x45\xa3\xd1\x65\x5e\
\x7f\x41\x05\xc0\xef\x10\xed\xb6\x25\x86\x85\x9a\xe3\x05\x94\x5d\
\xcd\xd1\xe4\xf4\x2b\x7a\x32\xfe\x94\x9e\xc5\x5e\xd0\x4c\x62\x0e\
\x8b\x17\x55\x00\xda\x81\x18\xf5\x13\x20\x3c\xff\x90\x6a\xcd\x36\
\x15\x37\xab\x94\x2f\x6e\x53\x89\x63\x8d\xb7\x85\xd7\x7e\x51\x01\
\xf0\x79\xcc\xcd\x5d\x1e\xb5\xc7\x7b\xdb\xee\x9f\x3b\xbe\xe4\x88\
\x5d\xb8\xbd\xee\xe2\x94\xca\x33\xe0\x75\xe4\xc6\x75\x57\x62\xd8\
\x10\x39\xea\xe6\x33\x44\xd4\x01\xa7\x06\xe0\xf4\x3a\xad\x39\x22\
\x98\x98\x68\x72\x80\x98\x6b\x50\x53\x9d\x00\x00\x2a\x2d\xb9\x31\
\xe2\x4e\x53\x8c\x10\x0d\x04\xf2\x6d\xfb\x28\xb6\x7c\x45\x00\x9b\
\x3b\xdb\x6a\xfc\x69\x8e\x3c\x6c\x88\x1a\xae\x39\x13\x80\x3a\x8f\
\xb7\x54\x23\x2a\xd7\xc5\x04\x06\x06\x00\x35\x28\x9c\x17\xab\xbc\
\x25\xbb\xca\x13\xc0\x4d\x61\x0e\x15\x2a\x72\x6e\xcc\x7e\x5a\x02\
\x68\x6a\xdd\xad\xf1\x94\x27\x00\x53\xdc\x1c\x71\x6d\x5b\x40\x60\
\x9a\xab\x1c\x75\x9e\xeb\x81\x41\x15\x47\x11\xc0\x6a\x89\x31\x0c\
\xd6\x77\x04\x20\x0c\x64\x26\x62\xb6\x69\x75\x8b\xa8\xaa\x09\x50\
\xb6\xc5\xbc\xd0\x03\xf8\xbe\x29\x63\x87\x29\x60\x0c\x18\x84\x1c\
\x00\x5b\x4d\x45\x00\x74\x03\x53\x98\xad\x94\xc5\x1c\xe7\x46\xe6\
\x1c\x00\xc8\x71\x5d\xa9\xa1\x08\x80\xfd\xfc\x56\x12\x73\x33\x01\
\x08\x35\x18\x42\xe8\xda\x7c\x8e\x29\xa8\x4e\x00\x5b\x00\x03\xc8\
\x98\x67\x36\x04\x00\x32\xe6\x85\xde\xf8\x17\x0b\xfc\x2c\xd8\x8a\
\x00\x18\x67\x3a\x4f\xb4\x54\x14\x23\x98\x02\x00\x02\x0c\x3e\xfb\
\xc5\x53\x28\xf0\x43\xb8\x66\x49\xf7\x6b\xf9\x52\x87\xd7\xbe\x54\
\x01\xc8\x55\x8f\xba\x4e\xad\x4b\x0e\x90\xaf\x85\xde\xb7\xc2\x92\
\x3d\x4f\xa6\xb3\xde\xa3\xb1\x71\xeb\xda\xd0\xf5\x15\x98\xb3\x6e\
\xa9\x00\x6c\x34\xa4\x6b\x18\xff\xe0\x11\x7f\x5a\x17\x53\xd4\x13\
\x0b\x59\x6f\xe4\xee\xbd\xe2\xa5\xc1\xcb\x4b\x7c\x6d\x8c\x75\x87\
\x35\xa8\xfa\xb7\x1c\xdd\x65\xd9\x3c\x8f\x1f\x19\xfe\x9e\xcf\x1e\
\x37\xbd\xc9\xba\x78\x26\x6f\x46\x00\x68\xf2\xff\x81\x99\x94\x9e\
\xe9\x3f\xbf\x19\x01\x42\xd3\xf4\xfc\xbd\x9c\x9e\xa5\x7e\x03\x51\
\x6c\x25\xa1\x92\x95\x0a\x77\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x05\x3a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xd6\xd8\xd4\x4f\x58\x32\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x04\xcc\x49\x44\x41\x54\x58\xc3\xb5\
\x97\x5d\x4c\x5b\x65\x1c\xc6\x77\x6f\xbc\xd9\xe5\x12\x49\x20\x71\
\xd7\x26\xe3\x4e\x13\xb8\x70\xd1\x85\x44\xbd\x50\xe3\x10\x18\xe5\
\x2b\x2e\x26\x4a\x04\x27\x86\xaa\x8b\x99\xe0\xd0\xa2\x6c\x19\x86\
\x39\x17\xdc\x1a\x16\x98\x80\x40\x6c\xa6\x43\xca\x20\x2b\x83\x1e\
\x28\xcc\xda\xd1\x96\xd2\xd2\x4a\x7b\xfa\x01\xa5\xd0\xef\x16\x1e\
\xdf\xff\xdb\x1d\xc7\xcc\x04\x2a\x87\x93\x3c\x39\x6f\x21\x9c\xe7\
\xf7\x3c\xef\x47\x0f\x87\x00\x1c\xca\x46\xcf\xbd\xfa\xe9\xbb\x4c\
\x5a\x26\x61\x0f\x6a\x60\xca\xd9\xe9\x79\xd9\x9a\x3f\x5d\x50\xf2\
\xa5\xc1\xe9\x8f\xa7\x57\xc3\x40\x30\x02\x84\xa2\x19\xad\xc7\x32\
\x8a\x27\x81\x58\x22\x73\xbf\x79\x6b\xda\x4b\x10\x72\x02\x1c\x7b\
\xe7\xac\xda\x1c\xd8\xc8\x98\x12\x40\x84\x99\x85\xe3\x19\x91\x31\
\x29\x1a\x4b\x61\x25\x94\x44\x38\x9a\x42\x73\x87\xc6\xbe\x13\xc4\
\xff\x02\x90\x12\x93\x79\x24\xf1\xc8\x58\x92\xcf\x1f\x84\x5d\x8c\
\xc2\xe5\x09\x22\x12\x4b\xa3\xf4\xc3\xef\x4d\x34\x75\x59\x01\xb0\
\xeb\xd8\x36\xd5\x90\x9e\x3a\xfc\xcc\xb9\xe7\x5f\x2e\x11\x3f\x56\
\x9e\x45\x45\x55\x0d\x2a\x99\xde\xaf\xad\xc3\x9d\xb1\x89\xc7\x00\
\xac\xb6\x25\xfc\xb9\xe8\x87\x6b\x15\x58\xf6\x04\x10\x08\xc6\xd2\
\xaf\x9c\xbe\x70\x9f\x41\x1c\xd9\x15\x80\x5d\x87\x99\x1a\x8a\x8a\
\x8a\xcc\x92\x5a\x5b\x5b\xdd\xa4\xaf\x55\xad\xfe\xaf\x54\xdf\xa6\
\x06\x06\x06\x31\x39\x35\x85\xd9\xb9\x39\xe8\x26\x26\x50\x50\x50\
\x80\x21\xcd\x6f\x7c\xde\x49\xa6\xf9\x05\xcc\x98\x5c\x1c\xc0\xe1\
\x4f\x41\xf4\x85\xf0\x43\xaf\xce\xcd\x00\x6a\xf6\x02\x50\x43\x66\
\xd8\xe5\x8a\xc7\xe3\xf0\x7a\xbd\x48\xa7\xd3\x98\x9c\x9c\x44\x65\
\x65\x35\x66\x67\x8d\xbc\x81\x07\x66\x1b\x74\xd3\x16\x0e\x40\x32\
\x2d\x78\xf0\xdd\x8d\x51\x8f\xac\x00\xe1\x70\x18\x46\xa3\x91\x8f\
\x53\xa9\x14\x7e\xea\xed\x45\xe3\x27\x9f\x61\x86\x41\x38\x96\xdc\
\x50\x77\x75\xe3\x4c\x43\x23\xce\x35\x9d\xc7\xed\x91\x71\x5c\xbc\
\x3e\x2c\x2f\xc0\xc6\xc6\x06\xf4\x7a\xfd\x63\x40\x7d\x7d\xfd\x50\
\x32\x88\xd0\x46\x1c\x66\x9b\x0b\x82\xc1\x88\xa9\x19\x13\xac\x0e\
\x11\x97\xba\x64\x6e\x80\x00\xa6\xd8\x3a\xd8\x7e\x45\x22\x11\x94\
\x2b\x2a\x30\xae\x13\x40\xe7\x04\x6d\x57\xda\xaa\x34\xbe\x7c\x53\
\xe6\x35\x40\x66\x3a\x9d\x0e\xc3\xc3\xc3\xe8\x65\xf5\xf7\xf7\xf7\
\x43\xab\xd5\xa2\xaa\xba\x06\x63\x77\xf5\x90\x0e\x2a\x77\x90\xed\
\x04\xb6\x0e\xda\xbb\x65\x06\xa0\x79\xb7\xdb\xed\x18\x1a\x1a\x42\
\x67\x67\x27\x7a\x7a\x7a\x38\x50\x49\x69\x19\x6e\x69\xf5\x10\xd7\
\x00\x6f\x08\xb0\xf9\x00\x67\x00\xb8\xd0\x25\x33\xc0\xd6\xd6\x16\
\xdf\x09\x81\x40\x00\xa2\x28\xc2\xef\xf7\x63\x6d\x6d\x0d\xa7\x14\
\x95\xd0\xfc\xae\xe7\xa9\xc9\x7c\xc1\x0b\x98\x3d\x40\x9b\xdc\x00\
\xdb\x41\x36\x37\x37\xf9\x76\xa4\x56\x14\x15\xd5\xe8\xfb\x55\xe0\
\xa9\x1d\x81\x47\x00\xe7\x3b\x0f\x00\x80\xcc\x25\x80\x24\x33\x4f\
\x24\x12\x28\x2b\xaf\xe2\x00\x7f\xb8\x00\x8b\x98\x01\xa0\x36\x5a\
\xd5\x07\x30\x05\xff\x98\x27\x93\x3c\x3d\x4d\x49\xc9\xa9\x4a\x0e\
\xa0\xb7\xb3\x03\x89\x3d\xc5\xf8\x17\x30\xb1\x00\x7c\x71\xf5\x00\
\x00\xa4\xea\xc9\x98\x14\x8b\xc5\x50\xa6\xa8\x82\x7a\x48\xc0\x98\
\x19\xb8\x6b\x05\xe6\x9c\x99\xfb\xe7\x57\x64\x04\x90\xd2\x53\x6a\
\x02\x88\x46\xa3\xdc\x3c\x14\x0a\xa1\xb8\xb4\x02\xd7\x06\x05\xdc\
\x66\x87\xe4\xa0\x01\x1c\x64\xc4\x04\x28\x3b\x64\x06\x48\x3d\x9c\
\x73\x12\x99\xd3\xb9\x40\x20\xc5\x65\x55\xb8\xd8\x2d\xa0\x7f\x3a\
\x63\xae\x7d\x90\x69\xe0\xa3\x76\x99\x00\xfe\x5d\x3d\xa5\x26\xad\
\xae\xae\x72\x88\xb7\x4a\x2a\x70\xb9\x57\xc0\x3d\x1b\xb8\x7e\x9e\
\x01\xee\xcc\x03\x67\x2e\xed\x13\x40\xaa\x9d\x44\x8b\x8e\x92\xd3\
\x71\x4c\xdf\x01\x2b\x2b\x2b\x58\x5f\x5f\xe7\x10\x27\x59\x03\xdf\
\x74\x09\x50\x4f\x00\xbf\xcc\x65\x1a\xb8\x32\x06\x34\xec\xa7\x01\
\xc9\x58\xda\xeb\x64\x4e\x69\x29\x39\x1d\x44\x04\x40\xf5\xd3\xcf\
\xde\x7c\x5b\x81\x96\xeb\x02\x4f\x7e\x75\x1c\xb8\x71\x0f\xf8\x71\
\x2c\x9e\x7e\xbd\x4e\x6d\xa6\x37\xaa\xac\x00\x9e\x64\x2c\x6d\x37\
\x32\x25\x00\xd1\x23\xf2\xe4\x12\xcc\x1b\x27\x15\x68\xef\x11\xa0\
\xbc\x66\x5b\x7f\x4f\x35\xe2\x3c\x71\x9a\xbf\x8e\x69\xf7\xfc\x4a\
\x26\x01\x90\xa9\x24\x69\xb5\x53\x42\x32\x0f\x06\x83\x70\xb9\x5c\
\xdc\x90\x5e\x4a\xe8\xb3\xc7\xe3\x81\xdb\xed\xc6\xf1\x13\xaf\x25\
\x9f\x7d\xa1\x9c\x4c\x3b\x98\x8a\x99\x8e\x3e\xc9\x78\x47\x00\x95\
\x4a\xc5\x01\xa4\x15\x2e\xcd\x37\x19\x52\x52\x3a\xf7\x29\xb5\xc3\
\xe1\xe0\x22\xe3\xc5\xc5\x45\x0e\xf5\xe2\xf1\x97\x5c\xf4\x1e\xb9\
\x93\xe9\xae\x00\x2d\x2d\x2d\x6e\xe9\x60\xa1\xd4\xd2\x97\x0d\x8d\
\x97\x97\x97\xe1\xf3\xf9\x60\xb3\xd9\xf8\x7d\x69\x69\x89\x43\x10\
\x00\x8d\x0b\x0b\x0b\xcd\xb2\x00\xd0\xa2\x92\x52\x93\x11\x8d\xe9\
\x4e\xdf\x78\x54\x3b\x35\x60\xb5\x5a\x79\xf5\xd4\x0a\xfd\xce\x60\
\x30\x24\xf2\xf2\xf2\xee\xb3\x67\x1c\xd9\x17\x40\x53\x53\x93\x5b\
\x9a\x67\x4a\x4f\x22\x13\xaa\x9a\xc6\x16\x8b\x99\x37\x40\x9f\x47\
\x47\x47\x23\x6d\x6d\x6d\xde\xfc\xfc\x7c\x13\xfb\xdb\x41\xa6\xb2\
\xbd\x9a\xff\x27\x40\x73\x73\x33\x9f\x02\x4a\x47\x10\x54\x3f\x55\
\x3f\x3f\x3f\xcf\xeb\xd6\x68\x34\x91\xba\xba\x3a\xe7\xc3\xb4\x5d\
\x4c\x1f\x30\x1d\xcd\xc6\x78\x47\x00\xa5\x52\xe9\x76\x3a\x9d\xbc\
\x62\x4a\x4a\x6f\x3e\x94\xb4\xbe\xbe\xde\x99\x93\x93\x23\x99\x16\
\x67\x53\x75\x56\x00\x8d\x8d\x8d\x6e\x8b\xc5\x82\x81\x81\x81\x48\
\x6d\x6d\xad\x33\x37\x37\x57\x56\xd3\xdd\x00\xf8\x7f\x46\x4c\xc2\
\x41\x99\x6e\xd7\xdf\x43\x39\x56\x18\x85\x70\xc8\x04\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x05\x2b\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xd6\xd8\xd4\x4f\x58\x32\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x04\xbd\x49\x44\x41\x54\x58\xc3\xed\
\x57\x6b\x4c\x93\x57\x18\x3e\x23\x71\xc9\x32\xe9\x16\x97\xa8\x54\
\x65\x38\x9d\x02\x15\xf6\x03\x87\x32\x93\x01\x66\x2c\x5b\x70\xc4\
\x30\xff\x60\xa2\x2e\x1a\x3a\x1d\x4e\x03\xba\x31\x89\x5b\xb3\x80\
\xd9\x0c\x84\x02\x19\x58\x1c\x14\x8b\x85\xb2\x82\x95\x5e\xe4\x66\
\x0b\x8e\x31\xf8\xc3\x46\xcb\x2d\x81\x15\xdc\xa8\xc2\x1c\x1b\xb7\
\x6a\x69\x91\xf2\xee\xbc\x87\xaf\x0c\xdc\xb8\x0d\x61\xd9\xb2\x93\
\x3c\xed\x97\xf3\x7d\xfd\xde\xe7\xbc\xef\xf3\x5e\x4a\x00\x80\xfc\
\x93\x20\xff\x0a\x02\x74\x09\x28\x44\x14\xd9\x14\x71\x14\x01\x2b\
\x46\x80\xae\xdd\x64\xdd\xc6\x66\x22\x4c\xf8\x95\xc4\x8b\x47\xc8\
\xa1\xd3\xf7\xc8\x8e\x97\x3b\x38\x32\x61\x2b\x41\x20\x85\x9c\xbe\
\x30\x48\x2e\xdd\x80\x19\x40\x32\xab\x79\x4d\xf4\xbe\xfb\x72\x13\
\x68\x64\x06\x91\x04\x5e\xa3\x51\xf4\x06\xee\x85\x47\xf5\xd0\xbd\
\x83\xcb\x4d\x20\x9b\x9d\xf6\x40\x74\x2f\xbd\x16\x32\x3d\x20\x89\
\x3f\x48\xa5\x2c\x1b\x01\x8c\x31\x79\xc1\xbb\x9d\x88\x4b\xc6\xd7\
\xc6\x26\x0e\xa0\x10\xb9\xfd\x42\xfe\xc5\x2b\x36\x46\x8c\x12\x5c\
\x4e\x02\x93\xa7\xa7\xa7\x0d\xcc\xd3\x39\xb9\x98\x63\x36\x14\x0a\
\xd2\xe4\xa3\x2b\x41\x20\x8c\x29\x9e\x2a\xdf\x37\x47\xeb\xdc\x7b\
\xb5\xcc\x89\x9e\x40\x44\x96\x54\x83\x2b\x2c\x0b\x36\x46\x48\x08\
\x13\xf5\x64\x2a\x7b\x2e\x54\x03\x01\xf8\x03\x37\xbf\xc0\x0e\x34\
\x2a\x54\xdf\x62\x88\x52\xd5\x2c\x58\x03\x74\x1d\x16\x08\x04\x7a\
\x45\x55\xf5\xc8\xa0\x6d\x74\xc2\xd4\x73\xf7\x21\xbe\x73\x51\x95\
\x90\xae\x8f\xd0\x13\xcf\xe5\x94\x83\x87\xb4\x02\x9e\xcc\x2e\x03\
\xd4\x06\xdd\xaf\x99\xcb\xb0\xaf\xaf\xaf\x3e\xbf\xd2\x60\xb5\xdb\
\xed\x80\xf8\x79\xe4\x3e\xc4\x5e\xab\xb4\xb9\x88\x2f\x86\x80\x27\
\xd3\xc0\x67\xf9\x8e\x19\xf5\x60\xd7\x5e\x33\xba\x76\xda\x73\xee\
\x68\xd8\xc7\xc7\x47\x9f\xab\xab\xb0\x0e\x0f\x0d\xc1\x10\x87\xb2\
\xf6\x2e\xe7\x96\x37\xf7\x77\x73\x61\xd8\xbd\xe8\x5e\x80\x2f\x66\
\x9a\xa0\x86\xdf\xa9\x36\x42\xf7\xf0\x03\xd8\x19\x9f\xd4\xcf\xa5\
\xe7\x1a\x8a\x98\x2d\x7e\xfe\x6d\x97\x54\x1a\x6b\x5f\x5f\x1f\xb8\
\xd0\xd1\x73\x07\x62\x72\x15\x56\x4e\xc4\x87\x97\xd4\x8c\x30\x14\
\xe9\x15\xb7\x1e\x38\x1c\x0e\x40\xa4\xd6\x19\x31\x9e\x85\x9b\x05\
\x7e\x6d\xa9\x25\x1a\x5b\x97\xd9\x0c\xe6\x2e\x0a\xf3\x24\x14\xdf\
\x36\x8e\x7b\xbd\x1e\xd1\xcd\x42\xc8\x09\x6f\xa9\x04\x3c\xd1\xbd\
\x56\xab\x15\x10\x77\x7f\x1b\x84\xf3\x92\x5c\xbb\x52\xa9\x84\xfa\
\xfa\x7a\x30\x99\x4c\x0c\x75\xdf\x35\xc1\x51\xb1\x64\x18\xc9\x51\
\x44\x3e\xb6\x76\xcc\xb4\x40\x4f\x93\x5f\x7e\xd3\xd6\xdf\xdf\x0f\
\x32\x99\x0c\x44\x22\x11\xa8\x54\x2a\x90\x4a\xa5\xa0\xd1\x68\x20\
\x4b\x5b\x39\xbe\xe9\x95\xe0\x1f\xb8\x53\xaf\x79\x2c\xf3\x00\x97\
\x8e\x22\x9e\xc7\x86\xe6\x53\x29\x19\xf6\x82\x82\x02\xe6\xe2\xa0\
\xa0\x20\xe0\xf1\x78\x60\xb1\x58\x40\x5b\x5e\x01\xfb\xcf\x26\x0c\
\x2d\xa6\x53\xce\x67\x94\xcf\x09\x4c\x83\xe2\x5b\x7b\xe6\xc2\x60\
\x9a\xb2\x14\x14\x0a\x05\x88\xc5\x62\xc8\xcc\xcc\x84\xa2\xa2\x22\
\x50\xab\xd5\xd0\xd9\xd9\xc9\x60\xec\xfe\xc9\xb9\xc9\xdb\xa7\x75\
\x2e\xb7\xcf\x4b\x80\xae\xb7\xd8\x29\x70\x0e\xc0\x6a\x97\xac\x78\
\x88\xca\x7f\x82\xe2\x29\x89\x0e\x3e\x97\x2b\x21\x5b\x96\x0f\x07\
\x63\xe3\x47\x84\x1f\x26\xd8\x92\x72\x64\x8e\x6f\x1a\xbf\x07\xa3\
\xd1\x08\x2d\xad\x2d\xf0\xcb\xc0\x20\x1c\x38\xf1\xbe\x05\xb3\x62\
\xc1\x04\x5c\x69\x84\x85\x85\x84\x46\xdc\x26\xe7\x32\xac\x2c\xcf\
\x33\xb5\x13\xec\x3b\xe3\xba\xd3\x33\xaf\x82\xe5\xfe\x7a\x89\x06\
\x9e\xde\xfc\x62\x1b\xf7\x3c\x92\x8d\x7b\x66\xab\x4f\x5b\xca\x35\
\xed\x58\x43\x43\x3d\x34\x34\x34\x80\xa5\xb7\x17\x32\x14\xc5\xc3\
\xf3\xe9\xc0\x65\x3c\x92\xe5\x28\x9e\x36\x5d\xe5\x9c\x2a\x32\x78\
\x7d\xf4\x83\x2e\x5a\x6c\x12\x31\x0c\x1b\x25\xea\x71\xf7\x2f\xcb\
\x27\xef\x05\x87\x5f\xfe\xd3\xe4\x44\x0b\x4c\x68\xf4\xc9\x3e\x75\
\x95\x1e\x0c\x06\x03\xb4\xb7\xb7\xc3\xd7\xc6\x96\x31\xae\x81\x09\
\x66\xf1\x36\x6d\x38\x68\x3c\x49\x3a\x3a\x65\xf8\x62\x81\x83\x44\
\xbd\x57\x43\xb6\x0a\x5e\x9b\x2a\xc3\x94\x5c\xb0\x42\x0f\xab\x24\
\xb4\x04\x9f\x4a\xaa\x9b\x43\x37\x31\x28\xd4\x4f\xf2\x0a\xc7\x74\
\x3a\x1d\xd4\xd6\xd6\x82\xc9\x7c\xdb\xb9\x61\x9b\xf7\x5f\xea\x62\
\xb2\xe5\x7e\x9c\x75\x1f\x0d\xf3\xb2\xd4\x4e\xf2\xf6\xb1\xeb\x2e\
\xb6\xae\x94\xc3\x90\x6c\x97\x55\xc1\x4b\x57\xab\x80\x9c\x4d\x6e\
\x5a\xd0\x1c\x49\xbd\xb1\xe7\x88\xb0\xef\xca\x57\xc5\x50\x5a\x5a\
\x0a\x1d\x3f\xf6\x4c\x04\x06\x87\x74\x3c\xaa\x0b\xc2\x84\x46\x8d\
\x07\xc8\x6f\x02\xd9\xf9\xaa\x7e\x9a\xf1\x30\x46\x8e\x36\x20\xaf\
\xbc\x4a\x78\x43\x69\x00\x92\x28\x1d\x98\xcd\x95\xb3\x79\xc3\x7d\
\x3d\xbf\xf9\x44\x6a\xa6\x5d\x2e\x97\x43\x53\x4b\x2b\x44\x1c\x7b\
\xf7\xce\xf4\x14\x25\xae\xf1\x8a\xf5\x77\x9c\xf5\x70\x02\xc2\xd9\
\x0f\x89\xd1\x81\x03\x4f\x8e\xf7\xdc\xd2\x69\xe7\xf3\xdf\x75\xfc\
\x6f\x14\x2e\x36\xd2\xef\xd8\x17\x69\x49\xbe\x2c\x9d\xc8\xd3\x96\
\x3b\xa7\x0f\x31\x8c\x25\xc6\xdf\x9f\xba\x77\x5f\x71\x35\xa0\x41\
\x6c\xb5\x08\x8c\xf9\x94\xf1\xe0\xf0\x33\x4b\x9a\x7c\x68\x13\x5a\
\xbd\xce\xa3\xd9\x6b\x4f\x48\xf7\x0c\x0f\xb0\x0f\xfe\xf3\x87\xc8\
\xf9\x2f\xee\xb9\x49\x6e\x00\xf6\x7b\x3e\xed\xf7\x08\x1e\x2a\x3e\
\x5d\xe5\x58\xaa\xf1\x47\x5a\xf5\xb6\x59\x0b\x11\x1d\xb3\x43\xc9\
\x91\x38\x09\x39\xf9\xa9\x96\x21\xfa\x5c\x1a\x0d\xcf\xb3\xff\xff\
\x37\xfc\x4f\x13\xf8\x1d\xe7\x87\x19\xb9\x44\xc3\x01\xcf\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x04\xa3\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xd6\xd8\xd4\x4f\x58\x32\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x04\x35\x49\x44\x41\x54\x58\xc3\xe5\
\x97\xcd\x8f\x54\x45\x14\xc5\x7f\xb7\xea\xd6\x7b\xaf\xdb\x6e\xc7\
\xf9\x40\x9d\x89\x46\x4d\x34\x99\x44\x8d\x1a\x48\x98\xc4\x8c\x1f\
\x1b\xfe\x02\x4c\x5c\xf1\x07\x18\x16\x2e\x4d\x5c\x6b\x58\xc3\x8e\
\xc4\x8d\x1b\x17\xce\x82\x68\x74\x41\x5c\x18\x0d\xe2\xc4\xc6\x00\
\x3d\x60\x50\x51\x19\x60\x02\xa2\x0e\x0c\x83\xd3\xfd\x5e\xf7\x94\
\x8b\xaa\xee\xf9\x60\xe6\x0d\x84\x51\x16\x56\x52\xa9\xce\x7b\xb7\
\xeb\x9e\x3a\xf7\xd4\xa9\x7a\xea\xbd\xe7\x7e\x36\xe5\x3e\xb7\x3e\
\x80\x5d\xbb\x76\xbd\x03\xec\xfd\x8f\xf2\x4e\x35\x1a\x8d\x03\xeb\
\x19\xd8\xbb\xef\xbd\xa3\x3b\x1f\x1f\x76\x00\x9c\x3c\x3a\xcf\xcc\
\x97\x37\x58\x9c\xef\xdc\x53\xa6\xda\xa0\xf2\xdc\x6b\x03\xbc\xb8\
\x67\x10\x80\x8b\x7f\x16\x7c\xf8\xee\x1e\x80\xdb\x00\x70\xfc\xec\
\x1c\xdf\x3f\x30\x04\x78\x2e\xfd\xb8\xc0\xfe\xb7\xce\x6f\xcb\x72\
\x0f\x1d\x79\x9a\x0b\x23\x96\xd3\x9f\x1f\x64\xfc\xd5\x7d\x9b\x6b\
\x40\x45\xb0\x16\x40\x78\x70\x2c\x23\xcb\xb2\x6d\x01\x30\x30\x96\
\x61\x8d\x50\x1b\x7c\x14\x23\x25\x22\x14\x2b\xd8\x18\x91\xd5\x95\
\x73\xe7\xce\x83\x2a\xb8\x04\xd2\x14\xb2\x0c\xd2\x2c\x8c\x49\x0a\
\x49\x12\xde\x77\x3a\x90\xe7\x90\xb7\xa1\xd5\x82\x76\x2b\x8e\x6d\
\x28\x72\xb2\xfa\x38\xd6\x0a\xe3\xaf\xbc\x49\x6b\xf1\xfa\xe6\x00\
\xac\x15\xac\x15\x04\xb0\x46\xd8\xbd\x7b\xe7\x16\x6b\xeb\x86\xae\
\x80\x5a\xa8\x56\x81\xea\x6d\x51\x8d\xaf\x04\xb5\x82\xf7\xa0\xa6\
\x84\x01\x67\x05\x35\x82\x08\xa8\x0a\x95\x2c\xc3\x23\x20\x1e\x08\
\xc0\xf0\x1e\x2f\x02\xde\x23\x12\x26\x15\x7c\x88\x23\xc4\x21\x1e\
\x3c\x21\x5e\x40\x4d\x58\x18\x40\xd7\x4a\x89\x06\xac\xa0\xda\x63\
\x00\x9a\x33\xbf\x05\x8a\x53\x07\x69\x02\x95\x04\xb2\x34\xf6\x04\
\x12\x07\x4e\xa1\xe8\x40\x5e\x40\x2b\x8f\xbd\x05\x4b\x39\xb4\x73\
\xc8\x0b\x54\x87\x71\x3d\x00\x2a\xe5\x25\x70\x31\x40\xd5\x30\x39\
\xf9\xd2\xd6\x0a\xf3\x3e\xd0\xaf\x16\xaa\x1b\x8b\xf6\xd8\x27\x61\
\x61\xbd\x1c\x25\x25\x20\x00\xf0\x81\x8d\x34\x4d\xa3\x3a\xc3\xb3\
\x98\x11\x89\x6c\x07\xda\x63\x09\x56\x98\x5f\x29\x46\xfc\x61\xcd\
\x72\x7f\x61\x1d\x2d\xd1\x80\x3a\x09\x54\x49\x18\x4f\x34\x2f\xe0\
\x9d\x85\xc4\x21\x89\xc3\x67\x09\x92\x69\xd8\x11\x89\xe2\x13\x87\
\x58\x8b\xef\x76\x91\xbc\x80\xbc\x03\xed\x02\xdf\x6a\x23\xed\x02\
\xf2\x02\x9f\x77\x50\x1d\x45\xd5\x20\x78\x3a\xeb\x54\x78\x9b\x06\
\x9c\x33\x78\x0f\x03\x8f\x24\xbc\xfe\xf2\xf3\x77\x68\xe8\x36\x68\
\xa4\xbe\xf1\xeb\xc6\xfc\xdf\xb1\x04\x52\x5e\x82\x44\x4d\x5f\x84\
\x8f\x0d\xa5\x38\xe7\xb6\xc5\x88\x9e\x18\x4b\xb9\x76\xb3\x03\x08\
\x9d\x52\x11\xaa\x90\xb8\x50\xef\x5a\xc5\x30\x7d\xb1\xcb\x40\xc5\
\xb0\x0e\xf4\x26\xad\x57\xf9\x55\x2e\xe1\xe1\xc6\xd2\x32\xf5\xcc\
\x70\x7d\xc9\x84\x2d\xe9\x4a\x19\x10\x9c\x1a\xc0\x73\xe5\x66\x97\
\x2b\x37\xbb\xac\x51\x57\x3f\xd7\xaa\x64\x7e\xc5\x27\xa2\x29\xac\
\x05\x15\xc3\x9c\x0b\xb5\x77\xa6\x6c\x17\xa8\xc1\xa9\x20\xc8\x1a\
\x35\xaf\x9b\x35\x1a\x8f\x59\x31\x9e\xfe\x7b\xe9\xef\x14\x00\xf1\
\x82\xef\x9b\x58\x30\x2b\x57\x56\x02\x55\x21\xd1\x90\xfc\xe7\x53\
\xdf\xf2\xeb\x99\x13\x2c\x2d\xde\xb8\xa7\xfa\x57\x6a\x03\x3c\xf5\
\xec\x4e\x9e\x79\x61\x02\x0f\xa8\x33\x5b\x31\x10\x03\x7c\x87\xf7\
\xf7\xbf\xc1\xc2\xc2\x02\xb7\x6e\xdd\xa2\x28\x0a\x44\x04\x6b\x2d\
\xd6\x5a\x54\x15\x55\xc5\x39\x87\xaa\x62\xad\xc5\x98\xf0\xdf\xe5\
\xe5\x65\xf2\x3c\xef\xf7\x23\xcd\xf9\xb8\xf2\x2d\x18\x70\x56\x50\
\x17\x18\xdc\x31\x3a\xb6\x72\x4f\x38\x7e\x9c\xe9\xe9\x69\x8c\x31\
\x78\xef\x99\x98\x98\x60\x72\x72\xf2\x8e\x59\xd8\x31\x3a\xd6\xdf\
\x86\xae\xd4\x09\x55\x70\x36\xac\xa2\x56\xaf\xf7\x6b\x39\x33\x33\
\xc3\xd0\xd0\x10\xd6\x5a\xbc\xf7\x34\x9b\xcd\xbb\x02\x50\xab\xd7\
\x70\xd1\x88\xb4\xd4\x88\x14\x9c\x0b\x27\x5c\xa0\x2a\x00\xa8\x56\
\xab\x64\x59\xd6\xa7\xb8\x37\xde\x69\x73\x1a\xa9\x17\x41\x4b\xad\
\x38\x1e\xc7\xbd\x23\xb4\xd7\x8c\x31\x88\x44\xdf\x8f\x3a\xb8\xab\
\x9b\xaf\x35\xa8\x0d\xf3\xf6\x18\x2e\x3d\x8e\x83\x29\x6d\xe3\xd5\
\xdb\x12\xa9\xf7\xe5\x56\x6c\xad\xf4\x91\x0e\x8e\x0c\xc3\xf2\xef\
\xdb\x02\xe0\xa1\x91\x61\xd4\xc2\xb5\x2b\x97\x59\x9c\xbf\xbe\x05\
\x03\x36\xf8\xc0\x60\xad\x02\x0b\xdb\xc3\xc0\x50\xad\xc2\xec\xc5\
\x4b\x9c\xfd\xee\x1b\xce\x9f\x9c\x9e\x03\xa6\x36\x04\x60\x24\x5e\
\x4a\x05\x12\x0b\xed\x91\x27\xa9\x3d\x0c\x6f\x1f\x38\xc8\x66\xc7\
\x81\x27\x3a\xf1\x2a\xe7\x35\x1e\x32\x81\x14\x28\xba\x70\xf9\xea\
\x55\xce\x34\x8e\xd1\xfc\xfa\x8b\xb9\xd9\x1f\x4e\x1d\x02\x0e\x6f\
\x08\xe0\xb3\x8f\x3e\xe0\xa7\xd3\x27\x57\x99\xe9\xda\xa3\x86\x55\
\xe6\xbb\x1e\x04\x1b\x3c\x5f\x1d\x6f\x7c\x77\xee\x8f\xd9\x5f\x0e\
\x01\x87\x1b\x8d\xc6\x5f\x1b\x01\x98\x9a\xfe\xf4\xe3\x7f\xf5\x73\
\x6c\x7d\xf2\x35\x00\xe2\xb7\xda\x81\xff\xdd\xd7\xf1\x3f\x4d\xf0\
\x4b\xb9\xe8\x46\x89\xaf\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\
\x60\x82\
\x00\x00\x06\x6d\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x06\x34\x49\x44\x41\x54\x78\x5e\xad\x97\x5b\x6c\x54\xc7\
\x1d\xc6\x7f\x73\xce\xd9\x8b\xbd\xf6\xfa\x16\xa0\xbe\x00\x0e\xb2\
\x69\x63\x24\x42\x4a\x21\x22\xa1\x2d\x95\x62\xa5\x2f\xee\x4b\x68\
\x2b\x95\xa6\x55\xa5\xc6\x60\x55\xaa\xda\xb4\xaa\xfa\x56\x09\x55\
\xca\x03\x94\x27\xda\x07\x84\x14\x29\xad\xc4\x8b\xa5\x52\x83\x79\
\x08\xc5\x18\x39\x0e\x69\xd3\x84\x9a\x9b\x63\x6a\xec\xb2\x04\x1b\
\x3b\xbb\xf6\x7a\x8f\xbd\xbb\xde\xb3\x67\xa6\xc3\x68\x85\xe5\x72\
\x6c\x88\xc9\x27\x7d\xfa\x9f\x9d\x87\xfd\x7e\xf3\x9f\x99\x73\x11\
\x4a\x29\x82\x24\x84\x78\x05\x78\x9e\xc7\x6b\x48\x29\xf5\x77\xd6\
\x28\x27\x20\xb8\x43\xbb\x01\x68\x97\x52\xbe\xc6\x63\x64\x59\xd6\
\x07\x1a\xf6\xbb\x40\xb7\x06\x39\xff\x14\x00\x26\xfc\xb7\xed\xf5\
\xe2\x60\x5d\x44\x44\x6e\xce\x89\x8a\x2b\x57\xae\x50\x5d\x53\x8d\
\x40\x00\xa0\x50\x08\x65\x28\x41\x29\x66\xd3\x69\x5e\xa9\x17\x2f\
\xbc\xb4\x4e\x6c\x3b\xf1\x1f\xb9\x47\x83\x7c\x5b\x43\x4c\x3c\x4d\
\x07\xf6\xff\x60\x8b\xdd\x2c\x25\xf8\x4a\x32\x3c\x3c\x4c\x65\x65\
\x25\x2b\xc9\x75\x5d\x1e\xc0\x6e\xa9\xb0\x22\x1b\xa2\x2a\x72\x3f\
\xa7\xea\x81\xb5\x03\x08\x2d\x05\x48\xa1\x0d\xf4\x5d\xbc\x48\x2e\
\x97\xc3\x2f\x16\x51\x4a\x91\xcf\xe7\x59\x5c\x5c\xa4\x50\x28\x50\
\xd4\x63\xb5\xb5\xb5\x94\x01\x58\x80\xf8\x82\xf6\x80\x01\x00\x36\
\x44\x05\x1f\x0f\xbc\x4b\x3e\x3b\x8f\x85\x44\x95\x32\xe2\xb6\xc4\
\xb6\x04\x21\x21\x70\x3e\x53\x6c\x8c\x3b\x80\x44\x2a\x04\xf0\x9c\
\x10\x02\xe0\xcb\x40\x05\x50\x0f\x34\x60\xc4\x48\x69\x9f\x24\x02\
\x01\x4e\x9c\x38\x21\x00\x81\x05\xd2\x87\x96\x96\x67\x09\x65\x6d\
\x14\xe5\x28\xa5\xb4\x41\x08\x58\x57\x19\x25\xe2\xd8\x44\x42\x16\
\xc3\x13\x73\x5c\xbc\x3d\x41\xf7\x58\x8e\x5c\x24\xbe\xa9\xbd\x7d\
\xf7\xef\x2d\xcb\x5a\xdc\xb1\x63\x47\x59\x55\x55\x95\xd3\xd8\xd8\
\x18\x7e\xe0\x86\x86\x86\xd0\xa5\x4b\x97\xdc\xae\xae\xae\x08\xf0\
\xd6\xaa\x1d\x00\x13\x44\x55\x2c\xc2\x73\xd5\x31\xf2\x9e\x4f\xa1\
\x28\x91\x4a\x61\x09\x41\xd8\xb1\x88\x86\x6c\xe6\x72\x05\x12\xa2\
\x8e\x3f\x9f\xff\x2b\x0d\x4d\x1b\x01\x22\xc0\x66\x96\x84\xef\xfb\
\x78\x9e\x47\x75\x75\xb5\x9e\x50\x4b\xf4\xea\xd5\xab\x87\x84\x10\
\x28\xa5\xde\x5a\x11\xc0\xb2\x41\x00\xb6\x2d\x90\xda\xb6\x14\x38\
\x08\xa4\x12\x58\xc2\x8c\x1b\x8f\x4c\xb9\xec\x7b\xf5\x3b\xd4\x37\
\x36\x11\x7c\x2f\xc1\x84\x67\x32\x19\xca\xcb\xcb\xcd\x66\x3e\x76\
\xec\xd8\x26\xbd\x7f\x0e\x2e\x41\x2c\x01\xd0\xd9\xd9\xa9\x0e\x1d\
\x3a\xa4\x6c\x21\x08\x59\x10\xb6\x2d\x1c\xc7\xc6\x42\x50\xb4\xcd\
\x1a\x1b\x00\xc7\xb2\x88\x38\x96\xae\x02\x60\x59\x78\x10\xc0\xdc\
\xdc\x1c\x35\x35\x35\x06\x20\x1a\x8d\x72\xe4\xc8\x91\xcd\xc0\x03\
\x88\x1b\x1a\xa2\xc7\x62\xb9\xb0\x6d\x74\x30\x66\x8d\xcb\x23\x36\
\xb1\xa8\xa3\xc7\x2c\x32\x8b\x1e\x93\x99\x1c\x63\xa9\x79\xee\xcc\
\x2e\xe8\xdf\x45\x72\xf9\x3c\xab\xc8\x2c\x41\x36\x9b\x35\xa7\x66\
\xe9\xff\x6d\x0e\x1c\x38\xb0\x1e\xe8\x00\x58\x06\xa0\xb4\x74\x16\
\x8e\x0d\xe1\x90\xc0\x53\x8a\xb1\xa4\xcb\x8d\x8c\x83\xd3\xb2\x97\
\xa6\x7d\xaf\xb3\xb5\xe3\x17\xac\xdb\xfb\x3a\x0d\x2f\xb4\x73\xfb\
\xce\x24\xfd\xfd\xfd\x24\x93\x49\x94\x52\xe6\xfa\xf8\xf1\xe3\xe8\
\xba\xac\x33\xe7\xce\x9d\xe3\xe8\xd1\xa3\x1c\x3e\x7c\x98\xde\xde\
\x5e\x12\x89\x84\x04\x2c\xa1\x15\xdc\x01\xed\xff\xce\xe6\xf8\xe7\
\x94\x4f\x6b\xc7\xcf\xf8\xe6\x2f\xdf\x26\xf6\xf5\x37\x99\x7c\xa6\
\x83\x6b\xfe\x2e\xae\xf1\x2d\x64\x6b\x17\xad\x7b\x7f\x4e\x5e\x56\
\x73\xfa\x6f\x67\xd1\x77\x4d\xee\xdc\x9d\xe2\x1b\xaf\x76\x72\xfd\
\xfa\x75\x03\xa0\x67\x6b\xd6\x3f\x16\x8b\x99\xeb\x78\x3c\x8e\xe3\
\x38\x25\x38\x04\xc0\x23\x00\x96\x25\x98\xca\x41\x3a\xde\xca\xfe\
\xdf\xbd\x4d\xd5\xae\xd7\x28\x84\x62\x08\xdb\x42\x59\x82\x6c\x41\
\x72\x7f\x66\x91\x4f\xee\x66\x18\xb8\xea\x72\xfa\x1f\x61\x64\xd5\
\x5e\xae\x8f\xdc\x67\x32\xd7\xc6\x85\x0f\xee\x9b\x00\xed\x87\xa1\
\xcd\xcd\xcd\xb4\xb5\xb5\x19\x37\x35\x35\xa1\xa1\x14\x20\x83\x1f\
\x46\x16\xdc\x71\x15\xdf\xff\xe9\x6f\xa8\x6c\xd8\x48\xe2\xec\x3b\
\x4c\x8f\x5e\xc3\x89\x94\xb1\xb5\x79\x07\x9b\x5b\xb6\xf3\x49\x79\
\x25\x63\x09\x97\xcf\x66\xf2\xdc\x9d\xce\x32\xa1\xed\x88\x0d\x4c\
\x27\xe7\xd8\xb7\x2b\xca\xfa\x25\x00\x33\x7b\x3d\x6b\xea\xea\xea\
\x00\xcc\x75\x2a\x95\x32\x00\x4a\x2b\x10\xa0\xb9\x5a\x70\xe1\x9d\
\x63\x28\x2c\xca\xe6\xc6\xd9\x10\x8f\x52\x94\x92\x7b\xc3\x7d\x24\
\x65\x05\xdb\xda\x7f\x4c\x4d\xdb\xcb\x7c\x3c\x9c\x66\xd2\x5f\xc0\
\xcd\x78\x2c\xcc\x6b\x2f\x78\x20\x00\xb5\x74\x3a\x42\xa1\x90\x09\
\x2d\xdd\xea\x1f\x8e\x01\x2a\xf8\x3e\x60\xc1\xc6\xb8\xa0\x50\x1c\
\x23\x1c\x8b\x53\xb7\xa5\x96\x92\x78\x76\x7d\x05\xe9\xac\xc7\x68\
\xff\x9f\x98\xae\xbc\x4c\xcb\xf6\x83\xb8\x0b\x61\xbc\x82\xa4\x58\
\x94\x78\xda\x21\xc7\x42\x2d\xaa\x80\xe3\x69\xa0\x96\xd5\x15\x01\
\x00\xd6\xc7\x43\x84\xca\x23\xfc\xbf\x6a\x63\x21\x9e\xa9\x0c\x73\
\xe1\xdf\x83\xec\xd9\xf9\x13\xca\xa3\x0e\xb9\x32\x47\x03\x28\x03\
\x61\x6b\x00\x16\x4b\x21\xa5\x1c\x25\x30\x2a\x15\xa4\x5c\x05\x40\
\x58\xa5\x2a\xcc\xf5\x23\xfa\x70\x6c\x86\xf1\x59\x8f\xef\xfd\xfa\
\x8f\xdc\xca\xd4\xe0\x44\x5c\xa2\x11\x1b\xcf\x93\x14\x3d\x07\xd3\
\x01\xa5\x90\x52\xf2\x50\x6a\x59\x01\x56\x05\x10\x08\x4c\x0d\x04\
\x18\x9d\x76\xf9\xd5\x5f\x86\x18\xbd\xb7\x80\x3d\x93\x67\xd3\xba\
\x32\xf2\x79\x5f\xbb\x68\xea\xce\xaf\xd4\x70\xf9\xdd\xe0\x25\x00\
\x9e\x78\x09\x4c\xb8\x10\x3c\xa2\xd6\x2f\x55\xf2\x87\x1f\x3e\xcf\
\xf5\x4f\x33\x44\x1b\xb7\xb1\xf3\xc5\x97\x59\x12\x5c\x4e\x60\x8e\
\xdb\x53\x01\x28\xc0\x12\x25\x00\x6d\xd4\x52\x7d\xb1\xb5\x96\xdd\
\x5b\xe2\x74\xbf\x97\xa5\x6a\xf7\x57\xf9\xd1\x1b\x6f\x10\xa0\xb5\
\x03\x98\xb5\x37\xd5\xd8\x08\x01\xd2\xcb\x53\x70\x53\x78\xf3\x33\
\x14\xb3\x69\x0a\x19\x1f\x25\xfd\xd5\x82\xd6\x08\xf0\xf0\x29\xe7\
\xe3\xe7\x33\x14\xe6\x75\xa8\x0e\xd6\x00\xcb\xf7\x89\x10\xc1\x33\
\x7d\xfa\xd7\x72\x8c\xb2\x13\x37\x03\xc7\x01\xb2\x1e\xfe\xad\x94\
\xcc\x6f\xf7\x44\x54\x03\xd8\x5f\x70\x07\x08\x92\x09\xfd\xd7\x3d\
\x3f\xfd\x7e\x42\xa6\xcf\xdf\xf6\xef\x02\xee\x76\x3b\xfc\x92\x06\
\xa8\xe3\x73\xca\x75\x5d\x1f\x70\x57\xed\x00\x40\x32\xab\x0a\x1f\
\x7e\x2a\xd3\xbd\xb7\xfc\xd4\xcd\x69\x39\x05\xf4\x03\x97\x74\x68\
\xbf\x10\xa2\xd3\xb6\xed\xaf\x7d\x9e\x25\x58\x58\x58\xf0\x07\x06\
\x06\xd2\x27\x4f\x9e\x9c\x06\xba\x83\x00\x3e\x1a\x49\xca\xad\xe3\
\xb3\x2a\xd7\x3b\xe2\xa7\x6e\x4c\xcb\xd1\x52\xe8\x59\x1d\x74\x8b\
\x00\x3d\x09\xc0\xd0\xd0\x90\xdb\xd3\xd3\x93\xd2\x4e\xcf\xce\xce\
\x9e\x2e\xbd\x1d\xdf\x08\x02\xe8\xee\xea\x29\x00\x8c\x04\x84\x06\
\x85\xaf\x08\x30\x35\x35\x55\xd0\x2f\x22\xa9\x53\xa7\x4e\x25\xc7\
\xc7\xc7\x2f\x03\x67\x81\x7e\x1d\xec\xae\xb8\x09\x4b\xdf\x76\xda\
\x4f\x26\x85\x01\x40\x08\x40\x61\x5a\xfc\xde\xe0\x60\xba\xbb\xbb\
\x3b\xa5\xdf\x8a\xcc\x24\xd0\x5e\xed\x73\xcd\x61\xed\x9a\x77\x33\
\x6e\x11\x60\x70\xf0\xfd\x74\x5f\x5f\x5f\xfa\xcc\x99\x33\xa6\xc5\
\xa5\xd0\x8f\x78\x02\x89\xb5\x9e\x63\x21\x44\x18\x78\x13\xd8\x4f\
\x69\x73\x06\xb4\xf8\xb1\xfa\x1f\xbd\xfa\x2a\x5f\xf2\xd8\x15\x9d\
\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x08\x19\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xd6\xd8\xd4\x4f\x58\x32\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x07\xab\x49\x44\x41\x54\x58\xc3\xad\
\x57\x5b\x50\x93\x67\x1a\xf6\xca\xce\xec\xcc\xf6\x62\x2f\xbc\xd9\
\xe9\xce\xec\x6e\xbd\xda\xd9\x9b\xb5\xce\xba\x3b\x7b\xb0\xad\xcc\
\x7a\xb1\xce\xce\x3a\xb3\x76\x54\x70\x75\xdb\xe2\x81\xd6\xb6\x54\
\x04\xbb\xa5\x20\x6d\xc1\x82\x06\x08\x07\x51\x42\x80\x80\x80\x02\
\x21\x81\x10\x92\x40\x48\x10\x73\x24\x21\x67\x72\x80\x04\x42\x20\
\x9c\x09\x47\xb5\x54\x78\xf6\xfb\x7e\x13\x16\x30\x58\x8b\x7d\x67\
\x9e\xf9\x2f\x92\xfc\xcf\xfb\x3e\xcf\xfb\xbe\xdf\x97\x5d\x00\x76\
\xfd\x98\x20\xf1\x0b\x82\x14\x02\x03\xc1\x75\x82\x03\xcf\xfd\xfe\
\x8f\x48\xbc\x9b\x20\xe1\x57\xaf\xef\xb5\x2a\x8c\xd6\x65\xdb\x02\
\x60\x19\x1e\x5b\x09\x27\xf1\x33\xfa\x19\x81\x22\xfc\xdc\x3e\x76\
\x48\x7e\x8a\xa0\xb9\xb6\x59\x1c\x32\xcf\xad\x42\x39\xfe\x1d\x44\
\xf6\x51\xd8\xc7\xe6\xe8\x87\x86\x3d\x7b\xf6\x58\x53\x52\xae\x2c\
\xca\x3a\x3a\x10\x4e\xe2\xe5\x49\xc3\xc4\x31\x04\xb7\x3e\x49\xf9\
\x2c\x60\x9b\x5d\x59\x53\x4d\x03\x4d\xb6\x11\x34\xeb\xfb\x20\x31\
\x79\x60\x19\x9d\xc5\xbb\xef\xbe\x3f\xc5\xab\xbe\x83\xf1\x89\x29\
\x4c\x4f\xcf\xae\x92\xef\xd7\xbc\x74\x02\x11\x9f\x0f\xbe\x1d\xe3\
\xb2\x04\x43\x4f\xb4\x33\x40\x8b\x7b\x06\xcd\x3d\x2e\x34\xeb\xec\
\xa8\x57\xf6\x20\x87\x53\x85\x32\x5e\x35\x43\xbc\xb0\xf4\x90\x81\
\xc1\x60\x5c\x26\xbf\x4b\x7c\xe1\x04\x48\x1c\x24\x38\x41\xfd\xdd\
\xea\x73\x27\xf1\xb9\x27\x04\x48\x87\x97\xc1\xd7\xbb\x20\x22\x55\
\x37\xdc\x37\xa2\xb8\x4e\x88\x2c\x56\x3e\xcc\x56\xdb\x3a\x71\x04\
\x2c\x16\x6b\x2c\xfc\xce\xe7\x27\x10\x91\x36\x93\x95\x3f\x46\x7d\
\xa5\xfe\x12\xc4\x6f\xf4\x59\x31\xb6\x02\x7e\xef\x20\x5a\x7b\x9c\
\xe0\x3f\x30\xa1\x4c\x28\x43\x46\x0e\x1b\xb2\x0e\xf9\x26\xd2\xf9\
\xc5\x65\xcc\x2d\x2c\x21\x34\xbf\x88\xbd\x7b\xf7\x5a\xc9\x3b\x7e\
\xba\x6d\x02\x24\x7e\x43\x90\x46\x3d\x35\x13\x69\x75\xb3\x80\xd2\
\x3f\x0f\xcb\xc4\xe2\x9a\x50\xa1\x5a\xb4\x6c\xf1\x59\xa0\xb6\xa0\
\xa6\x5d\x8d\x2f\xb2\x73\x71\xb7\x9e\xff\x0c\x31\x25\x9d\x09\xcd\
\x63\x62\x6a\x06\x83\x43\x81\x27\xe4\xdd\xbc\x2d\xd3\xb0\x3b\x92\
\x03\x33\x26\xd4\x53\xb5\xd3\xfb\x58\x4f\x88\xc5\x03\x21\x88\x2c\
\x43\x50\xba\x46\xd0\xed\x09\x42\xe5\x9b\x42\x9b\x73\xfc\xa9\xcf\
\x5a\x1b\xee\x2a\x74\xc8\xbc\xc9\x45\x09\xa7\x6c\x93\xcf\x9b\x88\
\x27\xa7\x11\x18\x1d\xc3\x80\x6f\x08\xa2\xd6\xd6\x25\xc2\x51\xdb\
\x28\x12\x87\xc6\x1f\xaf\x82\x2f\x62\x94\x4d\x89\x24\x90\x22\xea\
\x52\x2d\x9a\x42\xab\xe8\x18\x79\x04\xa1\xc5\xcf\x10\x53\x74\xf6\
\x0d\xa3\xd3\xe1\x87\xd4\x3c\x80\x16\xbd\x03\x0d\x5d\x06\x14\xd5\
\x0a\x90\x91\x95\x0d\x2f\x79\xf1\xc6\xaa\xa9\xd4\xb3\x73\x0b\x4c\
\xc5\x94\xd8\xdd\xef\x85\xc9\x62\x05\xb7\xbc\x12\xa5\xe5\x95\x4b\
\x13\xf3\xcb\xab\x23\x0f\x01\x37\xd9\x11\xe6\xd9\x15\x84\x97\x15\
\x13\x06\xcb\x3c\xd0\x68\xf2\xa3\xdd\xee\x5f\x27\x96\x3b\x86\x20\
\xb3\x78\xd7\x7d\xe6\x08\xa4\xf8\x3c\x33\x1b\x2a\x8d\x36\xaa\xdc\
\x53\x33\x21\x8c\x8e\x8d\x33\x15\xd3\x26\xe4\x37\x09\xf1\xc1\xc5\
\x8f\x51\x73\xaf\x01\xbe\x65\x60\xfc\x11\xa0\x23\x13\x23\xf2\xce\
\xa1\xbe\x5d\xb9\xb8\x51\x01\x83\x81\x74\x74\x4d\xa7\x1e\x0a\x67\
\x80\xa9\xb8\xdd\xea\x83\xd8\xe8\x42\x93\xca\xcc\xf8\x7c\xe5\xcb\
\x2c\x88\xda\x24\x51\x89\xa7\x67\xe7\x18\x1b\x86\x86\x47\x60\x77\
\x38\x49\x82\x3a\x24\x7c\xf8\x21\xae\xb3\x0b\xe1\x99\x5c\x80\x6f\
\x09\xd0\x90\xde\xe1\x0f\x2c\x81\xab\x1f\xc4\x7d\xef\x04\xdd\x07\
\x1d\x61\xeb\xff\x9f\xc0\x1d\xb9\x16\x1d\xf6\x21\x48\xcc\xfd\x4f\
\x7d\xee\xd4\x22\x9d\x55\x84\xaa\x9a\xba\x4d\x3e\x47\xe4\x8e\xf8\
\x3c\x3c\x12\x84\xd3\xdd\x0f\xbd\xc1\x88\xc2\xe2\x62\x9c\x7e\x2f\
\x1e\x3d\x03\x01\xf4\x2f\x02\x83\x84\xbc\xc5\xff\x2d\xee\x3a\x43\
\x28\x51\x91\xf7\xf6\x05\xf1\x4e\xdc\xbf\x7d\x84\x33\x69\xe3\x20\
\x18\xf4\x33\xab\xe0\xc9\x54\x68\x35\x38\xd1\xd8\xdd\x0b\x9e\x58\
\x89\xac\x5c\xf6\x33\x3e\x47\xaa\x9e\x9c\x9e\x65\xe4\xee\xf7\x0e\
\xa2\xd7\x6c\x41\x43\x03\x1f\x27\x62\xe3\x20\xe9\xd6\xc0\x45\xcf\
\x01\x52\x90\x24\xb8\x86\xb2\x9e\x00\x6e\xb4\xdb\x50\xd1\x1b\x44\
\x85\xce\x8b\x4a\x7e\x0b\x6d\xbe\x9b\x5b\x27\xd1\xa0\x99\xf8\x16\
\x65\x22\x05\xee\x29\xf4\x28\x13\xc8\x90\x78\x35\x0b\x1a\xad\x3e\
\xaa\xdc\x63\x13\x93\xf0\x0d\x0d\xc3\x66\xef\x83\xb4\x5d\x8e\xc4\
\x4b\x97\x90\xc3\xca\xc3\xd4\x63\xc0\x4e\x7a\x49\x31\x4e\xfa\x89\
\x94\x7f\x5b\x3b\x84\x7c\x85\x13\x25\x6a\x1f\x4a\xd5\x03\xe8\xf2\
\x30\xa3\x28\x22\xf8\xf9\x33\x09\x74\x8f\x2e\xa1\xa8\xbe\x15\xa5\
\x7c\x09\xb2\x4a\x2a\xf0\xcf\xe3\x71\x51\xe5\xf6\x07\x46\xd1\xe7\
\xf2\x40\xab\x37\x20\xfd\x6a\x06\x92\xbf\x48\x83\xcd\x37\x02\x27\
\xa9\xda\x40\x1a\x4c\xe0\x7b\x88\x52\x9d\x1f\x45\xdd\xfd\x0c\x71\
\x41\x97\x1b\xc5\xdd\x1e\x88\x9c\x41\xfc\xf9\xcd\xb7\x5d\x84\xeb\
\x6c\xb4\x43\xd0\x28\xf7\x4e\x23\xa7\xfc\x1e\xb2\x4b\xab\xf1\x51\
\xea\x57\x48\xfe\x6f\xea\xfa\x58\x51\xb9\x47\x82\xe3\xf0\x0c\xf8\
\x60\x34\x99\x51\xc9\xab\xc2\xfb\x67\xcf\x41\xfe\x40\x03\x3f\xe9\
\x6e\xb2\x8d\x19\xb9\x6f\x69\x06\x19\xd2\x9b\x2a\x2f\x72\xe5\x0e\
\xe4\x75\xf6\xa1\xf0\xbe\x1b\x1c\x95\x1b\xf9\x9c\xca\x29\xc2\x53\
\xb8\xdd\x29\xdc\x2b\x76\x04\x90\x51\xc8\xc5\x95\x6b\x79\x38\x11\
\x9f\x80\x9b\xb7\x6e\x33\x63\x15\x91\xdb\x6a\x73\x40\x22\x6d\xc7\
\x85\x84\x0f\x50\x74\xbb\x0c\xf3\x2b\x80\x9f\x34\x58\xf7\x24\x20\
\x1c\x7c\x84\x4a\xd3\x18\x38\xfa\x61\x86\x9c\x56\xfd\x55\xb3\x1e\
\xac\x0e\x3b\xb8\x3a\x1f\xd9\x21\x1e\x7a\x2f\xe0\x13\xbc\xba\x5d\
\x02\x26\xbe\xc1\x83\x94\x6f\xd8\x38\x9f\x9c\x8a\x03\x7f\x3d\x04\
\x63\xaf\x99\xe9\x6e\x2a\xb7\x46\xd7\x83\xa4\xcb\xc9\x48\xff\x3a\
\x8b\x8c\xd5\x3c\x53\xb5\x71\xf6\xa9\xdc\x35\xf6\x69\x5c\x97\x59\
\x19\xd9\xbf\x6e\x21\xa7\xa0\xd4\x82\x74\xbe\x1a\x57\x9b\x34\x60\
\xc9\xcc\x10\xbb\x82\xf8\xe5\xaf\x5f\xa7\x67\xc0\x3b\xe1\x75\x1f\
\x35\xcc\x35\xdd\x66\x7c\x94\x96\x85\xb8\x73\x17\xf1\x97\x43\x31\
\x4c\xd5\x74\x99\xf0\xaa\xaa\x71\xfa\xf4\x19\x68\xcc\x0e\x8c\x92\
\x2d\x36\x14\x1e\xab\x5a\xc7\x0c\x78\xe6\x71\x70\x0d\x23\x4c\xa3\
\x65\x8a\x0c\x8c\xec\xb4\xfa\x9c\xb6\x5e\x94\x74\x39\xd0\x66\xf7\
\xaf\x1e\x3d\x11\x4b\x47\x2e\x6f\xc3\x79\x13\x35\x2c\x5c\x99\x1a\
\xf1\x97\x3e\xc7\xd1\xd8\x33\xf8\x38\x31\x09\x86\x5e\x13\x1a\x9b\
\x04\xf8\xdd\x1b\xfb\x51\x4f\xd4\xf1\x90\x99\xee\x9a\x00\xaa\xad\
\x93\x60\x2b\x5d\x0c\x39\xf5\xbc\xf0\xbe\x67\xbd\xea\xcc\x16\x3d\
\x4a\x55\x1e\x08\x6d\x01\x94\xd4\xf1\x43\xe1\x65\x53\x40\xf0\xca\
\xf7\x25\x60\x2b\x6e\x6a\xc7\xa9\x84\x44\xc4\x1c\x39\x8a\xdc\x7c\
\x36\x5a\x5a\xc5\x38\x14\x13\x83\x2f\x39\x35\xc8\x14\x6a\x98\xe6\
\xa2\xd5\xd2\x27\xf5\x9a\x7a\x4c\x13\xa1\x49\x64\xb7\x99\x90\xdb\
\x6e\x46\xb9\xda\x8d\x06\xa5\x76\x39\x2c\x39\x3d\xf9\x4e\x13\xec\
\xd9\x72\xd4\x47\x0d\x3b\xab\x46\x88\x63\xff\x39\x8f\xdf\xee\xfb\
\x3d\x1a\xf9\x02\x9c\xbf\x90\x80\x93\xf1\x17\x70\xa3\xad\x07\x19\
\xc4\x4f\x4a\x14\xe9\x6e\xba\x58\xa8\xef\x2c\xfa\x94\x98\x50\x28\
\xb7\x40\xe9\x0e\x3c\xf9\x57\xec\x29\x2a\x77\x2d\xc1\x67\x04\xfb\
\xb6\xb9\xe4\x44\x8d\xbe\xcc\xb2\x5a\xfc\xe3\xe4\x19\x1c\x3c\xf4\
\x37\xb0\x72\xf3\xb0\xef\xc0\x1f\x50\x20\xd1\x21\x89\x27\x65\x2a\
\xa6\x4b\x85\x3e\xbf\x21\xd5\x46\xe4\x2e\x90\x5b\x21\xb0\x0c\xae\
\xe5\xdc\xe2\xd2\x11\x13\x13\xe4\x87\x6f\x3c\xaf\x3c\xe7\x96\x15\
\x35\x9c\x69\x45\xe5\xf8\xfb\xb1\x58\x1c\x3f\x19\x87\x37\xf6\xef\
\xc7\x8d\x3a\x11\x92\xab\xa4\x0c\x21\xed\x70\xea\x35\x55\x21\x8b\
\x34\x5b\xc9\x03\x37\x2a\x34\x6e\xd4\x49\x3a\x17\xc3\x72\x73\x08\
\x8e\x6d\x95\xfb\x87\x24\xe0\x4a\x65\x73\x70\xe4\xf8\x29\x1c\x3e\
\x7c\x98\x8c\x63\x2e\x32\x05\x2a\x5c\x22\xd5\xd3\x5d\x7e\x4d\xdc\
\x0b\x36\xe9\x74\x76\xa7\x1d\x77\x8c\xe4\x88\xb6\xf9\x9e\x84\xb7\
\x1a\x95\xfb\x22\xbd\x49\xfd\x80\x0b\x6d\xf4\x04\x32\x4a\x78\x4c\
\x0f\x9c\x4b\x49\xc3\xb5\xa6\x2e\x7c\xc2\x6d\x65\x36\x59\xf1\x83\
\x01\x5c\x97\x9a\xc1\x51\x7b\x20\xf3\x04\xd7\xce\x25\x26\x05\x36\
\xc8\xfd\xc7\x9d\xc8\x1d\xd5\x82\xdc\x1a\x01\xce\x5e\x4e\x45\x81\
\x58\x85\x78\xf6\x5d\x5c\xa9\x55\x90\xaa\xfb\xc0\x96\xdb\x50\xad\
\x75\xe3\xae\x54\x41\x2f\x10\xca\x0d\x72\xbf\xba\xd3\x6a\xa3\x05\
\xb7\xa2\x51\xf8\x1d\xaf\x43\x8d\x4f\xb9\x2d\x88\xcb\xe6\xe1\x9a\
\x48\x8f\xaa\x1e\x2f\x9a\x35\xe6\xc7\x7f\x7a\xf3\x2d\x57\x78\xac\
\xa8\xdc\xaf\xbd\xac\xdc\xd1\xe2\x08\xdd\x05\x5c\x75\x1f\xde\xcb\
\xaf\x45\xb9\x76\x00\x32\x67\x60\xf5\xc2\xa7\x97\xa9\xdc\xf7\x08\
\xd2\xa9\xdc\x3b\xf8\x03\xf3\xc2\xf1\x13\x82\xca\x1c\xee\x9d\x50\
\x0b\x39\x94\xb8\x0d\xc2\xc8\x16\xa3\x17\x87\xc3\x2f\x22\xf7\x0e\
\xff\xda\x6d\x8a\xdd\x61\x99\xd5\x1b\xb6\xd8\x6b\xbb\x5e\x32\xbe\
\x2f\x89\xff\x01\x66\xb9\x5f\xfc\x11\x80\x3d\xcf\x00\x00\x00\x00\
\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x06\
\x07\x03\x7d\xc3\
\x00\x69\
\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x07\
\x04\xca\x57\xa7\
\x00\x6e\
\x00\x65\x00\x77\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x06\x7c\x5a\x07\
\x00\x63\
\x00\x6f\x00\x70\x00\x79\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x07\
\x0a\xc7\x57\x87\
\x00\x63\
\x00\x75\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x08\xc8\x58\x67\
\x00\x73\
\x00\x61\x00\x76\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x0a\xa8\xba\x47\
\x00\x70\
\x00\x61\x00\x73\x00\x74\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x06\xc1\x59\x87\
\x00\x6f\
\x00\x70\x00\x65\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x06\x00\x00\x00\x02\
\x00\x00\x00\x12\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x26\x00\x00\x00\x00\x00\x01\x00\x00\x03\x58\
\x00\x00\x00\x7e\x00\x00\x00\x00\x00\x01\x00\x00\x18\xdd\
\x00\x00\x00\x50\x00\x00\x00\x00\x00\x01\x00\x00\x0d\xc5\
\x00\x00\x00\x66\x00\x00\x00\x00\x00\x01\x00\x00\x12\x6c\
\x00\x00\x00\x3c\x00\x00\x00\x00\x00\x01\x00\x00\x08\x96\
"
def qInitResources():
    """Register the embedded (pyrcc-generated) resource data with Qt."""
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    """Unregister the embedded resource data from Qt's resource system."""
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
# Auto-register on import, as pyrcc-generated modules conventionally do.
qInitResources()
| gpl-2.0 |
stuart-knock/bokeh | examples/plotting/file/ajax_source_realtime.py | 22 | 1793 | import numpy as np
from bokeh.plotting import figure, show, output_file
from bokeh.models.sources import AjaxDataSource
output_file("ajax_source_realtime.html", title="ajax_source_realtime.py example")
source = AjaxDataSource(data_url='http://localhost:5050/data', mode="append",
if_modified=True, polling_interval=1000, max_size=125)
p = figure()
p.line('x', 'y', source=source)
show(p)
import time
from threading import Thread
from collections import namedtuple, deque
Entry = namedtuple('Entry', ['x', 'y', 'creation'])
entries = deque(maxlen=120)
def gen_entry():
    """Produce sine-wave sample points forever, appending them to `entries`.

    Runs on a daemon thread; each Entry records its x index, sin(x*pi/10)
    and a creation timestamp used for If-Modified-Since filtering.
    """
    global entries  # NOTE(review): only mutated via append; `global` looks redundant
    x = 0
    while True:
        last_entry = Entry(x, np.sin(x*np.pi/10), time.time())
        entries.append(last_entry)
        print("Entry generated: %s" % str(last_entry))
        x += 1
        # After the deque has filled once, throttle to one burst of 10 per ~2s.
        if x > entries.maxlen and x % 10 == 0:
            time.sleep(2)
t = Thread(target=gen_entry)
t.daemon = True
t.start()
import json
from flask import Flask, Response, request
from bokeh.server.crossdomain import crossdomain
app = Flask(__name__)
@app.route('/data', methods=['GET', 'OPTIONS'])
@crossdomain(origin="*", methods=['GET', 'POST'])
def hello_world():
    """Serve entries newer than the client's If-Modified-Since as JSON.

    Returns {'x': [...], 'y': [...]} and sets Last-Modified (a float epoch
    timestamp) so the AjaxDataSource only polls for new points.
    """
    global entries
    try:
        modified_since = float(request.headers.get('If-Modified-Since'))
    except TypeError:
        # Header absent -> .get() returned None -> float(None) raises TypeError.
        modified_since = 0
    new_entries = [e for e in entries if e.creation > modified_since]
    js = json.dumps({'x':[e.x for e in new_entries], 'y':[e.y for e in new_entries]})
    resp = Response(js, status=200, mimetype='application/json')
    if new_entries:
        resp.headers['Last-Modified'] = new_entries[-1].creation
    elif modified_since:
        # No new data: echo the client's timestamp back unchanged.
        resp.headers['Last-Modified'] = modified_since
    return resp
if __name__ == "__main__":
app.run(port=5050)
| bsd-3-clause |
github-account-because-they-want-it/django | django/template/backends/dummy.py | 480 | 2037 | # Since this package contains a "django" module, this is required on Python 2.
from __future__ import absolute_import
import errno
import io
import string
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.template import Origin, TemplateDoesNotExist
from django.utils.html import conditional_escape
from .base import BaseEngine
from .utils import csrf_input_lazy, csrf_token_lazy
class TemplateStrings(BaseEngine):
    """Template backend built on the stdlib string.Template ("dummy" engine)."""
    app_dirname = 'template_strings'
    def __init__(self, params):
        """Accept the standard engine params; reject any OPTIONS keys."""
        params = params.copy()
        options = params.pop('OPTIONS').copy()
        if options:
            raise ImproperlyConfigured(
                "Unknown options: {}".format(", ".join(options)))
        super(TemplateStrings, self).__init__(params)
    def from_string(self, template_code):
        """Compile a template directly from a string."""
        return Template(template_code)
    def get_template(self, template_name):
        """Load the first matching template file, recording misses in `tried`."""
        tried = []
        for template_file in self.iter_template_filenames(template_name):
            try:
                with io.open(template_file, encoding=settings.FILE_CHARSET) as fp:
                    template_code = fp.read()
            except IOError as e:
                if e.errno == errno.ENOENT:
                    # Missing file: remember it for the error message, keep looking.
                    tried.append((
                        Origin(template_file, template_name, self),
                        'Source does not exist',
                    ))
                    continue
                raise
            return Template(template_code)
        else:
            # Loop exhausted without a hit (for/else): nothing readable was found.
            raise TemplateDoesNotExist(template_name, tried=tried, backend=self)
class Template(string.Template):
    """string.Template wrapper exposing a Django-style ``render`` method."""
    def render(self, context=None, request=None):
        """Render with HTML-escaped context values plus lazy CSRF entries.

        Missing placeholders are left intact (safe_substitute semantics).
        """
        values = {}
        if context is not None:
            for key, value in context.items():
                values[key] = conditional_escape(value)
        if request is not None:
            # CSRF values are lazy and intentionally not escaped.
            values['csrf_input'] = csrf_input_lazy(request)
            values['csrf_token'] = csrf_token_lazy(request)
        return self.safe_substitute(values)
| bsd-3-clause |
ity/pants | contrib/node/src/python/pants/contrib/node/tasks/node_run.py | 7 | 1521 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.util.contextutil import pushd
from pants.contrib.node.tasks.node_paths import NodePaths
from pants.contrib.node.tasks.node_task import NodeTask
class NodeRun(NodeTask):
    """Runs a script specified in a package.json file, currently through "npm run [script name]"."""
    @classmethod
    def register_options(cls, register):
        """Register the --script-name option (defaults to 'start')."""
        super(NodeRun, cls).register_options(register)
        register('--script-name', default='start',
                 help='The script name to run.')
    @classmethod
    def supports_passthru_args(cls):
        # Extra command-line args after '--' are forwarded to the npm script.
        return True
    def execute(self):
        """Run the configured npm script inside the target's node path.

        Raises:
          TaskError: if npm exits non-zero.
        """
        target = self.require_single_root_target()
        if self.is_node_module(target):
            node_paths = self.context.products.get_data(NodePaths)
            node_path = node_paths.node_path(target)
            # '--' separates npm's own arguments from those passed to the script.
            args = ['run-script', self.get_options().script_name, '--'] + self.get_passthru_args()
            with pushd(node_path):
                result, npm_run = self.execute_npm(args, workunit_labels=[WorkUnitLabel.RUN])
                if result != 0:
                    raise TaskError('npm run script failed:\n'
                                    '\t{} failed with exit code {}'.format(npm_run, result))
| apache-2.0 |
lfblogs/aiopy | aiopy/required/aiohttp/web_reqrep.py | 1 | 20801 | __all__ = ('Request', 'StreamResponse', 'Response')
import asyncio
import binascii
import cgi
import collections
import http.cookies
import io
import json
import warnings
from urllib.parse import urlsplit, parse_qsl, unquote
from types import MappingProxyType
from . import hdrs
from .helpers import reify
from .multidict import (CIMultiDictProxy,
CIMultiDict,
MultiDictProxy,
MultiDict)
from aiopy.required.aiohttp.protocol import Response as ResponseImpl, HttpVersion10
from .streams import EOF_MARKER
sentinel = object()
class HeadersMixin:
    """Mixin adding parsed Content-Type/charset/Content-Length accessors.

    Host classes must provide a ``self.headers`` mapping.
    """
    _content_type = None
    _content_dict = None
    # Sentinel distinguishes "never parsed" from a real header value of None.
    _stored_content_type = sentinel
    def _parse_content_type(self, raw):
        # Cache the raw header so repeated property reads skip re-parsing.
        self._stored_content_type = raw
        if raw is None:
            # default value according to RFC 2616
            self._content_type = 'application/octet-stream'
            self._content_dict = {}
        else:
            self._content_type, self._content_dict = cgi.parse_header(raw)
    @property
    def content_type(self, _CONTENT_TYPE=hdrs.CONTENT_TYPE):
        """The value of content part for Content-Type HTTP header."""
        # The default argument binds the header constant once at class
        # definition time, avoiding a module attribute lookup per access.
        raw = self.headers.get(_CONTENT_TYPE)
        if self._stored_content_type != raw:
            self._parse_content_type(raw)
        return self._content_type
    @property
    def charset(self, _CONTENT_TYPE=hdrs.CONTENT_TYPE):
        """The value of charset part for Content-Type HTTP header."""
        raw = self.headers.get(_CONTENT_TYPE)
        if self._stored_content_type != raw:
            self._parse_content_type(raw)
        # None when the Content-Type carries no charset parameter.
        return self._content_dict.get('charset')
    @property
    def content_length(self, _CONTENT_LENGTH=hdrs.CONTENT_LENGTH):
        """The value of Content-Length HTTP header."""
        l = self.headers.get(_CONTENT_LENGTH)
        if l is None:
            return None
        else:
            # NOTE(review): int() will raise ValueError on a malformed header.
            return int(l)
# Container for one uploaded file parsed from a multipart/form-data POST body.
FileField = collections.namedtuple('Field', 'name filename file content_type')
############################################################
# HTTP Request
############################################################
class Request(dict, HeadersMixin):
    """HTTP request received by the server.

    Subclasses dict so handlers can stash arbitrary per-request state on the
    instance; HeadersMixin supplies Content-Type/charset/Content-Length parsing.
    """
    # Methods whose bodies may carry POST-style form parameters.
    POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT,
                    hdrs.METH_TRACE, hdrs.METH_DELETE}
    def __init__(self, app, message, payload, transport, reader, writer, *,
                 _HOST=hdrs.HOST, secure_proxy_ssl_header=None):
        self._app = app
        self._version = message.version
        self._transport = transport
        self._reader = reader
        self._writer = writer
        self._method = message.method
        self._host = message.headers.get(_HOST)
        self._path_qs = message.path
        res = urlsplit(message.path)
        self._path = unquote(res.path)
        self._query_string = res.query
        self._post = None
        self._post_files_cache = None
        self._headers = CIMultiDictProxy(message.headers)
        if self._version < HttpVersion10:
            # HTTP/1.0 and earlier never keep the connection open by default.
            self._keep_alive = False
        else:
            self._keep_alive = not message.should_close
        # matchdict, route_name, handler
        # or information about traversal lookup
        self._match_info = None  # initialized after route resolving
        self._payload = payload
        self._cookies = None
        self._read_bytes = None
        self._has_body = not payload.at_eof()
        self._secure_proxy_ssl_header = secure_proxy_ssl_header
    @property
    def scheme(self):
        """A string representing the scheme of the request.
        'http' or 'https'.
        """
        if self._transport.get_extra_info('sslcontext'):
            return 'https'
        # Trust a configured (header, value) pair set by a TLS-terminating proxy.
        secure_proxy_ssl_header = self._secure_proxy_ssl_header
        if secure_proxy_ssl_header is not None:
            header, value = secure_proxy_ssl_header
            if self._headers.get(header) == value:
                return 'https'
        return 'http'
    @property
    def method(self):
        """Read only property for getting HTTP method.
        The value is upper-cased str like 'GET', 'POST', 'PUT' etc.
        """
        return self._method
    @property
    def version(self):
        """Read only property for getting HTTP version of request.
        Returns aiohttp.protocol.HttpVersion instance.
        """
        return self._version
    @property
    def host(self):
        """Read only property for getting *HOST* header of request.
        Returns str or None if HTTP request has no HOST header.
        """
        return self._host
    @property
    def path_qs(self):
        """The URL including PATH_INFO and the query string.
        E.g, /app/blog?id=10
        """
        return self._path_qs
    @property
    def path(self):
        """The URL including *PATH INFO* without the host or scheme.
        E.g., ``/app/blog``
        """
        return self._path
    @property
    def query_string(self):
        """The query string in the URL.
        E.g., id=10
        """
        return self._query_string
    @reify
    def GET(self):
        """A multidict with all the variables in the query string.
        Lazy property.
        """
        return MultiDictProxy(MultiDict(parse_qsl(self._query_string)))
    @reify
    def POST(self):
        """A multidict with all the variables in the POST parameters.
        post() methods has to be called before using this attribute.
        """
        if self._post is None:
            raise RuntimeError("POST is not available before post()")
        return self._post
    @property
    def headers(self):
        """A case-insensitive multidict proxy with all headers."""
        return self._headers
    @property
    def keep_alive(self):
        """Is keepalive enabled by client?"""
        return self._keep_alive
    @property
    def match_info(self):
        """Result of route resolving."""
        return self._match_info
    @property
    def app(self):
        """Application instance."""
        return self._app
    @property
    def transport(self):
        """Transport used for request processing."""
        return self._transport
    @property
    def cookies(self):
        """Return request cookies.
        A read-only dictionary-like object.
        """
        if self._cookies is None:
            raw = self.headers.get(hdrs.COOKIE, '')
            parsed = http.cookies.SimpleCookie(raw)
            self._cookies = MappingProxyType(
                {key: val.value for key, val in parsed.items()})
        return self._cookies
    @property
    def payload(self):
        """Return raw payload stream."""
        warnings.warn('use Request.content instead', DeprecationWarning)
        return self._payload
    @property
    def content(self):
        """Return raw payload stream."""
        return self._payload
    @property
    def has_body(self):
        """Return True if request has HTTP BODY, False otherwise."""
        return self._has_body
    @asyncio.coroutine
    def release(self):
        """Release request.
        Eat unread part of HTTP BODY if present.
        """
        chunk = yield from self._payload.readany()
        while chunk is not EOF_MARKER or chunk:
            chunk = yield from self._payload.readany()
    @asyncio.coroutine
    def read(self):
        """Read request body if present.
        Returns bytes object with full request content.
        """
        if self._read_bytes is None:
            body = bytearray()
            while True:
                chunk = yield from self._payload.readany()
                body.extend(chunk)
                if chunk is EOF_MARKER:
                    break
            # Cache so repeated read()/text()/json() calls reuse the bytes.
            self._read_bytes = bytes(body)
        return self._read_bytes
    @asyncio.coroutine
    def text(self):
        """Return BODY as text using encoding from .charset."""
        bytes_body = yield from self.read()
        encoding = self.charset or 'utf-8'
        return bytes_body.decode(encoding)
    @asyncio.coroutine
    def json(self, *, loader=json.loads):
        """Return BODY as JSON."""
        body = yield from self.text()
        return loader(body)
    @asyncio.coroutine
    def post(self):
        """Return POST parameters."""
        if self._post is not None:
            return self._post
        if self.method not in self.POST_METHODS:
            self._post = MultiDictProxy(MultiDict())
            return self._post
        content_type = self.content_type
        if (content_type not in ('',
                                 'application/x-www-form-urlencoded',
                                 'multipart/form-data')):
            # Non-form content types yield an empty (immutable) multidict.
            self._post = MultiDictProxy(MultiDict())
            return self._post
        body = yield from self.read()
        content_charset = self.charset or 'utf-8'
        # Build a minimal CGI environ so cgi.FieldStorage can parse the body.
        environ = {'REQUEST_METHOD': self.method,
                   'CONTENT_LENGTH': str(len(body)),
                   'QUERY_STRING': '',
                   'CONTENT_TYPE': self.headers.get(hdrs.CONTENT_TYPE)}
        fs = cgi.FieldStorage(fp=io.BytesIO(body),
                              environ=environ,
                              keep_blank_values=True,
                              encoding=content_charset)
        supported_transfer_encoding = {
            'base64': binascii.a2b_base64,
            'quoted-printable': binascii.a2b_qp
        }
        out = MultiDict()
        for field in fs.list or ():
            transfer_encoding = field.headers.get(
                hdrs.CONTENT_TRANSFER_ENCODING, None)
            if field.filename:
                # Uploaded file: wrap in FileField and keep the original
                # FieldStorage alive in the cache so its file stays open.
                ff = FileField(field.name,
                               field.filename,
                               field.file,  # N.B. file closed error
                               field.type)
                if self._post_files_cache is None:
                    self._post_files_cache = {}
                self._post_files_cache[field.name] = field
                out.add(field.name, ff)
            else:
                value = field.value
                if transfer_encoding in supported_transfer_encoding:
                    # binascii accepts bytes
                    value = value.encode('utf-8')
                    value = supported_transfer_encoding[
                        transfer_encoding](value)
                out.add(field.name, value)
        self._post = MultiDictProxy(out)
        return self._post
    def __repr__(self):
        return "<{} {} {} >".format(self.__class__.__name__,
                                    self.method, self.path)
############################################################
# HTTP Response classes
############################################################
class StreamResponse(HeadersMixin):
    """HTTP response whose body is written incrementally via write()/write_eof().

    Headers, cookies, status, compression and chunking must be configured
    before start(); after that the response is committed to the wire.
    """
    def __init__(self, *, status=200, reason=None, headers=None):
        self._body = None
        self._keep_alive = None
        self._chunked = False
        self._chunk_size = None
        self._compression = False
        self._compression_force = False
        self._headers = CIMultiDict()
        self._cookies = http.cookies.SimpleCookie()
        self.set_status(status, reason)
        self._req = None
        self._resp_impl = None
        self._eof_sent = False
        if headers is not None:
            self._headers.extend(headers)
    def _copy_cookies(self):
        # Emit one Set-Cookie header per cookie; [1:] strips the leading space
        # that SimpleCookie.output() leaves when header='' is passed.
        for cookie in self._cookies.values():
            value = cookie.output(header='')[1:]
            self.headers.add(hdrs.SET_COOKIE, value)
    @property
    def started(self):
        # True once start() has created the low-level response implementation.
        return self._resp_impl is not None
    @property
    def status(self):
        return self._status
    @property
    def chunked(self):
        return self._chunked
    @property
    def compression(self):
        return self._compression
    @property
    def reason(self):
        return self._reason
    def set_status(self, status, reason=None):
        """Set the status code; derive the reason phrase if not given."""
        self._status = int(status)
        if reason is None:
            reason = ResponseImpl.calc_reason(status)
        self._reason = reason
    @property
    def keep_alive(self):
        return self._keep_alive
    def force_close(self):
        # Force connection close regardless of the client's keep-alive wish.
        self._keep_alive = False
    def enable_chunked_encoding(self, chunk_size=None):
        """Enables automatic chunked transfer encoding."""
        self._chunked = True
        self._chunk_size = chunk_size
    def enable_compression(self, force=False):
        """Enables response compression with `deflate` encoding."""
        self._compression = True
        self._compression_force = force
    @property
    def headers(self):
        return self._headers
    @property
    def cookies(self):
        return self._cookies
    def set_cookie(self, name, value, *, expires=None,
                   domain=None, max_age=None, path='/',
                   secure=None, httponly=None, version=None):
        """Set or update response cookie.
        Sets new cookie or updates existent with new value.
        Also updates only those params which are not None.
        """
        old = self._cookies.get(name)
        if old is not None and old.coded_value == '':
            # deleted cookie
            self._cookies.pop(name, None)
        self._cookies[name] = value
        c = self._cookies[name]
        if expires is not None:
            c['expires'] = expires
        if domain is not None:
            c['domain'] = domain
        if max_age is not None:
            c['max-age'] = max_age
        if path is not None:
            c['path'] = path
        if secure is not None:
            c['secure'] = secure
        if httponly is not None:
            c['httponly'] = httponly
        if version is not None:
            c['version'] = version
    def del_cookie(self, name, *, domain=None, path='/'):
        """Delete cookie.
        Creates new empty expired cookie.
        """
        # TODO: do we need domain/path here?
        self._cookies.pop(name, None)
        self.set_cookie(name, '', max_age=0, domain=domain, path=path)
    @property
    def content_length(self):
        # Just a placeholder for adding setter
        return super().content_length
    @content_length.setter
    def content_length(self, value):
        if value is not None:
            value = int(value)
            # TODO: raise error if chunked enabled
            self.headers[hdrs.CONTENT_LENGTH] = str(value)
        elif hdrs.CONTENT_LENGTH in self.headers:
            del self.headers[hdrs.CONTENT_LENGTH]
    @property
    def content_type(self):
        # Just a placeholder for adding setter
        return super().content_type
    @content_type.setter
    def content_type(self, value):
        self.content_type  # read header values if needed
        self._content_type = str(value)
        self._generate_content_type_header()
    @property
    def charset(self):
        # Just a placeholder for adding setter
        return super().charset
    @charset.setter
    def charset(self, value):
        ctype = self.content_type  # read header values if needed
        if ctype == 'application/octet-stream':
            raise RuntimeError("Setting charset for application/octet-stream "
                               "doesn't make sense, setup content_type first")
        if value is None:
            self._content_dict.pop('charset', None)
        else:
            self._content_dict['charset'] = str(value).lower()
        self._generate_content_type_header()
    def _generate_content_type_header(self, CONTENT_TYPE=hdrs.CONTENT_TYPE):
        # Rebuild the Content-Type header from the cached type + parameters.
        params = '; '.join("%s=%s" % i for i in self._content_dict.items())
        if params:
            ctype = self._content_type + '; ' + params
        else:
            ctype = self._content_type
        self.headers[CONTENT_TYPE] = ctype
    def _start_pre_check(self, request):
        # Starting twice is only legal with the very same request object.
        if self._resp_impl is not None:
            if self._req is not request:
                raise RuntimeError(
                    'Response has been started with different request.')
            else:
                return self._resp_impl
        else:
            return None
    def start(self, request):
        """Commit the response: build the impl, send headers; idempotent per request."""
        resp_impl = self._start_pre_check(request)
        if resp_impl is not None:
            return resp_impl
        self._req = request
        keep_alive = self._keep_alive
        if keep_alive is None:
            # Not explicitly set: follow the client's keep-alive preference.
            keep_alive = request.keep_alive
        self._keep_alive = keep_alive
        resp_impl = self._resp_impl = ResponseImpl(
            request._writer,
            self._status,
            request.version,
            not keep_alive,
            self._reason)
        self._copy_cookies()
        if self._compression:
            if (self._compression_force or
                    'deflate' in request.headers.get(
                        hdrs.ACCEPT_ENCODING, '')):
                resp_impl.add_compression_filter()
        if self._chunked:
            resp_impl.enable_chunked_encoding()
            if self._chunk_size:
                resp_impl.add_chunking_filter(self._chunk_size)
        headers = self.headers.items()
        for key, val in headers:
            resp_impl.add_header(key, val)
        resp_impl.send_headers()
        return resp_impl
    def write(self, data):
        """Write a chunk of body bytes; requires start() first."""
        assert isinstance(data, (bytes, bytearray, memoryview)), \
            'data argument must be byte-ish (%r)' % type(data)
        if self._eof_sent:
            raise RuntimeError("Cannot call write() after write_eof()")
        if self._resp_impl is None:
            raise RuntimeError("Cannot call write() before start()")
        if data:
            return self._resp_impl.write(data)
        else:
            return ()
    @asyncio.coroutine
    def drain(self):
        # Flow control: wait until the transport buffer drains.
        if self._resp_impl is None:
            raise RuntimeError("Response has not been started")
        yield from self._resp_impl.transport.drain()
    @asyncio.coroutine
    def write_eof(self):
        """Finish the body; safe to call more than once."""
        if self._eof_sent:
            return
        if self._resp_impl is None:
            raise RuntimeError("Response has not been started")
        yield from self._resp_impl.write_eof()
        self._eof_sent = True
    def __repr__(self):
        if self.started:
            info = "{} {} ".format(self._req.method, self._req.path)
        else:
            info = "not started"
        return "<{} {} {}>".format(self.__class__.__name__,
                                   self.reason, info)
class Response(StreamResponse):
    """Response with a fully buffered body, given as bytes (`body`) or str (`text`).

    Exactly one of `body`/`text` may be supplied; the body is written out
    automatically in write_eof().
    """
    def __init__(self, *, body=None, status=200,
                 reason=None, text=None, headers=None, content_type=None):
        super().__init__(status=status, reason=reason, headers=headers)
        if body is not None and text is not None:
            raise ValueError("body and text are not allowed together.")
        if text is not None:
            if hdrs.CONTENT_TYPE not in self.headers:
                # fast path for filling headers
                if not isinstance(text, str):
                    raise TypeError('text argument must be str (%r)' %
                                    type(text))
                if content_type is None:
                    content_type = 'text/plain'
                self.headers[hdrs.CONTENT_TYPE] = (
                    content_type + '; charset=utf-8')
                self._content_type = content_type
                self._content_dict = {'charset': 'utf-8'}
                self.body = text.encode('utf-8')
            else:
                # A Content-Type header already exists: go through the text
                # setter so its charset is honoured.
                self.text = text
        else:
            if content_type:
                self.content_type = content_type
            if body is not None:
                self.body = body
            else:
                self.body = None
    @property
    def body(self):
        return self._body
    @body.setter
    def body(self, body):
        if body is not None and not isinstance(body, bytes):
            raise TypeError('body argument must be bytes (%r)' % type(body))
        self._body = body
        # Keep Content-Length in sync with the buffered body.
        if body is not None:
            self.content_length = len(body)
        else:
            self.content_length = 0
    @property
    def text(self):
        return self._body.decode(self.charset or 'utf-8')
    @text.setter
    def text(self, text):
        if text is not None and not isinstance(text, str):
            raise TypeError('text argument must be str (%r)' % type(text))
        if self.content_type == 'application/octet-stream':
            # Default binary type makes no sense for text; switch to text/plain.
            self.content_type = 'text/plain'
        if self.charset is None:
            self.charset = 'utf-8'
        self.body = text.encode(self.charset)
    @asyncio.coroutine
    def write_eof(self):
        # Emit the buffered body just before closing the stream.
        body = self._body
        if body is not None:
            self.write(body)
        yield from super().write_eof()
| gpl-3.0 |
EiSandi/greetingslack | greetingslack/lib/python2.7/site-packages/wheel/archive.py | 93 | 2247 | """
Archive tools for wheel.
"""
import os
import time
import logging
import os.path
import zipfile
log = logging.getLogger("wheel")
def archive_wheelfile(base_name, base_dir):
    """Create `base_name`.whl from everything under `base_dir`.

    Temporarily switches the working directory into `base_dir` so that
    archive member names are relative to it, restoring the previous
    directory afterwards.
    """
    previous_dir = os.path.abspath(os.curdir)
    target = os.path.abspath(base_name)
    try:
        os.chdir(base_dir)
        return make_wheelfile_inner(target)
    finally:
        os.chdir(previous_dir)
def make_wheelfile_inner(base_name, base_dir='.'):
    """Create a whl file from all the files under 'base_dir'.

    Places .dist-info metadata (WHEEL, METADATA, RECORD last) at the end of
    the archive. Returns the path of the created zip file.
    """
    zip_filename = base_name + ".whl"
    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
    # Some applications need reproducible .whl files, but they can't do this
    # without forcing the timestamp of the individual ZipInfo objects. See
    # issue #143.
    timestamp = os.environ.get('SOURCE_DATE_EPOCH')
    if timestamp is None:
        date_time = None
    else:
        date_time = time.gmtime(int(timestamp))[0:6]
    # XXX support bz2, xz when available
    # Ordering for .dist-info members; unknown names sort first (score 0).
    score = {'WHEEL': 1, 'METADATA': 2, 'RECORD': 3}
    deferred = []
    # 'with' guarantees the archive is closed even if writing raises
    # (the original left the file handle open on error).
    with zipfile.ZipFile(zip_filename, "w",
                         compression=zipfile.ZIP_DEFLATED) as zip_file:
        def writefile(path, date_time):
            # Add one file, preserving its mode bits; honor the forced
            # timestamp when SOURCE_DATE_EPOCH is set, else use mtime.
            st = os.stat(path)
            if date_time is None:
                mtime = time.gmtime(st.st_mtime)
                date_time = mtime[0:6]
            zinfo = zipfile.ZipInfo(path, date_time)
            zinfo.external_attr = st.st_mode << 16
            zinfo.compress_type = zipfile.ZIP_DEFLATED
            with open(path, 'rb') as fp:
                zip_file.writestr(zinfo, fp.read())
            log.info("adding '%s'" % path)
        for dirpath, dirnames, filenames in os.walk(base_dir):
            for name in filenames:
                path = os.path.normpath(os.path.join(dirpath, name))
                if os.path.isfile(path):
                    if dirpath.endswith('.dist-info'):
                        # Defer metadata so it lands at the end of the archive.
                        deferred.append((score.get(name, 0), path))
                    else:
                        writefile(path, date_time)
        deferred.sort()
        # Renamed loop variable: the original reused 'score', clobbering the
        # priority dict. ('zip' was likewise renamed off the builtin.)
        for _, path in deferred:
            writefile(path, date_time)
    return zip_filename
| mit |
suse110/linux-1 | tools/perf/scripts/python/failed-syscalls-by-pid.py | 1996 | 2233 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
# perf calls this once when tracing starts.
def trace_begin():
	print "Press control+C to stop and show the summary"
# perf calls this once when tracing stops; dump the accumulated counts.
def trace_end():
	print_error_totals()
# Tracepoint handler: count failed (ret < 0) syscall exits per comm/pid/id/errno.
def raw_syscalls__sys_exit(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	common_callchain, id, ret):
	# Honor the optional [comm] / [pid] command-line filter.
	if (for_comm and common_comm != for_comm) or \
	   (for_pid and common_pid != for_pid ):
		return
	if ret < 0:
		# autodict lazily creates nested dicts; first hit raises TypeError.
		try:
			syscalls[common_comm][common_pid][id][ret] += 1
		except TypeError:
			syscalls[common_comm][common_pid][id][ret] = 1
# Legacy tracepoint signature (no callchain); forward to the raw handler.
def syscalls__sys_exit(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, ret):
	raw_syscalls__sys_exit(**locals())
# Print a per-comm/pid table of failed syscalls, most frequent errno first.
def print_error_totals():
	if for_comm is not None:
		print "\nsyscall errors for %s:\n\n" % (for_comm),
	else:
		print "\nsyscall errors:\n\n",
	print "%-30s %10s\n" % ("comm [pid]", "count"),
	print "%-30s %10s\n" % ("------------------------------", \
		"----------"),
	comm_keys = syscalls.keys()
	for comm in comm_keys:
		pid_keys = syscalls[comm].keys()
		for pid in pid_keys:
			print "\n%s [%d]\n" % (comm, pid),
			id_keys = syscalls[comm][pid].keys()
			for id in id_keys:
				print "  syscall: %-16s\n" % syscall_name(id),
				ret_keys = syscalls[comm][pid][id].keys()
				for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
					print "    err = %-20s  %10d\n" % (strerror(ret), val),
| gpl-2.0 |
danellecline/stoqs | stoqs/config/settings/production.py | 1 | 5113 | # -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis on Heroku
'''
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure", )
SECURITY_MIDDLEWARE = (
'djangosecure.middleware.SecurityMiddleware',
)
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + MIDDLEWARE_CLASSES
# Start with a short HSTS max-age (60 s); raise it to 518400 (6 days) only
# once HTTPS is confirmed to work for the whole site.
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# Static Assests
# ------------------------
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='new_dj_cookiecutter <noreply@example.com>')
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = env('DJANGO_MAILGUN_API_KEY')
MAILGUN_SERVER_NAME = env('DJANGO_MAILGUN_SERVER_NAME')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[new_dj_cookiecutter] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db("DATABASE_URL")
# CACHING
# ------------------------------------------------------------------------------
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "{0}/{1}".format(env.cache_url('REDIS_URL', default="redis://127.0.0.1:6379"), 0),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"IGNORE_EXCEPTIONS": True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
# Your production stuff: Below this line define 3rd party library settings
| gpl-3.0 |
serathius/elasticsearch-raven | tests/test_transport.py | 3 | 5966 | import datetime
import logging
import string
from unittest import TestCase
from unittest import mock
from elasticsearch_raven import exceptions
from elasticsearch_raven import transport
class DummyMock(mock.Mock):
    """Mock that compares equal to anything.

    Used as a wildcard inside ``assertEqual`` comparisons against recorded
    mock calls whose exact argument value cannot be predicted (e.g. the
    generated document id in LogTransportSendTest).
    """

    def __eq__(self, other):
        # Always equal, so any value matches in a call comparison.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable on Python 3 -- acceptable for this test-only helper.
        return True
class ParseSentryHeadersTest(TestCase):
    """Tests for ``SentryMessage.parse_headers``."""

    def _assert_rejected(self, header_string):
        # Every malformed header must raise BadSentryMessageHeaderError.
        self.assertRaises(exceptions.BadSentryMessageHeaderError,
                          transport.SentryMessage.parse_headers,
                          header_string)

    def test_empty_string(self):
        self._assert_rejected('')

    def test_empty_arguments(self):
        self._assert_rejected('sentry_key=, sentry_secret=')

    def test_example(self):
        header = ''''Sentry sentry_timestamp=1396269830.8627632,
sentry_client=raven-python/4.0.4, sentry_version=4,
sentry_key=public, sentry_secret=secret'''
        parsed = transport.SentryMessage.parse_headers(header)
        self.assertEqual({'sentry_key': 'public',
                          'sentry_secret': 'secret'}, parsed)

    def test_reverse_order(self):
        self._assert_rejected('sentry_secret=b, sentry_key=a')

    def test_random_string(self):
        self._assert_rejected(string.printable)

    def test_man_in_the_middle(self):
        self._assert_rejected(
            'sentry_key=a, man_in_the_middle=yes, sentry_secret=b')
class DecodeBodyTest(TestCase):
    """Tests for ``SentryMessage.decode_body``."""

    def test_empty(self):
        # An empty body cannot be decoded at all.
        message = mock.Mock(transport.SentryMessage)
        message.body = b''
        self.assertRaises(exceptions.DamagedSentryMessageBodyError,
                          transport.SentryMessage.decode_body, message)

    def test_example(self):
        # A valid compressed payload decodes to a plain dict.
        # (b'x\x9c' is the zlib header -- presumably the body is
        # zlib-compressed JSON; confirm against the transport module.)
        message = mock.Mock(transport.SentryMessage)
        message.body = b'x\x9c\xabV*)-\xc8IU\xb2R\x88\x8e\xd5QP\xca\xc9,.' \
                       b'\x81\xb1\xd3r\xf2\x13A\x1cC=\x03 /3\x0f\xcc\xae\x05' \
                       b'\x00kU\r\xcc'
        result = transport.SentryMessage.decode_body(message)
        self.assertEqual({'int': 1, 'float': 1.0, 'list': [], 'tuple': []},
                         result)

    def test_random(self):
        # Truncated/garbage data raises the damaged-body error.
        message = mock.Mock(transport.SentryMessage)
        message.body = b'x\x9c\xd3\xb5\x05\x00\x00\x99\x00k'
        self.assertRaises(exceptions.DamagedSentryMessageBodyError,
                          transport.SentryMessage.decode_body, message)
class CreateFromUDPTest(TestCase):
    """Tests for ``SentryMessage.create_from_udp``."""

    def test_empty(self):
        # An empty datagram cannot be split into header and body.
        self.assertRaises(exceptions.DamagedSentryMessageError,
                          transport.SentryMessage.create_from_udp, b'')

    def test_separator(self):
        # A bare separator leaves an empty (hence invalid) header part.
        self.assertRaises(exceptions.BadSentryMessageHeaderError,
                          transport.SentryMessage.create_from_udp, b'\n\n')

    def test_example(self):
        # Headers parse to a dict; the body is base64-decoded.
        datagram = b'sentry_key=a, sentry_secret=b\n\nYm9keQ=='
        result = transport.SentryMessage.create_from_udp(datagram)
        self.assertEqual({'sentry_key': 'a', 'sentry_secret': 'b'},
                         result.headers)
        self.assertEqual(b'body', result.body)
class CreateFromHttpTest(TestCase):
    """Tests for ``SentryMessage.create_from_http``."""

    def test_empty(self):
        # Empty header and body strings are rejected at header parsing.
        self.assertRaises(exceptions.BadSentryMessageHeaderError,
                          transport.SentryMessage.create_from_http, '', '')

    def test_example(self):
        # Headers parse to a dict; the body is base64-decoded.
        message = transport.SentryMessage.create_from_http(
            'sentry_key=a, sentry_secret=b', 'Ym9keQ==')
        self.assertEqual({'sentry_key': 'a', 'sentry_secret': 'b'},
                         message.headers)
        self.assertEqual(b'body', message.body)
class LogTransportSendTest(TestCase):
    """Tests for ``LogTransport.send_message`` and the ``hash_dict`` helper."""

    @mock.patch('elasticsearch_raven.transport.datetime')
    @mock.patch('elasticsearch.Elasticsearch')
    def test_example(self, ElasticSearch, datetime_mock):
        log_transport = transport.LogTransport('example.com', use_ssl=False,
                                               http_auth='login:password')
        # Freeze "now" so the date-templated index name is deterministic.
        datetime_mock.datetime.now.return_value = datetime.datetime(2014, 1, 1)
        headers = {}
        body = {'project': 'index-{0:%Y.%m.%d}', 'extra': {'foo': 'bar'}}
        message = transport.SentryMessage(headers, body)
        message.decode_body = mock.Mock()
        message.decode_body.return_value = body
        log_transport.send_message(message)
        # DummyMock() compares equal to anything, so the generated document
        # id does not have to be predicted here.
        self.assertEqual([mock.call(
            http_auth='login:password', use_ssl=False,
            hosts=['example.com']),
            mock.call().__getattr__('index')(
                index='index-2014.01.01', doc_type='raven-log', body={
                    'project': 'index-{0:%Y.%m.%d}', 'extra': {
                        'foo<string>': 'bar'}},
                id=DummyMock())],
            ElasticSearch.mock_calls)

    def test_get_id(self):
        # Pins the stable digest of the canonicalised dict.
        arg = {'a': '1', 'b': 2, 'c': None, 'd': [], 'e': {}}
        self.assertEqual('a07adfbed45a1475e48e216e3a38e529b2e4ddcd',
                         transport.hash_dict(arg))

    def test_get_id_sort(self):
        # Key insertion order must not affect the hash.
        arg1 = {'a': '1', 'b': 2, 'c': None, 'd': [], 'e': {}}
        arg2 = {'e': {}, 'd': [], 'c': None, 'b': 2, 'a': '1'}
        self.assertEqual(transport.hash_dict(arg1),
                         transport.hash_dict(arg2))
class LoggerLevelToErrorTest(TestCase):
    """Tests for the ``logger_level_to_error`` context manager."""

    def test_level(self):
        # Inside the context the logger is forced to ERROR; the previous
        # level is restored on exit.
        test_logger = logging.getLogger('test')
        test_logger.setLevel(logging.WARNING)
        with transport.logger_level_to_error('test'):
            self.assertEqual(logging.ERROR, test_logger.level)
        self.assertEqual(logging.WARNING, test_logger.level)
| mit |
tommy-u/chaco | examples/demo/updating_plot/updating_plot4.py | 2 | 4366 | #!/usr/bin/env python
"""
A modification of updating_plot3.py.
Three of the plots are now oriented vertically, but the dataspace of all
6 plots is still linked. Panning along the X axis of a vertical plot
will move the Y axis of one of the horizontally-oriented plots, and vice
versa.
"""
# Major library imports
from numpy import arange
from scipy.special import jn
# Enthought library imports
from enable.api import Window
from enable.example_support import DemoFrame, demo_main
from traits.api import HasTraits
from pyface.timer.api import Timer
# Chaco imports
from chaco.api import create_line_plot, OverlayPlotContainer, ArrayDataSource
from chaco.tools.api import MoveTool, PanTool, ZoomTool
COLOR_PALETTE = ("mediumslateblue", "maroon", "darkgreen", "goldenrod",
"purple", "indianred")
PLOT_SIZE = 250
class AnimatedPlot(HasTraits):
    """One line plot whose data is progressively revealed by ``timer_tick``.

    Wraps a Chaco line plot plus the bookkeeping needed to animate it by
    growing and shrinking the number of displayed points.
    """

    def __init__(self, x, y, color="blue", bgcolor="white", orientation="h"):
        # Keep private copies of the data so each tick can re-slice them.
        self.y_values = y[:]
        if type(x) == ArrayDataSource:
            # Shared index: extract the raw values, but hand the data source
            # itself to create_line_plot so all plots stay linked.
            self.x_values = x.get_data()[:]
            plot = create_line_plot((x, self.y_values), color=color,
                                    bgcolor=bgcolor, add_grid=True,
                                    add_axis=True, orientation=orientation)
        else:
            self.x_values = x[:]
            plot = create_line_plot((self.x_values, self.y_values), color=color,
                                    bgcolor=bgcolor, add_grid=True,
                                    add_axis=True, orientation=orientation)
        plot.resizable = ""
        plot.bounds = [PLOT_SIZE, PLOT_SIZE]
        plot.unified_draw = True

        plot.tools.append(PanTool(plot, drag_button="right"))
        plot.tools.append(MoveTool(plot))
        plot.overlays.append(ZoomTool(plot, tool_mode="box", always_on=False))
        self.plot = plot

        self.numpoints = len(self.x_values)
        # Start the animation half-way through the data.
        # NOTE(review): ``/`` yields a float under Python 3; this demo
        # targets Python 2 integer division -- confirm before porting.
        self.current_index = self.numpoints/2
        self.increment = 2

    def timer_tick(self):
        """Advance the animation one step and request a redraw.

        Grows the visible slice until all points are shown, then shrinks it
        back to a third of the data, ping-ponging indefinitely.
        """
        if self.current_index <= self.numpoints/3:
            self.increment = 2
        elif self.current_index == self.numpoints:
            self.increment = -2
        self.current_index += self.increment
        if self.current_index > self.numpoints:
            self.current_index = self.numpoints

        self.plot.index.set_data(self.x_values[:self.current_index])
        self.plot.value.set_data(self.y_values[:self.current_index])
        self.plot.request_redraw()
class PlotFrame(DemoFrame):
    """Demo frame: six Bessel-function plots sharing one index data source,
    animated by a 100 ms timer."""

    def _create_window(self):
        numpoints = 50
        low = -5
        high = 15.0
        x = arange(low, high, (high-low)/numpoints)
        container = OverlayPlotContainer(bgcolor="lightgray")

        common_index = None
        index_range = None
        value_range = None
        self.animated_plots = []
        for i, color in enumerate(COLOR_PALETTE):
            if not common_index:
                # The first plot establishes the shared index and the shared
                # data-space ranges all later plots link to.
                animated_plot = AnimatedPlot(x, jn(i,x), color)
                plot = animated_plot.plot
                common_index = plot.index
                index_range = plot.index_mapper.range
                value_range = plot.value_mapper.range
            else:
                # Alternate between vertical and horizontal orientation.
                if i % 2 == 1:
                    orientation = "v"
                else:
                    orientation = "h"
                animated_plot = AnimatedPlot(common_index, jn(i,x), color,
                                             orientation=orientation)
                plot = animated_plot.plot
                # Link data space to the first plot's ranges.
                plot.index_mapper.range = index_range
                plot.value_mapper.range = value_range
            container.add(plot)
            self.animated_plots.append(animated_plot)

        # Lay the six plots out in a 3x2 grid.
        for i, a_plot in enumerate(self.animated_plots):
            a_plot.plot.position = [50 + (i%3)*(PLOT_SIZE+50),
                                    50 + (i//3)*(PLOT_SIZE+50)]

        self.timer = Timer(100.0, self.onTimer)
        self.container = container
        return Window(self, -1, component=container)

    def onTimer(self, *args):
        # Advance every animated plot once per timer event.
        for plot in self.animated_plots:
            plot.timer_tick()
        return
if __name__ == "__main__":
# Save demo so that it doesn't get garbage collected when run within
# existing event loop (i.e. from ipython).
demo = demo_main(PlotFrame, size=(950, 650), title="Updating line plot")
| bsd-3-clause |
drewandersonnz/openshift-tools | ansible/roles/lib_oa_openshift/src/class/oc_configmap.py | 18 | 6202 | # pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-arguments
class OCConfigMap(OpenShiftCLI):
    ''' Openshift ConfigMap Class

        ConfigMaps are a way to store data inside of objects
    '''

    def __init__(self,
                 name,
                 from_file,
                 from_literal,
                 state,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False):
        ''' Constructor for OpenshiftOC

            :name: configmap name
            :from_file: dict of key -> file path passed as --from-file
            :from_literal: dict of key -> value passed as --from-literal
            :state: desired state (list|present|absent)
            :namespace: target namespace
        '''
        super(OCConfigMap, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
        self.name = name
        self.state = state
        self._configmap = None        # lazy cache: configmap as it exists in the cluster
        self._inc_configmap = None    # lazy cache: proposed (incoming) configmap
        self.from_file = from_file if from_file is not None else {}
        self.from_literal = from_literal if from_literal is not None else {}

    @property
    def configmap(self):
        # Current configmap, fetched from the cluster on first access.
        if self._configmap is None:
            self._configmap = self.get()

        return self._configmap

    @configmap.setter
    def configmap(self, inc_map):
        self._configmap = inc_map

    @property
    def inc_configmap(self):
        # Proposed configmap, rendered via a dry-run create on first access.
        if self._inc_configmap is None:
            results = self.create(dryrun=True, output=True)
            self._inc_configmap = results['results']

        return self._inc_configmap

    @inc_configmap.setter
    def inc_configmap(self, inc_map):
        self._inc_configmap = inc_map

    def from_file_to_params(self):
        '''return from_files as a list of cli arguments'''
        return ["--from-file={}={}".format(key, value) for key, value in self.from_file.items()]

    def from_literal_to_params(self):
        '''return from_literal as a list of cli arguments'''
        return ["--from-literal={}={}".format(key, value) for key, value in self.from_literal.items()]

    def get(self):
        '''return a configmap by name '''
        results = self._get('configmap', self.name)
        if results['returncode'] == 0 and results['results'][0]:
            self.configmap = results['results'][0]

        # A missing configmap is not an error for our purposes.
        if results['returncode'] != 0 and '"{}" not found'.format(self.name) in results['stderr']:
            results['returncode'] = 0

        return results

    def delete(self):
        '''delete a configmap by name'''
        return self._delete('configmap', self.name)

    def create(self, dryrun=False, output=False):
        '''Create a configmap

           :dryrun: Product what you would have done. default: False
           :output: Whether to parse output. default: False
        '''

        cmd = ['create', 'configmap', self.name]
        if self.from_literal is not None:
            cmd.extend(self.from_literal_to_params())

        if self.from_file is not None:
            cmd.extend(self.from_file_to_params())

        if dryrun:
            # Render the object without touching the cluster.
            cmd.extend(['--dry-run', '-ojson'])

        results = self.openshift_cmd(cmd, output=output)

        return results

    def update(self):
        '''run update configmap '''
        return self._replace_content('configmap', self.name, self.inc_configmap)

    def needs_update(self):
        '''return True when the proposed configmap differs from the live one'''
        return not Utils.check_def_equal(self.inc_configmap, self.configmap, debug=self.verbose)

    @staticmethod
    # pylint: disable=too-many-return-statements,too-many-branches
    # TODO: This function should be refactored into its individual parts.
    def run_ansible(params, check_mode):
        '''run the oc_configmap module

           Implements the idempotent list/absent/present state machine and
           returns an Ansible-style result dict.
        '''

        oc_cm = OCConfigMap(params['name'],
                            params['from_file'],
                            params['from_literal'],
                            params['state'],
                            params['namespace'],
                            kubeconfig=params['kubeconfig'],
                            verbose=params['debug'])

        state = params['state']

        api_rval = oc_cm.get()
        if 'failed' in api_rval:
            return {'failed': True, 'msg': api_rval}

        #####
        # Get
        #####
        if state == 'list':
            return {'changed': False, 'results': api_rval, 'state': state}

        if not params['name']:
            return {'failed': True,
                    'msg': 'Please specify a name when state is absent|present.'}

        ########
        # Delete
        ########
        if state == 'absent':
            if not Utils.exists(api_rval['results'], params['name']):
                # Already gone: nothing to do.
                return {'changed': False, 'state': 'absent'}

            if check_mode:
                return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}

            api_rval = oc_cm.delete()

            if api_rval['returncode'] != 0:
                return {'failed': True, 'msg': api_rval}

            return {'changed': True, 'results': api_rval, 'state': state}

        ########
        # Create
        ########
        if state == 'present':
            if not Utils.exists(api_rval['results'], params['name']):

                if check_mode:
                    return {'changed': True, 'msg': 'Would have performed a create.'}

                api_rval = oc_cm.create()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                # Re-fetch so the caller sees the created object.
                api_rval = oc_cm.get()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': state}

            ########
            # Update
            ########
            if oc_cm.needs_update():

                api_rval = oc_cm.update()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                api_rval = oc_cm.get()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': state}

            # Exists and matches: idempotent no-op.
            return {'changed': False, 'results': api_rval, 'state': state}

        return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)}
| apache-2.0 |
maxantonio/rambocoin | contrib/pyminer/pyminer.py | 1257 | 6438 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file license.txt or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind-compatible node (Python 2)."""

    OBJID = 1  # request-id counter; bumped once per call

    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # 30-second timeout, no strict mode.
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """Issue one JSON-RPC call.

        Returns the RPC result on success, the error object on RPC-level
        failure, or None on transport/parse failure.
        """
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
                          { 'Authorization' : self.authhdr,
                            'Content-type' : 'application/json' })

        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None

        return resp_obj['result']

    def getblockcount(self):
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        # Without data: fetch new work. With data: submit a solution.
        return self.rpc('getwork', data)
def uint32(x):
    # Truncate to an unsigned 32-bit value (Python 2 long).
    return x & 0xffffffffL
def bytereverse(x):
    """Reverse the byte order of a 32-bit word (endianness swap)."""
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
                    (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
    """Byte-swap each 32-bit word of a buffer.

    Buffer length is assumed to be a multiple of 4 bytes.
    """
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 32-bit words in a buffer.

    The bytes within each word are left untouched.
    """
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i+4])
    out_words.reverse()
    return ''.join(out_words)
class Miner:
    """One getwork miner: fetch work, scan nonces, submit winning solutions."""

    def __init__(self, id):
        self.id = id
        self.max_nonce = MAX_NONCE  # re-tuned each pass in iterate()

    def work(self, datastr, targetstr):
        """Scan nonces for one work unit.

        Returns (hashes_done, nonce_bin) where nonce_bin is the packed
        winning nonce, or None if no solution was found in this scan.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]	# byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):

            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)

            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
#				return (nonce + 1, nonce_bin)

        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        # Splice the winning nonce back into the original hex work data and
        # submit it upstream via getwork([solution]).
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        # One fetch/scan/submit cycle; sleeps and returns on RPC trouble.
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()

        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])

        time_end = time.time()
        time_diff = time_end - time_start

        # Re-tune the scan size so one pass takes ~settings['scantime'] secs.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done,
                (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        # Mine forever against the configured RPC endpoint.
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)
def miner_thread(id):
    # Worker-process entry point: each process runs its own Miner loop.
    miner = Miner(id)
    miner.loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # Parse the simple key=value config file.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Defaults for optional settings.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # Normalise numeric settings (config values arrive as strings).
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # One worker process per configured "thread".
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)			# stagger threads
    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit |
Kazade/NeHe-Website | google_appengine/lib/django-1.4/django/templatetags/i18n.py | 80 | 16514 | from __future__ import with_statement
import re
from django.template import (Node, Variable, TemplateSyntaxError,
TokenParser, Library, TOKEN_TEXT, TOKEN_VAR)
from django.template.base import _render_value_in_context
from django.template.defaulttags import token_kwargs
from django.utils import translation
register = Library()
class GetAvailableLanguagesNode(Node):
    """Renders nothing; stores ``[(code, translated_name), ...]`` from
    settings.LANGUAGES into the context under ``self.variable``."""

    def __init__(self, variable):
        self.variable = variable

    def render(self, context):
        from django.conf import settings
        context[self.variable] = [(k, translation.ugettext(v)) for k, v in settings.LANGUAGES]
        return ''
class GetLanguageInfoNode(Node):
    """Renders nothing; stores ``translation.get_language_info(lang_code)``
    into the context under ``self.variable``."""

    def __init__(self, lang_code, variable):
        self.lang_code = Variable(lang_code)
        self.variable = variable

    def render(self, context):
        lang_code = self.lang_code.resolve(context)
        context[self.variable] = translation.get_language_info(lang_code)
        return ''
class GetLanguageInfoListNode(Node):
    """Renders nothing; stores a list of language-info dicts for a sequence
    of language codes (or LANGUAGES-style tuples) in the context."""

    def __init__(self, languages, variable):
        self.languages = Variable(languages)
        self.variable = variable

    def get_language_info(self, language):
        # ``language`` is either a language code string or a sequence
        # with the language code as its first item
        if len(language[0]) > 1:
            return translation.get_language_info(language[0])
        else:
            return translation.get_language_info(str(language))

    def render(self, context):
        langs = self.languages.resolve(context)
        context[self.variable] = [self.get_language_info(lang) for lang in langs]
        return ''
class GetCurrentLanguageNode(Node):
    """Renders nothing; stores the active language code in the context."""

    def __init__(self, variable):
        self.variable = variable

    def render(self, context):
        context[self.variable] = translation.get_language()
        return ''
class GetCurrentLanguageBidiNode(Node):
    """Renders nothing; stores True in the context when the active language
    is written right-to-left."""

    def __init__(self, variable):
        self.variable = variable

    def render(self, context):
        context[self.variable] = translation.get_language_bidi()
        return ''
class TranslateNode(Node):
    """Node behind {% trans %}: resolves and translates a string, optionally
    storing the result in a context variable instead of emitting it."""

    def __init__(self, filter_expression, noop, asvar=None,
                 message_context=None):
        self.noop = noop
        self.asvar = asvar
        self.message_context = message_context
        self.filter_expression = filter_expression
        if isinstance(self.filter_expression.var, basestring):
            # Wrap a bare string so it resolves as a literal Variable.
            self.filter_expression.var = Variable(u"'%s'" %
                                                  self.filter_expression.var)

    def render(self, context):
        # The ``translate`` flag is honoured during Variable resolution;
        # ``noop`` disables the actual translation lookup.
        self.filter_expression.var.translate = not self.noop
        if self.message_context:
            self.filter_expression.var.message_context = (
                self.message_context.resolve(context))
        output = self.filter_expression.resolve(context)
        value = _render_value_in_context(output, context)
        if self.asvar:
            context[self.asvar] = value
            return ''
        else:
            return value
class BlockTranslateNode(Node):
    """Node behind {% blocktrans %}: renders a (possibly pluralised, possibly
    contextualised) translated block with %(var)s placeholders."""

    def __init__(self, extra_context, singular, plural=None, countervar=None,
                 counter=None, message_context=None):
        self.extra_context = extra_context
        self.singular = singular
        self.plural = plural
        self.countervar = countervar
        self.counter = counter
        self.message_context = message_context

    def render_token_list(self, tokens):
        # Flatten a token list into a %-format string plus the list of
        # variable names it references.
        result = []
        vars = []
        for token in tokens:
            if token.token_type == TOKEN_TEXT:
                result.append(token.contents)
            elif token.token_type == TOKEN_VAR:
                result.append(u'%%(%s)s' % token.contents)
                vars.append(token.contents)
        return ''.join(result), vars

    def render(self, context):
        if self.message_context:
            message_context = self.message_context.resolve(context)
        else:
            message_context = None
        tmp_context = {}
        for var, val in self.extra_context.items():
            tmp_context[var] = val.resolve(context)
        # Update() works like a push(), so corresponding context.pop() is at
        # the end of function
        context.update(tmp_context)
        singular, vars = self.render_token_list(self.singular)
        # Escape all isolated '%'
        singular = re.sub(u'%(?!\()', u'%%', singular)
        if self.plural and self.countervar and self.counter:
            count = self.counter.resolve(context)
            context[self.countervar] = count
            plural, plural_vars = self.render_token_list(self.plural)
            plural = re.sub(u'%(?!\()', u'%%', plural)
            if message_context:
                result = translation.npgettext(message_context, singular,
                                               plural, count)
            else:
                result = translation.ungettext(singular, plural, count)
            vars.extend(plural_vars)
        else:
            if message_context:
                result = translation.pgettext(message_context, singular)
            else:
                result = translation.ugettext(singular)
        data = dict([(v, _render_value_in_context(context.get(v, ''), context)) for v in vars])
        context.pop()
        try:
            result = result % data
        except KeyError:
            # The translation references a placeholder that does not exist;
            # re-render with translation deactivated as a safe fallback.
            with translation.override(None):
                result = self.render(context)
        return result
class LanguageNode(Node):
    """Node behind {% language %}: renders its contents with the given
    language activated, restoring the previous language afterwards."""

    def __init__(self, nodelist, language):
        self.nodelist = nodelist
        self.language = language

    def render(self, context):
        with translation.override(self.language.resolve(context)):
            output = self.nodelist.render(context)
        return output
@register.tag("get_available_languages")
def do_get_available_languages(parser, token):
"""
This will store a list of available languages
in the context.
Usage::
{% get_available_languages as languages %}
{% for language in languages %}
...
{% endfor %}
This will just pull the LANGUAGES setting from
your setting file (or the default settings) and
put it into the named variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_available_languages' requires 'as variable' (got %r)" % args)
return GetAvailableLanguagesNode(args[2])
@register.tag("get_language_info")
def do_get_language_info(parser, token):
"""
This will store the language information dictionary for the given language
code in a context variable.
Usage::
{% get_language_info for LANGUAGE_CODE as l %}
{{ l.code }}
{{ l.name }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
"""
args = token.contents.split()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for string as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoNode(args[2], args[4])
@register.tag("get_language_info_list")
def do_get_language_info_list(parser, token):
"""
This will store a list of language information dictionaries for the given
language codes in a context variable. The language codes can be specified
either as a list of strings or a settings.LANGUAGES style tuple (or any
sequence of sequences whose first items are language codes).
Usage::
{% get_language_info_list for LANGUAGES as langs %}
{% for l in langs %}
{{ l.code }}
{{ l.name }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
{% endfor %}
"""
args = token.contents.split()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for sequence as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoListNode(args[2], args[4])
@register.filter
def language_name(lang_code):
    # e.g. 'de' -> 'German'
    return translation.get_language_info(lang_code)['name']
@register.filter
def language_name_local(lang_code):
    # e.g. 'de' -> 'Deutsch' (name in the language itself)
    return translation.get_language_info(lang_code)['name_local']
@register.filter
def language_bidi(lang_code):
    # True when the language is written right-to-left.
    return translation.get_language_info(lang_code)['bidi']
@register.tag("get_current_language")
def do_get_current_language(parser, token):
"""
This will store the current language in the context.
Usage::
{% get_current_language as language %}
This will fetch the currently active language and
put it's value into the ``language`` context
variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageNode(args[2])
@register.tag("get_current_language_bidi")
def do_get_current_language_bidi(parser, token):
"""
This will store the current language layout in the context.
Usage::
{% get_current_language_bidi as bidi %}
This will fetch the currently active language's layout and
put it's value into the ``bidi`` context variable.
True indicates right-to-left layout, otherwise left-to-right
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language_bidi' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageBidiNode(args[2])
@register.tag("trans")
def do_translate(parser, token):
"""
This will mark a string for translation and will
translate the string for the current language.
Usage::
{% trans "this is a test" %}
This will mark the string for translation so it will
be pulled out by mark-messages.py into the .po files
and will run the string through the translation engine.
There is a second form::
{% trans "this is a test" noop %}
This will only mark for translation, but will return
the string unchanged. Use it when you need to store
values into forms that should be translated later on.
You can use variables instead of constant strings
to translate stuff you marked somewhere else::
{% trans variable %}
This will just try to translate the contents of
the variable ``variable``. Make sure that the string
in there is something that is in the .po file.
It is possible to store the translated string into a variable::
{% trans "this is a test" as var %}
{{ var }}
Contextual translations are also supported::
{% trans "this is a test" context "greeting" %}
This is equivalent to calling pgettext instead of (u)gettext.
"""
class TranslateParser(TokenParser):
def top(self):
value = self.value()
# Backwards Compatiblity fix:
# FilterExpression does not support single-quoted strings,
# so we make a cheap localized fix in order to maintain
# backwards compatibility with existing uses of ``trans``
# where single quote use is supported.
if value[0] == "'":
m = re.match("^'([^']+)'(\|.*$)", value)
if m:
value = '"%s"%s' % (m.group(1).replace('"','\\"'), m.group(2))
elif value[-1] == "'":
value = '"%s"' % value[1:-1].replace('"','\\"')
noop = False
asvar = None
message_context = None
while self.more():
tag = self.tag()
if tag == 'noop':
noop = True
elif tag == 'context':
message_context = parser.compile_filter(self.value())
elif tag == 'as':
asvar = self.tag()
else:
raise TemplateSyntaxError(
"Only options for 'trans' are 'noop', " \
"'context \"xxx\"', and 'as VAR'.")
return value, noop, asvar, message_context
value, noop, asvar, message_context = TranslateParser(token.contents).top()
return TranslateNode(parser.compile_filter(value), noop, asvar,
message_context)
@register.tag("blocktrans")
def do_block_translate(parser, token):
"""
This will translate a block of text with parameters.
Usage::
{% blocktrans with bar=foo|filter boo=baz|filter %}
This is {{ bar }} and {{ boo }}.
{% endblocktrans %}
Additionally, this supports pluralization::
{% blocktrans count count=var|length %}
There is {{ count }} object.
{% plural %}
There are {{ count }} objects.
{% endblocktrans %}
This is much like ngettext, only in template syntax.
The "var as value" legacy format is still supported::
{% blocktrans with foo|filter as bar and baz|filter as boo %}
{% blocktrans count var|length as count %}
Contextual translations are supported::
{% blocktrans with bar=foo|filter context "greeting" %}
This is {{ bar }}.
{% endblocktrans %}
This is equivalent to calling pgettext/npgettext instead of
(u)gettext/(u)ngettext.
"""
bits = token.split_contents()
options = {}
remaining_bits = bits[1:]
while remaining_bits:
option = remaining_bits.pop(0)
if option in options:
raise TemplateSyntaxError('The %r option was specified more '
'than once.' % option)
if option == 'with':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if not value:
raise TemplateSyntaxError('"with" in %r tag needs at least '
'one keyword argument.' % bits[0])
elif option == 'count':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if len(value) != 1:
raise TemplateSyntaxError('"count" in %r tag expected exactly '
'one keyword argument.' % bits[0])
elif option == "context":
try:
value = remaining_bits.pop(0)
value = parser.compile_filter(value)
except Exception:
raise TemplateSyntaxError('"context" in %r tag expected '
'exactly one argument.' % bits[0])
else:
raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
(bits[0], option))
options[option] = value
if 'count' in options:
countervar, counter = options['count'].items()[0]
else:
countervar, counter = None, None
if 'context' in options:
message_context = options['context']
else:
message_context = None
extra_context = options.get('with', {})
singular = []
plural = []
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
singular.append(token)
else:
break
if countervar and counter:
if token.contents.strip() != 'plural':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags inside it")
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
plural.append(token)
else:
break
if token.contents.strip() != 'endblocktrans':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags (seen %r) inside it" % token.contents)
return BlockTranslateNode(extra_context, singular, plural, countervar,
counter, message_context)
@register.tag
def language(parser, token):
    """
    Enable the given language just for the enclosed block.

    Usage::

        {% language "de" %}
            This is {{ bar }} and {{ boo }}.
        {% endlanguage %}
    """
    args = token.split_contents()
    if len(args) != 2:
        raise TemplateSyntaxError("'%s' takes one argument (language)" % args[0])
    lang_code = parser.compile_filter(args[1])
    body = parser.parse(('endlanguage',))
    parser.delete_first_token()
    return LanguageNode(body, lang_code)
| bsd-3-clause |
daviddupont69/CouchPotatoServer | libs/cache/__init__.py | 99 | 8343 | """
copied from
werkzeug.contrib.cache
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from cache.posixemulation import rename
from itertools import izip
from time import time
import os
import re
import tempfile
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
try:
import cPickle as pickle
except ImportError:
import pickle
def _items(mappingorseq):
"""Wrapper for efficient iteration over mappings represented by dicts
or sequences::
>>> for k, v in _items((i, i*i) for i in xrange(5)):
... assert k*k == v
>>> for k, v in _items(dict((i, i*i) for i in xrange(5))):
... assert k*k == v
"""
return mappingorseq.iteritems() if hasattr(mappingorseq, 'iteritems') \
else mappingorseq
class BaseCache(object):
    """Baseclass for the cache systems. All the cache systems implement this
    API or a superset of it.

    NOTE(review): concrete subclasses are expected to provide ``get``; the
    bulk helpers below (``get_many``, ``inc``, ``dec``) call ``self.get``
    even though this base class does not define it.

    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`set`.
    """

    def __init__(self, default_timeout = 300):
        self.default_timeout = default_timeout

    def delete(self, key):
        """Deletes `key` from the cache. If it does not exist in the cache
        nothing happens.

        :param key: the key to delete.
        """
        # Intentional no-op: backends override with a real implementation.
        pass

    def get_many(self, *keys):
        """Returns a list of values for the given keys.
        For each key a item in the list is created. Example::

            foo, bar = cache.get_many("foo", "bar")

        If a key can't be looked up `None` is returned for that key
        instead.

        :param keys: The function accepts multiple keys as positional
                     arguments.
        """
        return map(self.get, keys)

    def get_dict(self, *keys):
        """Works like :meth:`get_many` but returns a dict::

            d = cache.get_dict("foo", "bar")
            foo = d["foo"]
            bar = d["bar"]

        :param keys: The function accepts multiple keys as positional
                     arguments.
        """
        # izip is Python 2's lazy zip; dict() consumes it entirely anyway.
        return dict(izip(keys, self.get_many(*keys)))

    def set(self, key, value, timeout = None):
        """Adds a new key/value to the cache (overwrites value, if key already
        exists in the cache).

        :param key: the key to set
        :param value: the value for the key
        :param timeout: the cache timeout for the key (if not specified,
                        it uses the default timeout).
        """
        pass

    def add(self, key, value, timeout = None):
        """Works like :meth:`set` but does not overwrite the values of already
        existing keys.

        :param key: the key to set
        :param value: the value for the key
        :param timeout: the cache timeout for the key or the default
                        timeout if not specified.
        """
        pass

    def set_many(self, mapping, timeout = None):
        """Sets multiple keys and values from a mapping.

        :param mapping: a mapping with the keys/values to set.
        :param timeout: the cache timeout for the key (if not specified,
                        it uses the default timeout).
        """
        # _items accepts both dict-likes and sequences of pairs.
        for key, value in _items(mapping):
            self.set(key, value, timeout)

    def delete_many(self, *keys):
        """Deletes multiple keys at once.

        :param keys: The function accepts multiple keys as positional
                     arguments.
        """
        for key in keys:
            self.delete(key)

    def clear(self):
        """Clears the cache. Keep in mind that not all caches support
        completely clearing the cache.
        """
        pass

    def inc(self, key, delta = 1):
        """Increments the value of a key by `delta`. If the key does
        not yet exist it is initialized with `delta`.

        For supporting caches this is an atomic operation.

        :param key: the key to increment.
        :param delta: the delta to add.
        """
        # Read-modify-write here, so this default implementation is NOT
        # atomic; backends with native counters override it.
        self.set(key, (self.get(key) or 0) + delta)

    def dec(self, key, delta = 1):
        """Decrements the value of a key by `delta`. If the key does
        not yet exist it is initialized with `-delta`.

        For supporting caches this is an atomic operation.

        :param key: the key to increment.
        :param delta: the delta to subtract.
        """
        # Same non-atomic read-modify-write caveat as inc().
        self.set(key, (self.get(key) or 0) - delta)
class FileSystemCache(BaseCache):
    """A cache that stores the items on the file system. This cache depends
    on being the only user of the `cache_dir`. Make absolutely sure that
    nobody but this cache stores files there or otherwise the cache will
    randomly delete files therein.

    On-disk format: each entry is one file named md5(key), containing two
    consecutive pickles -- first the integer expiry timestamp, then the
    cached value.

    :param cache_dir: the directory where cache files are stored.
    :param threshold: the maximum number of items the cache stores before
                      it starts deleting some.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`.
    :param mode: the file mode wanted for the cache files, default 0600
    """

    #: used for temporary files by the FileSystemCache
    _fs_transaction_suffix = '.__wz_cache'

    def __init__(self, cache_dir, threshold = 500, default_timeout = 300, mode = 0600):
        BaseCache.__init__(self, default_timeout)
        self._path = cache_dir
        self._threshold = threshold
        self._mode = mode
        # Create the cache directory eagerly so set() can assume it exists.
        if not os.path.exists(self._path):
            os.makedirs(self._path)

    def _list_dir(self):
        """return a list of (fully qualified) cache filenames
        """
        # Skip in-flight temp files created by set(); they are renamed
        # into place once fully written.
        return [os.path.join(self._path, fn) for fn in os.listdir(self._path)
                if not fn.endswith(self._fs_transaction_suffix)]

    def _prune(self):
        # Evict entries once the cache grows past the threshold: every
        # expired entry goes, plus every third file regardless of expiry,
        # so the cache shrinks even when nothing has expired yet.
        entries = self._list_dir()
        if len(entries) > self._threshold:
            now = time()
            for idx, fname in enumerate(entries):
                remove = False
                f = None
                try:
                    try:
                        f = open(fname, 'rb')
                        # The first pickle in each file is the expiry time.
                        expires = pickle.load(f)
                        remove = expires <= now or idx % 3 == 0
                    finally:
                        if f is not None:
                            f.close()
                except Exception:
                    pass
                if remove:
                    try:
                        os.remove(fname)
                    except (IOError, OSError):
                        # Best effort: a concurrent delete is fine.
                        pass

    def clear(self):
        for fname in self._list_dir():
            try:
                os.remove(fname)
            except (IOError, OSError):
                pass

    def _get_filename(self, key):
        # Hash the key so arbitrary strings map to safe, fixed-length names.
        hash = md5(key).hexdigest()
        return os.path.join(self._path, hash)

    def get(self, key):
        filename = self._get_filename(key)
        try:
            f = open(filename, 'rb')
            try:
                # Pickle #1 is the expiry timestamp, pickle #2 the value.
                if pickle.load(f) >= time():
                    return pickle.load(f)
            finally:
                f.close()
            # Only reached when the entry has expired: drop it from disk
            # and fall through to the implicit None (cache miss).
            os.remove(filename)
        except Exception:
            # Missing file, unpickling error or a race with clear()/_prune()
            # are all treated as a plain cache miss.
            return None

    def add(self, key, value, timeout = None):
        # NOTE(review): existence check and write are not atomic; a
        # concurrent set() between the two can be overwritten.
        filename = self._get_filename(key)
        if not os.path.exists(filename):
            self.set(key, value, timeout)

    def set(self, key, value, timeout = None):
        if timeout is None:
            timeout = self.default_timeout
        filename = self._get_filename(key)
        self._prune()
        try:
            # Write to a temp file in the same directory, then rename over
            # the target so readers never observe a half-written entry.
            fd, tmp = tempfile.mkstemp(suffix = self._fs_transaction_suffix,
                                       dir = self._path)
            f = os.fdopen(fd, 'wb')
            try:
                pickle.dump(int(time() + timeout), f, 1)
                pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
            finally:
                f.close()
            rename(tmp, filename)
            os.chmod(filename, self._mode)
        except (IOError, OSError):
            # Caching is best effort: filesystem errors are swallowed.
            pass

    def delete(self, key):
        try:
            os.remove(self._get_filename(key))
        except (IOError, OSError):
            pass
| gpl-3.0 |
cryptapus/electrum-server-uno | src/utils.py | 1 | 6758 | #!/usr/bin/env python
# Copyright(C) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from itertools import imap
import threading
import time
import hashlib
import struct
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
global PUBKEY_ADDRESS
global SCRIPT_ADDRESS
PUBKEY_ADDRESS = 130
SCRIPT_ADDRESS = 30
def rev_hex(s):
    # Reverse the byte order of a hex string (hex -> bytes -> reverse -> hex),
    # converting between big-endian display order and little-endian wire
    # order. Python 2 only: relies on the str 'hex' codec.
    return s.decode('hex')[::-1].encode('hex')
# Double SHA-256, used below for address checksums (see EncodeBase58Check).
Hash = lambda x: hashlib.sha256(hashlib.sha256(x).digest()).digest()

# Convert a binary hash to/from the byte-reversed hex form used throughout
# this module (Python 2 str 'hex' codec).
hash_encode = lambda x: x[::-1].encode('hex')
hash_decode = lambda x: x.decode('hex')[::-1]
def header_to_string(res):
    """Serialize a block-header dict into its packed hex representation.

    All six fields are emitted little-endian (int_to_hex4 / rev_hex), the
    inverse of header_from_string below.
    """
    pbh = res.get('prev_block_hash')
    if pbh is None:
        # No predecessor recorded: substitute an all-zero hash
        # (presumably the chain's first block -- verify against callers).
        pbh = '0'*64
    return int_to_hex4(res.get('version')) \
        + rev_hex(pbh) \
        + rev_hex(res.get('merkle_root')) \
        + int_to_hex4(int(res.get('timestamp'))) \
        + int_to_hex4(int(res.get('bits'))) \
        + int_to_hex4(int(res.get('nonce')))
# Pre-compiled little-endian codecs for 32- and 64-bit unsigned integers;
# struct.Struct avoids re-parsing the format string on every call.
_unpack_bytes4_to_int = struct.Struct('<L').unpack
_unpack_bytes8_to_int = struct.Struct('<Q').unpack

def bytes4_to_int(s):
    """Decode a 4-byte little-endian string as an unsigned integer."""
    (value,) = _unpack_bytes4_to_int(s)
    return value

def bytes8_to_int(s):
    """Decode an 8-byte little-endian string as an unsigned integer."""
    (value,) = _unpack_bytes8_to_int(s)
    return value

int_to_bytes4 = struct.Struct('<L').pack
int_to_bytes8 = struct.Struct('<Q').pack

def int_to_hex4(i):
    """Encode an unsigned int as 8 little-endian hex chars (Python 2)."""
    return int_to_bytes4(i).encode('hex')

def int_to_hex8(i):
    """Encode an unsigned int as 16 little-endian hex chars (Python 2)."""
    return int_to_bytes8(i).encode('hex')
def header_from_string(s):
    """Slice a packed 80-byte block header into its six fields.

    Integers are little-endian; the two hashes are byte-reversed into
    display order by hash_encode. Inverse of header_to_string.
    """
    return {
        'version': bytes4_to_int(s[0:4]),
        'prev_block_hash': hash_encode(s[4:36]),
        'merkle_root': hash_encode(s[36:68]),
        'timestamp': bytes4_to_int(s[68:72]),
        'bits': bytes4_to_int(s[72:76]),
        'nonce': bytes4_to_int(s[76:80]),
    }
############ functions from pywallet #####################
def hash_160(public_key):
    """Return RIPEMD160(SHA256(public_key)), the 20-byte hash used to
    build addresses.

    Falls back to the bundled pure-python `ripemd` module when hashlib's
    OpenSSL backend does not provide the 'ripemd160' algorithm.
    """
    try:
        md = hashlib.new('ripemd160')
    except ValueError:
        # hashlib.new raises ValueError for unsupported algorithms. The
        # previous bare `except:` also masked unrelated bugs (and even
        # KeyboardInterrupt/SystemExit), so catch only that case here.
        import ripemd
        md = ripemd.new(hashlib.sha256(public_key).digest())
        return md.digest()
    md.update(hashlib.sha256(public_key).digest())
    return md.digest()
def public_key_to_pubkey_address(public_key):
    """Hash a serialized public key and encode it as a pubkey address."""
    h160 = hash_160(public_key)
    return hash_160_to_pubkey_address(h160)

def public_key_to_bc_address(public_key):
    """ deprecated """
    # Kept only for backwards compatibility with older callers.
    return public_key_to_pubkey_address(public_key)
def hash_160_to_pubkey_address(h160, addrtype=None):
    """Encode a 20-byte hash as a pubkey address.

    :param h160: the 20-byte hash-160 to encode.
    :param addrtype: optional version byte overriding the module-level
                     PUBKEY_ADDRESS default.

    Fixed: this function used to be defined twice; the second definition
    silently shadowed the first and dropped the `addrtype` parameter.
    The merged version keeps both call signatures working.
    """
    if not addrtype:
        addrtype = PUBKEY_ADDRESS
    return hash_160_to_address(h160, addrtype)

def hash_160_to_script_address(h160):
    """Encode a 20-byte hash as a script address (SCRIPT_ADDRESS version)."""
    return hash_160_to_address(h160, SCRIPT_ADDRESS)
def hash_160_to_address(h160, addrtype = 0):
    """ Checks if the provided hash is actually 160bits or 20 bytes long and returns the address, else None
    """
    # Fixed: `len(h160) is not 20` compared int identity and only worked
    # by accident (CPython small-int caching); use real equality.
    if h160 is None or len(h160) != 20:
        return None
    # version byte + payload + first 4 bytes of the double-SHA256 checksum
    vh160 = chr(addrtype) + h160
    h = Hash(vh160)
    addr = vh160 + h[0:4]
    return b58encode(addr)
def bc_address_to_hash_160(addr):
    """Decode a base58 address back to its 20-byte hash-160.

    Returns None for an empty/None address or when decoding fails.
    """
    # Fixed: `len(addr) is 0` relied on small-int identity; use equality.
    # Also renamed the local so it no longer shadows the `bytes` builtin.
    if addr is None or len(addr) == 0:
        return None
    decoded = b58decode(addr, 25)
    return decoded[1:21] if decoded is not None else None
def b58encode(v):
    """encode v, which is a string of bytes, to base58."""
    # Fixed: the old `0L` literal is a SyntaxError under Python 3 and is
    # unnecessary on Python 2, where ints auto-promote to long.
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += (256**i) * ord(c)

    result = ''
    while long_value >= __b58base:
        div, mod = divmod(long_value, __b58base)
        result = __b58chars[mod] + result
        long_value = div
    result = __b58chars[long_value] + result

    # Bitcoin does a little leading-zero-compression:
    # leading 0-bytes in the input become leading-1s
    nPad = 0
    for c in v:
        if c == '\0':
            nPad += 1
        else:
            break
    return (__b58chars[0]*nPad) + result
def b58decode(v, length):
    """ decode v into a string of len bytes.

    Returns None when `length` is given and the decoded string is not
    exactly that many bytes long.
    """
    # Fixed: `0L` literal removed (Python 3 SyntaxError, redundant on
    # Python 2 where ints auto-promote to long).
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += __b58chars.find(c) * (__b58base**i)

    result = ''
    while long_value >= 256:
        div, mod = divmod(long_value, 256)
        result = chr(mod) + result
        long_value = div
    result = chr(long_value) + result

    # Leading '1' characters decode back to leading zero bytes.
    nPad = 0
    for c in v:
        if c == __b58chars[0]:
            nPad += 1
        else:
            break
    result = chr(0)*nPad + result
    if length is not None and len(result) != length:
        return None
    return result
def EncodeBase58Check(vchIn):
    """Append the 4-byte double-SHA256 checksum and base58-encode."""
    checksum = Hash(vchIn)[0:4]
    return b58encode(vchIn + checksum)

def DecodeBase58Check(psz):
    """Base58-decode and verify the trailing checksum; None when invalid."""
    vchRet = b58decode(psz, None)
    key = vchRet[0:-4]
    csum = vchRet[-4:]
    if Hash(key)[0:4] != csum:
        return None
    return key
########### end pywallet functions #######################
import os
def random_string(length):
    """Return `length` bytes from the OS CSPRNG, base58-encoded."""
    entropy = os.urandom(length)
    return b58encode(entropy)

def timestr():
    """Current local time in the log format: [dd/mm/YYYY-HH:MM:SS]."""
    return time.strftime("[%d/%m/%Y-%H:%M:%S]")
### logger
import logging
import logging.handlers
logging.basicConfig(format="%(asctime)-11s %(message)s", datefmt="[%d/%m/%Y-%H:%M:%S]")
logger = logging.getLogger('electrum')
def init_logger():
    # Raise the shared 'electrum' logger to INFO; called once at startup.
    logger.setLevel(logging.INFO)

def print_log(*args):
    # Stringify every argument and log the space-joined result at INFO.
    # (imap is Python 2's lazy map, imported at the top of this module.)
    logger.info(" ".join(imap(str, args)))

def print_warning(message):
    # Thin wrapper so call sites don't each configure logging.
    logger.warning(message)
# profiler
class ProfiledThread(threading.Thread):
    """Thread that runs `target` under cProfile and dumps the collected
    stats to `filename` once the target returns."""

    def __init__(self, filename, target):
        self.filename = filename
        threading.Thread.__init__(self, target=target)

    def run(self):
        # Imported lazily so the profiler is only loaded when profiling
        # is actually requested.
        import cProfile
        prof = cProfile.Profile()
        prof.enable()
        threading.Thread.run(self)
        prof.disable()
        prof.dump_stats(self.filename)
| mit |
20c/pybird | pybird/__init__.py | 1 | 24458 | from builtins import str
from builtins import next
from builtins import object
import logging
import re
import socket
from datetime import datetime, timedelta
from subprocess import Popen, PIPE
class PyBird(object):
ignored_field_numbers = [0, 1, 13, 1008, 2002, 9001]
def __init__(self, socket_file, hostname=None, user=None, password=None, config_file=None, bird_cmd=None):
"""Basic pybird setup.
Required argument: socket_file: full path to the BIRD control socket."""
self.socket_file = socket_file
self.hostname = hostname
self.user = user
self.password = password
self.config_file = config_file
if not bird_cmd:
self.bird_cmd = 'birdc'
else:
self.bird_cmd = bird_cmd
self.clean_input_re = re.compile(r'\W+')
self.field_number_re = re.compile(r'^(\d+)[ -]')
self.routes_field_re = re.compile(r'(\d+) imported, (\d+) exported')
self.log = logging.getLogger(__name__)
def get_config(self):
if not self.config_file:
raise ValueError("config_file is not set")
return self._read_file(self.config_file)
def put_config(self, data):
if not self.config_file:
raise ValueError("config_file is not set")
return self._write_file(data, self.config_file)
def commit_config(self):
return
def check_config(self):
query = "configure check"
data = self._send_query(query)
if not self.socket_file:
return data
err = self._parse_configure(data)
if err:
raise ValueError(err)
def get_bird_status(self):
"""Get the status of the BIRD instance. Returns a dict with keys:
- router_id (string)
- last_reboot (datetime)
- last_reconfiguration (datetime)"""
query = "show status"
data = self._send_query(query)
if not self.socket_file:
return data
return self._parse_status(data)
def _parse_status(self, data):
line_iterator = iter(data.splitlines())
data = {}
for line in line_iterator:
line = line.strip()
self.log.debug("PyBird: parse status: %s", line)
(field_number, line) = self._extract_field_number(line)
if field_number in self.ignored_field_numbers:
continue
if field_number == 1000:
data['version'] = line.split(' ')[1]
elif field_number == 1011:
# Parse the status section, which looks like:
# 1011-Router ID is 195.69.146.34
# Current server time is 10-01-2012 10:24:37
# Last reboot on 03-01-2012 12:46:40
# Last reconfiguration on 03-01-2012 12:46:40
data['router_id'] = self._parse_router_status_line(line)
line = next(line_iterator) # skip current server time
self.log.debug("PyBird: parse status: %s", line)
line = next(line_iterator)
self.log.debug("PyBird: parse status: %s", line)
data['last_reboot'] = self._parse_router_status_line(
line, parse_date=True)
line = next(line_iterator)
self.log.debug("PyBird: parse status: %s", line)
data['last_reconfiguration'] = self._parse_router_status_line(
line, parse_date=True)
return data
def _parse_configure(self, data):
"""
returns error on error, None on success
0001 BIRD 1.4.5 ready.
0002-Reading configuration from /home/grizz/c/20c/tstbird/dev3.conf
8002 /home/grizz/c/20c/tstbird/dev3.conf, line 3: syntax error
0001 BIRD 1.4.5 ready.
0002-Reading configuration from /home/grizz/c/20c/tstbird/dev3.conf
0020 Configuration OK
0004 Reconfiguration in progress
0018 Reconfiguration confirmed
0003 Reconfigured
bogus undo:
0019 Nothing to do
"""
error_fields = (19, 8002)
success_fields = (3, 4, 18, 20)
for line in data.splitlines():
self.log.debug("PyBird: parse configure: %s", line)
fieldno, line = self._extract_field_number(line)
if fieldno == 2:
if not self.config_file:
self.config_file = line.split(' ')[3]
elif fieldno in error_fields:
return line
elif fieldno in success_fields:
return
raise ValueError("unable to parse configure response")
def _parse_router_status_line(self, line, parse_date=False):
"""Parse a line like:
Current server time is 10-01-2012 10:24:37
optionally (if parse_date=True), parse it into a datetime"""
data = line.strip().split(' ', 3)[-1]
if parse_date:
try:
return datetime.strptime(data, '%Y-%m-%d %H:%M:%S')
# old versions of bird used DD-MM-YYYY
except ValueError:
return datetime.strptime(data, '%d-%m-%Y %H:%M:%S')
else:
return data
def configure(self, soft=False, timeout=0):
    """
    birdc configure command: load and apply the configuration.

    :param soft: issue `configure soft` (keep filters on existing routes).
    :param timeout: when non-zero, issue `configure timeout <n>` so BIRD
                    rolls back unless the change is confirmed in time.

    Fixed: this method previously sent "configure check" (a dry run that
    never applies anything, duplicating check_config) and silently
    ignored both `soft` and `timeout`.
    """
    query = "configure"
    if soft:
        query += " soft"
    if timeout:
        query += " timeout %d" % timeout
    data = self._send_query(query)
    if not self.socket_file:
        return data
    err = self._parse_configure(data)
    if err:
        raise ValueError(err)
def get_routes(self, prefix=None, peer=None):
query = "show route all"
if prefix:
query += " for {}".format(prefix)
if peer:
query += " protocol {}".format(peer)
data = self._send_query(query)
return self._parse_route_data(data)
# deprecated by get_routes_received
def get_peer_prefixes_announced(self, peer_name):
"""Get prefixes announced by a specific peer, without applying
filters - i.e. this includes routes which were not accepted"""
clean_peer_name = self._clean_input(peer_name)
query = "show route table T_%s all protocol %s" % (
clean_peer_name, clean_peer_name)
data = self._send_query(query)
return self._parse_route_data(data)
def get_routes_received(self, peer=None):
return self.get_peer_prefixes_announced(peer)
def get_peer_prefixes_exported(self, peer_name):
"""Get prefixes exported TO a specific peer"""
clean_peer_name = self._clean_input(peer_name)
query = "show route all table T_%s export %s" % (
clean_peer_name, clean_peer_name)
data = self._send_query(query)
if not self.socket_file:
return data
return self._parse_route_data(data)
def get_peer_prefixes_accepted(self, peer_name):
"""Get prefixes announced by a specific peer, which were also
accepted by the filters"""
query = "show route all protocol %s" % self._clean_input(peer_name)
data = self._send_query(query)
return self._parse_route_data(data)
def get_peer_prefixes_rejected(self, peer_name):
    """Return the routes announced by `peer_name` that were not accepted
    by the filters (announced minus accepted), in announcement order.
    """
    announced = self.get_peer_prefixes_announced(peer_name)
    accepted = self.get_peer_prefixes_accepted(peer_name)
    # Set membership instead of nested list scans: the previous version
    # was accidentally O(n*m) over the two route lists.
    accepted_prefixes = set(route['prefix'] for route in accepted)
    return [route for route in announced
            if route['prefix'] not in accepted_prefixes]
def get_prefix_info(self, prefix, peer_name=None):
"""Get route-info for specified prefix"""
query = "show route for %s all" % prefix
if peer_name is not None:
query += " protocol %s" % peer_name
data = self._send_query(query)
if not self.socket_file:
return data
return self._parse_route_data(data)
def _parse_route_data(self, data):
"""Parse a blob like:
0001 BIRD 1.3.3 ready.
1007-2a02:898::/32 via 2001:7f8:1::a500:8954:1 on eth1 [PS2 12:46] * (100) [AS8283i]
1008- Type: BGP unicast univ
1012- BGP.origin: IGP
BGP.as_path: 8954 8283
BGP.next_hop: 2001:7f8:1::a500:8954:1 fe80::21f:caff:fe16:e02
BGP.local_pref: 100
BGP.community: (8954,620)
[....]
0000
"""
lines = data.splitlines()
routes = []
route_summary = None
self.log.debug("PyBird: parse route data: lines=%d", len(lines))
line_counter = -1
while line_counter < len(lines) - 1:
line_counter += 1
line = lines[line_counter].strip()
self.log.debug("PyBird: parse route data: %s", line)
(field_number, line) = self._extract_field_number(line)
if field_number in self.ignored_field_numbers:
continue
if field_number == 1007:
route_summary = self._parse_route_summary(line)
route_detail = None
if field_number == 1012:
if not route_summary:
# This is not detail of a BGP route
continue
# A route detail spans multiple lines, read them all
route_detail_raw = []
while 'BGP.' in line:
route_detail_raw.append(line)
line_counter += 1
line = lines[line_counter]
self.log.debug("PyBird: parse route data: %s", line)
# this loop will have walked a bit too far, correct it
line_counter -= 1
route_detail = self._parse_route_detail(route_detail_raw)
# Save the summary+detail info in our result
route_detail.update(route_summary)
# Do not use this summary again on the next run
route_summary = None
routes.append(route_detail)
if field_number == 8001:
# network not in table
return []
return routes
def _re_route_summary(self):
return re.compile(
r"(?P<prefix>[a-f0-9\.:\/]+)?\s+"
r"(?:via\s+(?P<peer>[^\s]+) on (?P<interface>[^\s]+)|(?:\w+)?)?\s*"
r"\[(?P<source>[^\s]+) (?P<time>[^\]\s]+)(?: from (?P<peer2>[^\s]+))?\]"
)
def _parse_route_summary(self, line):
"""Parse a line like:
2a02:898::/32 via 2001:7f8:1::a500:8954:1 on eth1 [PS2 12:46] * (100) [AS8283i]
"""
match = self._re_route_summary().match(line)
if not match:
raise ValueError("couldn't parse line '{}'".format(line))
# Note that split acts on sections of whitespace - not just single
# chars
route = match.groupdict()
# python regex doesn't allow group name reuse
if not route['peer']:
route['peer'] = route.pop('peer2')
else:
del route['peer2']
return route
def _parse_route_detail(self, lines):
"""Parse a blob like:
1012- BGP.origin: IGP
BGP.as_path: 8954 8283
BGP.next_hop: 2001:7f8:1::a500:8954:1 fe80::21f:caff:fe16:e02
BGP.local_pref: 100
BGP.community: (8954,620)
"""
attributes = {}
for line in lines:
line = line.strip()
self.log.debug("PyBird: parse route details: %s", line)
# remove 'BGP.'
line = line[4:]
parts = line.split(": ")
if len(parts) == 2:
(key, value) = parts
else:
# handle [BGP.atomic_aggr:]
key = parts[0].strip(":")
value = True
if key == 'community':
# convert (8954,220) (8954,620) to 8954:220 8954:620
value = value.replace(",", ":").replace(
"(", "").replace(")", "")
attributes[key] = value
return attributes
def get_peer_status(self, peer_name=None):
"""Get the status of all peers or a specific peer.
Optional argument: peer_name: case-sensitive full name of a peer,
as configured in BIRD.
If no argument is given, returns a list of peers - each peer represented
by a dict with fields. See README for a full list.
If a peer_name argument is given, returns a single peer, represented
as a dict. If the peer is not found, returns a zero length array.
"""
if peer_name:
query = 'show protocols all "%s"' % self._clean_input(peer_name)
else:
query = 'show protocols all'
data = self._send_query(query)
if not self.socket_file:
return data
peers = self._parse_peer_data(data=data, data_contains_detail=True)
if not peer_name:
return peers
if len(peers) == 0:
return []
elif len(peers) > 1:
raise ValueError(
"Searched for a specific peer, but got multiple returned from BIRD?")
else:
return peers[0]
def _parse_peer_data(self, data, data_contains_detail):
    """Parse the data from BIRD to find peer information.

    `data_contains_detail` says whether each 1002 summary line is followed
    by a 1006 detail section that should be merged into the result.
    """
    lineiterator = iter(data.splitlines())
    peers = []
    peer_summary = None
    for line in lineiterator:
        line = line.strip()
        (field_number, line) = self._extract_field_number(line)
        if field_number in self.ignored_field_numbers:
            continue
        if field_number == 1002:
            peer_summary = self._parse_peer_summary(line)
            if peer_summary['protocol'] != 'BGP':
                peer_summary = None
                continue
            # If there is no detail section to be expected,
            # we are done.
            if not data_contains_detail:
                # Fixed: this called the non-existent list method
                # peers.append_peer_summary(), raising AttributeError
                # whenever data_contains_detail was False.
                peers.append(peer_summary)
                continue
        peer_detail = None
        if field_number == 1006:
            if not peer_summary:
                # This is not detail of a BGP peer
                continue
            # A peer summary spans multiple lines, read them all
            peer_detail_raw = []
            while line.strip() != "":
                peer_detail_raw.append(line)
                line = next(lineiterator)
            peer_detail = self._parse_peer_detail(peer_detail_raw)
            # Save the summary+detail info in our result
            peer_detail.update(peer_summary)
            peers.append(peer_detail)
            # Do not use this summary again on the next run
            peer_summary = None
    return peers
def _parse_peer_summary(self, line):
"""Parse the summary of a peer line, like:
PS1 BGP T_PS1 start Jun13 Passive
Returns a dict with the fields:
name, protocol, last_change, state, up
("PS1", "BGP", "Jun13", "Passive", False)
"""
elements = line.split()
try:
if ':' in elements[5]: # newer versions include a timestamp before the state
state = elements[6]
else:
state = elements[5]
up = (state.lower() == "established")
except IndexError:
state = None
up = None
raw_datetime = elements[4]
last_change = self._calculate_datetime(raw_datetime)
return {
'name': elements[0],
'protocol': elements[1],
'last_change': last_change,
'state': state,
'up': up,
}
def _parse_peer_detail(self, peer_detail_raw):
"""Parse the detailed peer information from BIRD, like:
1006- Description: Peering AS8954 - InTouch
Preference: 100
Input filter: ACCEPT
Output filter: ACCEPT
Routes: 24 imported, 23 exported, 0 preferred
Route change stats: received rejected filtered ignored accepted
Import updates: 50 3 19 0 0
Import withdraws: 0 0 --- 0 0
Export updates: 0 0 0 --- 0
Export withdraws: 0 --- --- --- 0
BGP state: Established
Session: external route-server AS4
Neighbor AS: 8954
Neighbor ID: 85.184.4.5
Neighbor address: 2001:7f8:1::a500:8954:1
Source address: 2001:7f8:1::a519:7754:1
Neighbor caps: refresh AS4
Route limit: 9/1000
Hold timer: 112/180
Keepalive timer: 16/60
peer_detail_raw must be an array, where each element is a line of BIRD output.
Returns a dict with the fields, if the peering is up:
routes_imported, routes_exported, router_id
and all combinations of:
[import,export]_[updates,withdraws]_[received,rejected,filtered,ignored,accepted]
wfor which the value above is not "---"
"""
result = {}
route_change_fields = [
"import updates",
"import withdraws",
"export updates",
"export withdraws"
]
field_map = {
'description': 'description',
'neighbor id': 'router_id',
'neighbor address': 'address',
'neighbor as': 'asn',
}
lineiterator = iter(peer_detail_raw)
for line in lineiterator:
line = line.strip()
(field, value) = line.split(":", 1)
value = value.strip()
if field.lower() == "routes":
routes = self.routes_field_re.findall(value)[0]
result['routes_imported'] = int(routes[0])
result['routes_exported'] = int(routes[1])
if field.lower() in route_change_fields:
(received, rejected, filtered, ignored, accepted) = value.split()
key_name_base = field.lower().replace(' ', '_')
self._parse_route_stats(
result, key_name_base + '_received', received)
self._parse_route_stats(
result, key_name_base + '_rejected', rejected)
self._parse_route_stats(
result, key_name_base + '_filtered', filtered)
self._parse_route_stats(
result, key_name_base + '_ignored', ignored)
self._parse_route_stats(
result, key_name_base + '_accepted', accepted)
if field.lower() in field_map.keys():
result[field_map[field.lower()]] = value
return result
def _parse_route_stats(self, result_dict, key_name, value):
if value.strip() == "---":
return
result_dict[key_name] = int(value)
def _extract_field_number(self, line):
"""Parse the field type number from a line.
Line must start with a number, followed by a dash or space.
Returns a tuple of (field_number, cleaned_line), where field_number
is None if no number was found, and cleaned_line is the line without
the field number, if applicable.
"""
matches = self.field_number_re.findall(line)
if len(matches):
field_number = int(matches[0])
cleaned_line = self.field_number_re.sub('', line).strip('-')
return (field_number, cleaned_line)
else:
return (None, line)
def _calculate_datetime(self, value, now=datetime.now()):
"""Turn the BIRD date format into a python datetime."""
# Case: YYYY-MM-DD HH:MM:SS
try:
return datetime(*map(int, (value[:4], value[5:7], value[8:10], value[11:13], value[14:16], value[17:19])))
except ValueError:
pass
# Case: YYYY-MM-DD
try:
return datetime(*map(int, (value[:4], value[5:7], value[8:10])))
except ValueError:
pass
# Case: HH:mm or HH:mm:ss timestamp
try:
try:
parsed_value = datetime.strptime(value, "%H:%M")
except ValueError:
parsed_value = datetime.strptime(value, "%H:%M:%S")
result_date = datetime(
now.year, now.month, now.day, parsed_value.hour, parsed_value.minute)
if now.hour < parsed_value.hour or (now.hour == parsed_value.hour and now.minute < parsed_value.minute):
result_date = result_date - timedelta(days=1)
return result_date
except ValueError:
# It's a different format, keep on processing
pass
# Case: "Jun13" timestamp
try:
parsed = datetime.strptime(value, '%b%d')
# if now is past the month, it's this year, else last year
if now.month == parsed.month:
# bird shows time for same day
if now.day <= parsed.day:
year = now.year - 1
else:
year = now.year
elif now.month > parsed.month:
year = now.year
else:
year = now.year - 1
result_date = datetime(year, parsed.month, parsed.day)
return result_date
except ValueError:
pass
# Case: plain year
try:
year = int(value)
return datetime(year, 1, 1)
except ValueError:
raise ValueError("Can not parse datetime: [%s]" % value)
def _remote_cmd(self, cmd, inp=None):
to = '{}@{}'.format(self.user, self.hostname)
proc = Popen(['ssh', to, cmd], stdin=PIPE, stdout=PIPE)
res = proc.communicate(input=inp)[0]
return res
def _read_file(self, fname):
if self.hostname:
cmd = "cat " + fname
return self._remote_cmd(cmd)
with open(fname) as fobj:
return fobj.read()
def _write_file(self, data, fname):
if self.hostname:
cmd = "cat >" + fname
self._remote_cmd(cmd, inp=data)
return
with open(fname, 'w') as fobj:
fobj.write(data)
return
def _send_query(self, query):
self.log.debug("PyBird: query: %s", query)
if self.hostname:
return self._remote_query(query)
return self._socket_query(query)
def _remote_query(self, query):
"""
mimic a direct socket connect over ssh
"""
cmd = "{} -v -s {} '{}'".format(self.bird_cmd, self.socket_file, query)
res = self._remote_cmd(cmd)
res += "0000\n"
return res
def _socket_query(self, query):
"""Open a socket to the BIRD control socket, send the query and get
the response.
"""
if not isinstance(query, bytes):
query = query.encode('utf-8')
if not query.endswith(b'\n'):
query += b'\n'
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.socket_file)
sock.send(query)
data = b''
while ((data.find(b"\n0000") == -1) and
(data.find(b"\n8003") == -1) and
(data.find(b"\n0013") == -1) and
(data.find(b"\n9001") == -1) and
(data.find(b"\n8001") == -1)):
this_read = sock.recv(1024)
if not this_read:
self.log.debug(data)
raise ValueError("Could not read additional data from BIRD")
data += this_read
sock.close()
return data.decode('utf-8')
def _clean_input(self, inp):
"""Clean the input string of anything not plain alphanumeric chars,
return the cleaned string."""
return self.clean_input_re.sub('', inp).strip()
| apache-2.0 |
romankagan/DDBWorkbench | plugins/hg4idea/testData/bin/mercurial/minirst.py | 92 | 24503 | # minirst.py - minimal reStructuredText parser
#
# Copyright 2009, 2010 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""simplified reStructuredText parser.
This parser knows just enough about reStructuredText to parse the
Mercurial docstrings.
It cheats in a major way: nested blocks are not really nested. They
are just indented blocks that look like they are nested. This relies
on the user to keep the right indentation for the blocks.
Remember to update http://mercurial.selenic.com/wiki/HelpStyleGuide
when adding support for new constructs.
"""
import re
import util, encoding
from i18n import _
import cgi
def section(s):
return "%s\n%s\n\n" % (s, "\"" * encoding.colwidth(s))
def subsection(s):
return "%s\n%s\n\n" % (s, '=' * encoding.colwidth(s))
def subsubsection(s):
    """Render *s* as a third-level heading, underlined with '-'."""
    bar = "-" * encoding.colwidth(s)
    return s + "\n" + bar + "\n\n"
def subsubsubsection(s):
    """Render *s* as a fourth-level heading, underlined with '.'."""
    bar = "." * encoding.colwidth(s)
    return s + "\n" + bar + "\n\n"
def replace(text, substs):
    '''
    Apply a list of (find, replace) pairs to a text.

    *text* is a byte string in the local encoding; each substitution is
    applied on the decoded (unicode) form and the result is re-encoded,
    so multi-byte characters are never corrupted mid-sequence.

    >>> replace("foo bar", [('f', 'F'), ('b', 'B')])
    'Foo Bar'
    >>> encoding.encoding = 'latin1'
    >>> replace('\\x81\\\\', [('\\\\', '/')])
    '\\x81/'
    >>> encoding.encoding = 'shiftjis'
    >>> replace('\\x81\\\\', [('\\\\', '/')])
    '\\x81\\\\'
    '''
    # some character encodings (cp932 for Japanese, at least) use
    # ASCII characters other than control/alphabet/digit as a part of
    # multi-bytes characters, so direct replacing with such characters
    # on strings in local encoding causes invalid byte sequences.
    utext = text.decode(encoding.encoding)
    for f, t in substs:
        utext = utext.replace(f, t)
    return utext.encode(encoding.encoding)
# One or more blank (possibly whitespace-only) lines separate blocks.
_blockre = re.compile(r"\n(?:\s*\n)+")

def findblocks(text):
    """Split *text* into continuous blocks of lines.

    Returns a list of dicts, each with an 'indent' field (the common
    leading indentation, which is stripped from the lines) and a
    'lines' field (the de-indented lines of the block).
    """
    result = []
    for chunk in _blockre.split(text.lstrip('\n').rstrip()):
        chunklines = chunk.splitlines()
        if not chunklines:
            continue
        margin = min(len(l) - len(l.lstrip()) for l in chunklines)
        result.append(dict(indent=margin,
                           lines=[l[margin:] for l in chunklines]))
    return result
def findliteralblocks(blocks):
    """Finds literal blocks and adds a 'type' field to the blocks.

    Literal blocks are given the type 'literal', all other blocks are
    given type the 'paragraph'.  Mutates *blocks* in place (the '::'
    markers are stripped/collapsed) and returns it.
    """
    i = 0
    while i < len(blocks):
        # Searching for a block that looks like this:
        #
        # +------------------------------+
        # | paragraph                    |
        # | (ends with "::")             |
        # +------------------------------+
        #    +---------------------------+
        #    | indented literal block    |
        #    +---------------------------+
        blocks[i]['type'] = 'paragraph'
        if blocks[i]['lines'][-1].endswith('::') and i + 1 < len(blocks):
            indent = blocks[i]['indent']
            adjustment = blocks[i + 1]['indent'] - indent
            if blocks[i]['lines'] == ['::']:
                # Expanded form: remove block
                del blocks[i]
                i -= 1
            elif blocks[i]['lines'][-1].endswith(' ::'):
                # Partially minimized form: remove space and both
                # colons.
                blocks[i]['lines'][-1] = blocks[i]['lines'][-1][:-3]
            else:
                # Fully minimized form: remove just one colon.
                blocks[i]['lines'][-1] = blocks[i]['lines'][-1][:-1]
            # List items are formatted with a hanging indent. We must
            # correct for this here while we still have the original
            # information on the indentation of the subsequent literal
            # blocks available.
            m = _bulletre.match(blocks[i]['lines'][0])
            if m:
                indent += m.end()
                adjustment -= m.end()
            # Mark the following indented blocks.
            while i + 1 < len(blocks) and blocks[i + 1]['indent'] > indent:
                blocks[i + 1]['type'] = 'literal'
                blocks[i + 1]['indent'] -= adjustment
                i += 1
        i += 1
    return blocks
_bulletre = re.compile(r'(-|[0-9A-Za-z]+\.|\(?[0-9A-Za-z]+\)|\|) ')
_optionre = re.compile(r'^(-([a-zA-Z0-9]), )?(--[a-z0-9-]+)'
r'((.*) +)(.*)$')
_fieldre = re.compile(r':(?![: ])([^:]*)(?<! ):[ ]+(.*)')
_definitionre = re.compile(r'[^ ]')
_tablere = re.compile(r'(=+\s+)*=+')
def splitparagraphs(blocks):
    """Split paragraphs into lists.

    Paragraph blocks whose lines look like bullet/option/field/
    definition list items are replaced in place by one block per item.
    """
    # Tuples with (list type, item regexp, single line items?). Order
    # matters: definition lists has the least specific regexp and must
    # come last.
    listtypes = [('bullet', _bulletre, True),
                 ('option', _optionre, True),
                 ('field', _fieldre, True),
                 ('definition', _definitionre, False)]
    def match(lines, i, itemre, singleline):
        """Does itemre match an item at line i?

        A list item can be followed by an indented line or another list
        item (but only if singleline is True).
        """
        line1 = lines[i]
        line2 = i + 1 < len(lines) and lines[i + 1] or ''
        if not itemre.match(line1):
            return False
        if singleline:
            return line2 == '' or line2[0] == ' ' or itemre.match(line2)
        else:
            return line2.startswith(' ')
    i = 0
    while i < len(blocks):
        if blocks[i]['type'] == 'paragraph':
            lines = blocks[i]['lines']
            for type, itemre, singleline in listtypes:
                if match(lines, 0, itemre, singleline):
                    items = []
                    for j, line in enumerate(lines):
                        if match(lines, j, itemre, singleline):
                            # each matching line starts a new item block
                            items.append(dict(type=type, lines=[],
                                              indent=blocks[i]['indent']))
                        items[-1]['lines'].append(line)
                    # splice the item blocks in place of the paragraph
                    blocks[i:i + 1] = items
                    break
        i += 1
    return blocks
# Column width reserved for the key when rendering field lists.
_fieldwidth = 14

def updatefieldlists(blocks):
    """Extract the ':key:' part of each field-list block.

    The key is stored under the block's 'key' entry and the first line
    keeps only the field's value.  Consecutive field blocks are
    processed as one run.
    """
    i = 0
    while i < len(blocks):
        if blocks[i]['type'] != 'field':
            i += 1
            continue
        j = i
        while j < len(blocks) and blocks[j]['type'] == 'field':
            key, rest = _fieldre.match(blocks[j]['lines'][0]).groups()
            blocks[j]['lines'][0] = rest
            blocks[j]['key'] = key
            j += 1
        # blocks[j] (if any) is not a field, so resume after it
        i = j + 1
    return blocks
def updateoptionlists(blocks):
    """Parse option-list blocks.

    For each run of consecutive 'option' blocks, extract the formatted
    option string into the block's 'optstr' entry, leave only the
    description in 'lines', and record the width of the widest option
    string of the run in every block's 'optstrwidth' so descriptions
    can be aligned later (see formatoption).
    """
    i = 0
    while i < len(blocks):
        if blocks[i]['type'] != 'option':
            i += 1
            continue
        optstrwidth = 0
        j = i
        while j < len(blocks) and blocks[j]['type'] == 'option':
            m = _optionre.match(blocks[j]['lines'][0])
            shortoption = m.group(2)
            group3 = m.group(3)
            longoption = group3[2:].strip()
            desc = m.group(6).strip()
            longoptionarg = m.group(5).strip()
            blocks[j]['lines'][0] = desc
            # extra space column when there is no short option
            noshortop = ''
            if not shortoption:
                noshortop = '   '
            opt = "%s%s" % (shortoption and "-%s " % shortoption or '',
                            ("%s--%s %s") % (noshortop, longoption,
                                             longoptionarg))
            opt = opt.rstrip()
            blocks[j]['optstr'] = opt
            optstrwidth = max(optstrwidth, encoding.colwidth(opt))
            j += 1
        for block in blocks[i:j]:
            block['optstrwidth'] = optstrwidth
        i = j + 1
    return blocks
def prunecontainers(blocks, keep):
    """Prune unwanted containers.

    The blocks must have a 'type' field, i.e., they should have been
    run through findliteralblocks first.

    Returns (blocks, pruned) where *pruned* lists the container types
    that were removed.  Containers whose type is in *keep* have their
    contents de-indented into the container's place instead.
    """
    pruned = []
    i = 0
    while i + 1 < len(blocks):
        # Searching for a block that looks like this:
        #
        # +-------+---------------------------+
        # | ".. container ::" type            |
        # +---+                               |
        #     | blocks                        |
        #     +-------------------------------+
        if (blocks[i]['type'] == 'paragraph' and
            blocks[i]['lines'][0].startswith('.. container::')):
            indent = blocks[i]['indent']
            adjustment = blocks[i + 1]['indent'] - indent
            containertype = blocks[i]['lines'][0][15:]
            prune = containertype not in keep
            if prune:
                pruned.append(containertype)
            # Always delete "..container:: type" block
            del blocks[i]
            j = i
            i -= 1
            # either drop the contained blocks or shift them left
            while j < len(blocks) and blocks[j]['indent'] > indent:
                if prune:
                    del blocks[j]
                else:
                    blocks[j]['indent'] -= adjustment
                    j += 1
        i += 1
    return blocks, pruned
# A section underline: a line made of one repeated punctuation character.
_sectionre = re.compile(r"""^([-=`:.'"~^_*+#])\1+$""")
def findtables(blocks):
    '''Find simple tables

    Only simple one-line table elements are supported.  Matching
    paragraph blocks are retyped to 'table' and gain 'header' (bool)
    and 'table' (list of rows, each a list of cell strings) entries.
    '''
    for block in blocks:
        # Searching for a block that looks like this:
        #
        # === ==== ===
        #  A    B   C
        # === ==== ===  <- optional
        #  1    2   3
        #  x    y   z
        # === ==== ===
        if (block['type'] == 'paragraph' and
            len(block['lines']) > 2 and
            _tablere.match(block['lines'][0]) and
            block['lines'][0] == block['lines'][-1]):
            block['type'] = 'table'
            block['header'] = False
            div = block['lines'][0]
            # column markers are ASCII so we can calculate column
            # position in bytes
            columns = [x for x in xrange(len(div))
                       if div[x] == '=' and (x == 0 or div[x - 1] == ' ')]
            rows = []
            for l in block['lines'][1:-1]:
                if l == div:
                    # a repeated divider marks the end of the header row
                    block['header'] = True
                    continue
                row = []
                # we measure columns not in bytes or characters but in
                # colwidth which makes things tricky
                pos = columns[0]  # leading whitespace is bytes
                for n, start in enumerate(columns):
                    if n + 1 < len(columns):
                        width = columns[n + 1] - start
                        v = encoding.getcols(l, pos, width)  # gather columns
                        pos += len(v)  # calculate byte position of end
                        row.append(v.strip())
                    else:
                        # last column takes the rest of the line
                        row.append(l[pos:].strip())
                rows.append(row)
            block['table'] = rows
    return blocks
def findsections(blocks):
    """Mark two-line title/underline paragraphs as 'section' blocks.

    The blocks must have a 'type' field, i.e., they should have been
    run through findliteralblocks first.
    """
    for block in blocks:
        if block['type'] != 'paragraph':
            continue
        lines = block['lines']
        # A section is a title line followed by an underline of the same
        # display width made of a single punctuation character:
        #
        #     Section title
        #     -------------
        if (len(lines) == 2 and
            encoding.colwidth(lines[0]) == len(lines[1]) and
            _sectionre.match(lines[1])):
            block['underline'] = lines[1][0]
            block['type'] = 'section'
            del lines[1]
    return blocks
def inlineliterals(blocks):
    """Convert ``inline literal`` markup into plain double quotes."""
    quoted = [('``', '"')]
    for block in blocks:
        if block['type'] in ('paragraph', 'section'):
            block['lines'] = [replace(line, quoted)
                              for line in block['lines']]
    return blocks
def hgrole(blocks):
    """Rewrite :hg:`command` roles as "hg command"."""
    substitutions = [(':hg:`', '"hg '), ('`', '"')]
    for block in blocks:
        if block['type'] in ('paragraph', 'section'):
            # Turn :hg:`command` into "hg command". This also works
            # when there is a line break in the command and relies on
            # the fact that we have no stray back-quotes in the input
            # (run the blocks through inlineliterals first).
            block['lines'] = [replace(line, substitutions)
                              for line in block['lines']]
    return blocks
def addmargins(blocks):
    """Insert empty 'margin' blocks for vertical spacing.

    Consecutive bullets, options, and fields stay packed together with
    no vertical space; an empty margin block is inserted between all
    other adjacent blocks.
    """
    packed = ('bullet', 'option', 'field')
    i = 1
    while i < len(blocks):
        sametype = blocks[i]['type'] == blocks[i - 1]['type']
        if sametype and blocks[i]['type'] in packed:
            i += 1
        else:
            blocks.insert(i, dict(lines=[''], indent=0, type='margin'))
            # skip past the margin we just inserted
            i += 2
    return blocks
def prunecomments(blocks):
    """Drop reStructuredText comment blocks, plus the margin block that
    immediately follows each one."""
    i = 0
    while i < len(blocks):
        block = blocks[i]
        iscomment = (block['type'] == 'paragraph' and
                     (block['lines'][0].startswith('.. ') or
                      block['lines'] == ['..']))
        if not iscomment:
            i += 1
            continue
        del blocks[i]
        # remove the spacing block that trailed the comment
        if i < len(blocks) and blocks[i]['type'] == 'margin':
            del blocks[i]
    return blocks
# Admonition directives recognized at the start of a block,
# e.g. '.. note::' or '.. Warning::' (case-insensitive).
_admonitionre = re.compile(r"\.\. (admonition|attention|caution|danger|"
                           r"error|hint|important|note|tip|warning)::",
                           flags=re.IGNORECASE)

def findadmonitions(blocks):
    """Retype blocks that open with an admonition directive.

    Matching blocks become type 'admonition' with the lower-cased
    directive name stored under 'admonitiontitle'; any text following
    the '::' on the first line is kept as the first body line.
    """
    for block in blocks:
        m = _admonitionre.match(block['lines'][0])
        if not m:
            continue
        block['type'] = 'admonition'
        # '.. note:: text'  ->  title 'note'
        block['admonitiontitle'] = block['lines'][0][3:m.end() - 2].lower()
        firstline = block['lines'][0][m.end() + 1:]
        if firstline:
            block['lines'].insert(1, ' ' + firstline)
        del block['lines'][0]
    return blocks
# Localized display titles for each admonition type (used by
# formatblock/formathtml when rendering 'admonition' blocks).
_admonitiontitles = {'attention': _('Attention:'),
                     'caution': _('Caution:'),
                     'danger': _('!Danger!') ,
                     'error': _('Error:'),
                     'hint': _('Hint:'),
                     'important': _('Important:'),
                     'note': _('Note:'),
                     'tip': _('Tip:'),
                     'warning': _('Warning!')}
def formatoption(block, width):
    """Render an option-list block: the padded option string column
    followed by its wrapped description.

    Relies on 'optstr' and 'optstrwidth' set by updateoptionlists, so
    all descriptions in the same list line up.
    """
    desc = ' '.join(map(str.strip, block['lines']))
    colwidth = encoding.colwidth(block['optstr'])
    usablewidth = width - 1
    hanging = block['optstrwidth']
    # pad the option string out to the widest option in the run
    initindent = '%s%s  ' % (block['optstr'], ' ' * ((hanging - colwidth)))
    hangindent = ' ' * (encoding.colwidth(initindent) + 1)
    return ' %s\n' % (util.wrap(desc, usablewidth,
                                initindent=initindent,
                                hangindent=hangindent))
def formatblock(block, width):
    """Format a block according to width.

    Dispatches on block['type'] ('admonition', 'margin', 'literal',
    'section', 'table', 'definition', 'bullet', 'field', 'option', or
    plain paragraph) and returns the rendered text, newline-terminated.
    """
    if width <= 0:
        # fall back to a conventional terminal width
        width = 78
    indent = ' ' * block['indent']
    if block['type'] == 'admonition':
        admonition = _admonitiontitles[block['admonitiontitle']]
        # hanging indent of the body is taken from the last line
        hang = len(block['lines'][-1]) - len(block['lines'][-1].lstrip())
        defindent = indent + hang * ' '
        text = ' '.join(map(str.strip, block['lines']))
        return '%s\n%s\n' % (indent + admonition,
                             util.wrap(text, width=width,
                                       initindent=defindent,
                                       hangindent=defindent))
    if block['type'] == 'margin':
        return '\n'
    if block['type'] == 'literal':
        # literal blocks are shown indented, never re-wrapped
        indent += '  '
        return indent + ('\n' + indent).join(block['lines']) + '\n'
    if block['type'] == 'section':
        underline = encoding.colwidth(block['lines'][0]) * block['underline']
        return "%s%s\n%s%s\n" % (indent, block['lines'][0],indent, underline)
    if block['type'] == 'table':
        table = block['table']
        # compute column widths
        widths = [max([encoding.colwidth(e) for e in c]) for c in zip(*table)]
        text = ''
        span = sum(widths) + len(widths) - 1
        indent = ' ' * block['indent']
        # continuation lines hang under the last column
        hang = ' ' * (len(indent) + span - widths[-1])
        for row in table:
            l = []
            for w, v in zip(widths, row):
                pad = ' ' * (w - encoding.colwidth(v))
                l.append(v + pad)
            l = ' '.join(l)
            l = util.wrap(l, width=width, initindent=indent, hangindent=hang)
            if not text and block['header']:
                # first row is the header: underline it
                text = l + '\n' + indent + '-' * (min(width, span)) + '\n'
            else:
                text += l + "\n"
        return text
    if block['type'] == 'definition':
        term = indent + block['lines'][0]
        hang = len(block['lines'][-1]) - len(block['lines'][-1].lstrip())
        defindent = indent + hang * ' '
        text = ' '.join(map(str.strip, block['lines'][1:]))
        return '%s\n%s\n' % (term, util.wrap(text, width=width,
                                             initindent=defindent,
                                             hangindent=defindent))
    subindent = indent
    if block['type'] == 'bullet':
        if block['lines'][0].startswith('| '):
            # Remove bullet for line blocks and add no extra
            # indention.
            block['lines'][0] = block['lines'][0][2:]
        else:
            m = _bulletre.match(block['lines'][0])
            subindent = indent + m.end() * ' '
    elif block['type'] == 'field':
        key = block['key']
        subindent = indent + _fieldwidth * ' '
        if len(key) + 2 > _fieldwidth:
            # key too large, use full line width
            key = key.ljust(width)
        else:
            # key fits within field width
            key = key.ljust(_fieldwidth)
        block['lines'][0] = key + block['lines'][0]
    elif block['type'] == 'option':
        return formatoption(block, width)
    # default: join the lines and wrap as a paragraph
    text = ' '.join(map(str.strip, block['lines']))
    return util.wrap(text, width=width,
                     initindent=indent,
                     hangindent=subindent) + '\n'
def formathtml(blocks):
    """Format RST blocks as HTML.

    Walks the parsed blocks once, emitting HTML fragments; open lists
    (<ul>/<ol>/<dl>) are tracked on *listnest* and closed when the
    following block's indent level drops.
    """
    out = []
    # underline characters seen so far; position determines heading level
    headernest = ''
    # stack of (list tag, indent level) for currently-open lists
    listnest = []
    def escape(s):
        return cgi.escape(s, True)
    def openlist(start, level):
        # open a new list only if the innermost one has a different tag
        if not listnest or listnest[-1][0] != start:
            listnest.append((start, level))
            out.append('<%s>\n' % start)
    blocks = [b for b in blocks if b['type'] != 'margin']
    for pos, b in enumerate(blocks):
        btype = b['type']
        level = b['indent']
        lines = b['lines']
        if btype == 'admonition':
            admonition = escape(_admonitiontitles[b['admonitiontitle']])
            text = escape(' '.join(map(str.strip, lines)))
            out.append('<p>\n<b>%s</b> %s\n</p>\n' % (admonition, text))
        elif btype == 'paragraph':
            out.append('<p>\n%s\n</p>\n' % escape('\n'.join(lines)))
        elif btype == 'margin':
            pass
        elif btype == 'literal':
            out.append('<pre>\n%s\n</pre>\n' % escape('\n'.join(lines)))
        elif btype == 'section':
            i = b['underline']
            if i not in headernest:
                headernest += i
            level = headernest.index(i) + 1
            out.append('<h%d>%s</h%d>\n' % (level, escape(lines[0]), level))
        elif btype == 'table':
            table = b['table']
            out.append('<table>\n')
            for row in table:
                out.append('<tr>')
                for v in row:
                    out.append('<td>')
                    out.append(escape(v))
                    out.append('</td>')
                    out.append('\n')
                out.pop()
                out.append('</tr>\n')
            out.append('</table>\n')
        elif btype == 'definition':
            openlist('dl', level)
            term = escape(lines[0])
            text = escape(' '.join(map(str.strip, lines[1:])))
            out.append(' <dt>%s\n <dd>%s\n' % (term, text))
        elif btype == 'bullet':
            bullet, head = lines[0].split(' ', 1)
            if bullet == '-':
                openlist('ul', level)
            else:
                openlist('ol', level)
            out.append(' <li> %s\n' % escape(' '.join([head] + lines[1:])))
        elif btype == 'field':
            openlist('dl', level)
            key = escape(b['key'])
            text = escape(' '.join(map(str.strip, lines)))
            out.append(' <dt>%s\n <dd>%s\n' % (key, text))
        elif btype == 'option':
            openlist('dl', level)
            opt = escape(b['optstr'])
            desc = escape(' '.join(map(str.strip, lines)))
            out.append(' <dt>%s\n <dd>%s\n' % (opt, desc))
        # close lists if indent level of next block is lower
        if listnest:
            start, level = listnest[-1]
            if pos == len(blocks) - 1:
                out.append('</%s>\n' % start)
                listnest.pop()
            else:
                nb = blocks[pos + 1]
                ni = nb['indent']
                if (ni < level or
                    (ni == level and
                     nb['type'] not in 'definition bullet field option')):
                    out.append('</%s>\n' % start)
                    listnest.pop()
    return ''.join(out)
def parse(text, indent=0, keep=None):
    """Parse *text* into a list of blocks.

    Returns (blocks, pruned) where *pruned* lists the container types
    removed by prunecontainers (only types in *keep* survive).
    """
    blocks = findblocks(text)
    for block in blocks:
        block['indent'] += indent
    blocks = findliteralblocks(blocks)
    blocks = findtables(blocks)
    blocks, pruned = prunecontainers(blocks, keep or [])
    # remaining passes form a simple pipeline over the block list
    for pass_ in (findsections, inlineliterals, hgrole, splitparagraphs,
                  updatefieldlists, updateoptionlists, addmargins,
                  prunecomments, findadmonitions):
        blocks = pass_(blocks)
    return blocks, pruned
def formatblocks(blocks, width):
    """Render a list of parsed blocks as plain text at *width*."""
    return ''.join(formatblock(block, width) for block in blocks)
def format(text, width=80, indent=0, keep=None, style='plain'):
    """Parse and format the text according to width.

    With style='html' the blocks are rendered as HTML instead of plain
    text.  When *keep* is given, returns (text, pruned containers).
    """
    blocks, pruned = parse(text, indent, keep or [])
    if style == 'html':
        rendered = formathtml(blocks)
    else:
        rendered = formatblocks(blocks, width)
    if keep is None:
        return rendered
    return rendered, pruned
def getsections(blocks):
    '''return a list of (section name, nesting level, blocks) tuples'''
    nest = ""
    level = 0
    secs = []
    for block in blocks:
        if block['type'] == 'section':
            char = block['underline']
            if char not in nest:
                nest += char
            # nesting level = order in which underline chars first appeared
            level = nest.index(char) + 1
            nest = nest[:level]
            secs.append((block['lines'][0], level, [block]))
        else:
            if not secs:
                # text before any heading: start an empty initial section
                secs = [('', 0, [])]
            secs[-1][2].append(block)
    return secs
def decorateblocks(blocks, width):
    '''generate a list of (section name, line text) pairs for search'''
    decorated = []
    for name, _level, secblocks in getsections(blocks):
        rendered = formatblocks(secblocks, width)
        decorated.append([(name, line)
                          for line in rendered.splitlines(True)])
    return decorated
def maketable(data, indent=0, header=False):
    '''Generate an RST table for the given table data as a list of lines'''
    colwidths = [max(encoding.colwidth(cell) for cell in col)
                 for col in zip(*data)]
    prefix = ' ' * indent
    # divider line: one run of '=' per column
    div = prefix + ' '.join('=' * w for w in colwidths) + '\n'
    out = [div]
    for row in data:
        padded = [cell + ' ' * (w - encoding.colwidth(cell))
                  for w, cell in zip(colwidths, row)]
        out.append(prefix + ' '.join(padded) + "\n")
    if header and len(data) > 1:
        # divider after the first (header) row
        out.insert(2, div)
    out.append(div)
    return out
| apache-2.0 |
jasonwyatt/Flask-ErrorMail | docs/_themes/flask_theme_support.py | 2228 | 4875 | # flasky extensions. flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class FlaskyStyle(Style):
    """Pygments style for the Flask documentation theme.

    A Tango-derived light colour scheme on an off-white background;
    each entry maps a Pygments token type to its CSS style string.
    """

    background_color = "#f8f8f8"
    default_style = ""

    styles = {
        # No corresponding class for the following:
        #Text:                     "", # class:  ''
        Whitespace:                "underline #f8f8f8",      # class: 'w'
        Error:                     "#a40000 border:#ef2929", # class: 'err'
        Other:                     "#000000",                # class 'x'

        Comment:                   "italic #8f5902",         # class: 'c'
        Comment.Preproc:           "noitalic",               # class: 'cp'

        Keyword:                   "bold #004461",           # class: 'k'
        Keyword.Constant:          "bold #004461",           # class: 'kc'
        Keyword.Declaration:       "bold #004461",           # class: 'kd'
        Keyword.Namespace:         "bold #004461",           # class: 'kn'
        Keyword.Pseudo:            "bold #004461",           # class: 'kp'
        Keyword.Reserved:          "bold #004461",           # class: 'kr'
        Keyword.Type:              "bold #004461",           # class: 'kt'

        Operator:                  "#582800",                # class: 'o'
        Operator.Word:             "bold #004461",           # class: 'ow' - like keywords

        Punctuation:               "bold #000000",           # class: 'p'

        # because special names such as Name.Class, Name.Function, etc.
        # are not recognized as such later in the parsing, we choose them
        # to look the same as ordinary variables.
        Name:                      "#000000",                # class: 'n'
        Name.Attribute:            "#c4a000",                # class: 'na' - to be revised
        Name.Builtin:              "#004461",                # class: 'nb'
        Name.Builtin.Pseudo:       "#3465a4",                # class: 'bp'
        Name.Class:                "#000000",                # class: 'nc' - to be revised
        Name.Constant:             "#000000",                # class: 'no' - to be revised
        Name.Decorator:            "#888",                   # class: 'nd' - to be revised
        Name.Entity:               "#ce5c00",                # class: 'ni'
        Name.Exception:            "bold #cc0000",           # class: 'ne'
        Name.Function:             "#000000",                # class: 'nf'
        Name.Property:             "#000000",                # class: 'py'
        Name.Label:                "#f57900",                # class: 'nl'
        Name.Namespace:            "#000000",                # class: 'nn' - to be revised
        Name.Other:                "#000000",                # class: 'nx'
        Name.Tag:                  "bold #004461",           # class: 'nt' - like a keyword
        Name.Variable:             "#000000",                # class: 'nv' - to be revised
        Name.Variable.Class:       "#000000",                # class: 'vc' - to be revised
        Name.Variable.Global:      "#000000",                # class: 'vg' - to be revised
        Name.Variable.Instance:    "#000000",                # class: 'vi' - to be revised

        Number:                    "#990000",                # class: 'm'

        Literal:                   "#000000",                # class: 'l'
        Literal.Date:              "#000000",                # class: 'ld'

        String:                    "#4e9a06",                # class: 's'
        String.Backtick:           "#4e9a06",                # class: 'sb'
        String.Char:               "#4e9a06",                # class: 'sc'
        String.Doc:                "italic #8f5902",         # class: 'sd' - like a comment
        String.Double:             "#4e9a06",                # class: 's2'
        String.Escape:             "#4e9a06",                # class: 'se'
        String.Heredoc:            "#4e9a06",                # class: 'sh'
        String.Interpol:           "#4e9a06",                # class: 'si'
        String.Other:              "#4e9a06",                # class: 'sx'
        String.Regex:              "#4e9a06",                # class: 'sr'
        String.Single:             "#4e9a06",                # class: 's1'
        String.Symbol:             "#4e9a06",                # class: 'ss'

        Generic:                   "#000000",                # class: 'g'
        Generic.Deleted:           "#a40000",                # class: 'gd'
        Generic.Emph:              "italic #000000",         # class: 'ge'
        Generic.Error:             "#ef2929",                # class: 'gr'
        Generic.Heading:           "bold #000080",           # class: 'gh'
        Generic.Inserted:          "#00A000",                # class: 'gi'
        Generic.Output:            "#888",                   # class: 'go'
        Generic.Prompt:            "#745334",                # class: 'gp'
        Generic.Strong:            "bold #000000",           # class: 'gs'
        Generic.Subheading:        "bold #800080",           # class: 'gu'
        Generic.Traceback:         "bold #a40000",           # class: 'gt'
    }
| mit |
dan-blanchard/conda-build | conda_build/noarch_python.py | 1 | 4949 | import os
import io
import sys
import json
import shutil
import locale
from os.path import basename, dirname, isdir, join, isfile
from conda_build.config import config
from conda_build.post import SHEBANG_PAT
# True when building on Windows (affects script/exe handling below).
ISWIN = sys.platform.startswith('win')
def _force_dir(dirname):
if not isdir(dirname):
os.makedirs(dirname)
def _error_exit(exit_message):
sys.exit("[noarch_python] %s" % exit_message)
def rewrite_script(fn):
    """Take a file from the bin directory and rewrite it into the python-scripts
    directory after it passes some sanity checks for noarch pacakges.

    Returns the (possibly renamed) script name.  Aborts the build if
    the script is a binary file or, on non-Windows, if it lacks a
    python shebang line.
    """
    # Load and check the source file for not being a binary
    src = join(config.build_prefix, 'Scripts' if ISWIN else 'bin', fn)
    with io.open(src, encoding=locale.getpreferredencoding()) as fi:
        try:
            data = fi.read()
        except UnicodeDecodeError:  # file is binary
            _error_exit("Noarch package contains binary script: %s" % fn)
    os.unlink(src)

    # Get rid of '-script.py' suffix on Windows
    if ISWIN and fn.endswith('-script.py'):
        fn = fn[:-10]

    # Check that it does have a #! python string, and skip it
    encoding = sys.stdout.encoding or 'utf8'
    m = SHEBANG_PAT.match(data.encode(encoding))
    if m and b'python' in m.group():
        # strip the shebang line; it is re-added at install time
        new_data = data[data.find('\n') + 1:]
    elif ISWIN:
        new_data = data
    else:
        _error_exit("No python shebang in: %s" % fn)

    # Rewrite the file to the python-scripts directory
    dst_dir = join(config.build_prefix, 'python-scripts')
    _force_dir(dst_dir)
    with open(join(dst_dir, fn), 'w') as fo:
        fo.write(new_data)
    return fn
def handle_file(f, d):
    """Process a file for inclusion in a noarch python package.

    *f* is a path relative to the build prefix; *d* is the metadata
    dict whose 'site-packages', 'python-scripts' and 'Examples' lists
    are updated in place.  Aborts the build on binary files or files it
    does not know how to classify.
    """
    path = join(config.build_prefix, f)

    # Ignore egg-info and pyc files.
    if f.endswith(('.egg-info', '.pyc')):
        os.unlink(path)

    # The presence of .so indicated this is not a noarch package
    elif f.endswith(('.so', '.dll', '.pyd', '.exe', '.dylib')):
        if f.endswith('.exe') and (isfile(f[:-4] + '-script.py') or
                                   basename(f[:-4]) in d['python-scripts']):
            os.unlink(path)  # this is an entry point with a matching xx-script.py
            return
        _error_exit("Error: Binary library or executable found: %s" % f)

    elif 'site-packages' in f:
        nsp = join(config.build_prefix, 'site-packages')
        _force_dir(nsp)

        # move the file under the prefix's own site-packages directory
        g = f[f.find('site-packages'):]
        dst = join(config.build_prefix, g)
        dst_dir = dirname(dst)
        _force_dir(dst_dir)
        os.rename(path, dst)
        # record the path relative to site-packages/ (14 == len('site-packages/'))
        d['site-packages'].append(g[14:])

    # Treat scripts specially with the logic from above
    elif f.startswith(('bin/', 'Scripts')):
        fn = basename(path)
        fn = rewrite_script(fn)
        d['python-scripts'].append(fn)

    # Include examples in the metadata doc
    elif f.startswith(('Examples/', 'Examples\\')):
        d['Examples'].append(f[9:])
    else:
        _error_exit("Error: Don't know how to handle file: %s" % f)
def transform(m, files):
    """Convert a built python package into its noarch form.

    *m* is the package metadata object, *files* the list of files in
    the build prefix.  Writes pre-link scripts (sh and bat), relocates
    files via handle_file, and emits a link.py carrying the package
    metadata plus the linking code from _link.py.
    """
    assert 'py_' in m.dist()

    prefix = config.build_prefix
    name = m.name()

    bin_dir = join(prefix, 'bin')
    _force_dir(bin_dir)

    # Create *nix prelink script
    # Note: it's important to use LF newlines or it wont work if we build on Win
    with open(join(bin_dir, '.%s-pre-link.sh' % name), 'wb') as fo:
        fo.write('''\
#!/bin/bash
$PREFIX/bin/python $SOURCE_DIR/link.py
'''.encode('utf-8'))

    scripts_dir = join(prefix, 'Scripts')
    _force_dir(scripts_dir)

    # Create windows prelink script (be nice and use Windows newlines)
    with open(join(scripts_dir, '.%s-pre-link.bat' % name), 'wb') as fo:
        fo.write('''\
@echo off
"%PREFIX%\\python.exe" "%SOURCE_DIR%\\link.py"
'''.replace('\n', '\r\n').encode('utf-8'))

    d = {'dist': m.dist(),
         'site-packages': [],
         'python-scripts': [],
         'Examples': []}

    # Populate site-package, python-scripts, and Examples into above
    for f in files:
        handle_file(f, d)

    # Windows path conversion
    if ISWIN:
        for fns in (d['site-packages'], d['Examples']):
            for i in range(len(fns)):
                fns[i] = fns[i].replace('\\', '/')

    # Find our way to this directory
    this_dir = dirname(__file__)

    # copy in windows exe shims if there are any python-scripts
    if d['python-scripts']:
        for fn in 'cli-32.exe', 'cli-64.exe':
            shutil.copyfile(join(this_dir, fn), join(prefix, fn))

    # Read the local _link.py
    with open(join(this_dir, '_link.py')) as fi:
        link_code = fi.read()

    # Write the package metadata, and bumper with code for linking
    with open(join(prefix, 'link.py'), 'w') as fo:
        fo.write('DATA = ')
        json.dump(d, fo, indent=2, sort_keys=True)
        fo.write('\n## END DATA\n\n')
        fo.write(link_code)
| bsd-3-clause |
rowemoore/odoo | addons/hw_proxy/__openerp__.py | 313 | 1675 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Hardware Proxy',
    'version': '1.0',
    'category': 'Point Of Sale',
    'sequence': 6,
    'summary': 'Connect the Web Client to Hardware Peripherals',
    'website': 'https://www.odoo.com/page/point-of-sale',
    # Fixed typo in the user-visible description: "Hardware Poxy" ->
    # "Hardware Proxy" (RST underline lengthened to match).
    'description': """
Hardware Proxy
==============

This module allows you to remotely use peripherals connected to this server.

This modules only contains the enabling framework. The actual devices drivers
are found in other modules that must be installed separately.

""",
    'author': 'OpenERP SA',
    'depends': [],
    'test': [
    ],
    'installable': True,
    'auto_install': False,
}
| agpl-3.0 |
ncoghlan/pip | pip/_vendor/distlib/scripts.py | 221 | 14183 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from io import BytesIO
import logging
import os
import re
import struct
import sys
from .compat import sysconfig, detect_encoding, ZipFile
from .resources import finder
from .util import (FileOperator, get_export_entry, convert_path,
get_executable, in_venv)
logger = logging.getLogger(__name__)
_DEFAULT_MANIFEST = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity version="1.0.0.0"
processorArchitecture="X86"
name="%s"
type="win32"/>
<!-- Identify the application security requirements. -->
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"/>
</requestedPrivileges>
</security>
</trustInfo>
</assembly>'''.strip()
# check if Python is called on the first line with this expression
FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
SCRIPT_TEMPLATE = '''# -*- coding: utf-8 -*-
if __name__ == '__main__':
import sys, re
def _resolve(module, func):
__import__(module)
mod = sys.modules[module]
parts = func.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
try:
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
func = _resolve('%(module)s', '%(func)s')
rc = func() # None interpreted as 0
except Exception as e: # only supporting Python >= 2.6
sys.stderr.write('%%s\\n' %% e)
rc = 1
sys.exit(rc)
'''
class ScriptMaker(object):
    """
    A class to copy or create scripts from source scripts or callable
    specifications.
    """
    # Template used when generating scripts from export entries.
    script_template = SCRIPT_TEMPLATE

    # Interpreter path to hard-code into shebangs; None means detect.
    executable = None  # for shebangs
def __init__(self, source_dir, target_dir, add_launchers=True,
             dry_run=False, fileop=None):
    # Directory scripts are copied from / written to.
    self.source_dir = source_dir
    self.target_dir = target_dir
    # On Windows, wrap generated scripts in .exe launchers when True.
    self.add_launchers = add_launchers
    self.force = False
    # When False, existing target files are not overwritten.
    self.clobber = False
    # It only makes sense to set mode bits on POSIX.
    self.set_mode = (os.name == 'posix') or (os.name == 'java' and
                                             os._name == 'posix')
    # Script name variants to generate, e.g. 'foo' and 'fooX.Y'.
    self.variants = set(('', 'X.Y'))
    # File operator performs the actual writes (honours dry_run).
    self._fileop = fileop or FileOperator(dry_run)
def _get_alternate_executable(self, executable, options):
if options.get('gui', False) and os.name == 'nt':
dn, fn = os.path.split(executable)
fn = fn.replace('python', 'pythonw')
executable = os.path.join(dn, fn)
return executable
if sys.platform.startswith('java'):  # pragma: no cover
    # These helpers only exist when running on Jython, where the
    # interpreter itself may be started via a shell script wrapper.
    def _is_shell(self, executable):
        """
        Determine if the specified executable is a script
        (contains a #! line)
        """
        try:
            with open(executable) as fp:
                return fp.read(2) == '#!'
        except (OSError, IOError):
            logger.warning('Failed to open %s', executable)
            return False

    def _fix_jython_executable(self, executable):
        """Return a shebang-safe interpreter reference for Jython."""
        if self._is_shell(executable):
            # Workaround for Jython is not needed on Linux systems.
            import java

            if java.lang.System.getProperty('os.name') == 'Linux':
                return executable
        elif executable.lower().endswith('jython.exe'):
            # Use wrapper exe for Jython on Windows
            return executable

        # fall back to env lookup so the shebang stays short and valid
        return '/usr/bin/env %s' % executable
def _get_shebang(self, encoding, post_interp=b'', options=None):
    """Compute the shebang line (as bytes) for generated scripts.

    *encoding* is the script's source encoding; the shebang must be
    decodable both as UTF-8 and in that encoding, otherwise ValueError
    is raised.  *post_interp* (bytes) is appended after the interpreter
    path.
    """
    enquote = True
    if self.executable:
        executable = self.executable
        enquote = False  # assume this will be taken care of
    elif not sysconfig.is_python_build():
        executable = get_executable()
    elif in_venv():
        executable = os.path.join(sysconfig.get_path('scripts'),
                                  'python%s' % sysconfig.get_config_var('EXE'))
    else:
        executable = os.path.join(
            sysconfig.get_config_var('BINDIR'),
            'python%s%s' % (sysconfig.get_config_var('VERSION'),
                            sysconfig.get_config_var('EXE')))
    if options:
        executable = self._get_alternate_executable(executable, options)

    if sys.platform.startswith('java'):  # pragma: no cover
        executable = self._fix_jython_executable(executable)
    # Normalise case for Windows
    executable = os.path.normcase(executable)
    # If the user didn't specify an executable, it may be necessary to
    # cater for executable paths with spaces (not uncommon on Windows)
    if enquote and ' ' in executable:
        executable = '"%s"' % executable
    # Issue #51: don't use fsencode, since we later try to
    # check that the shebang is decodable using utf-8.
    executable = executable.encode('utf-8')
    # in case of IronPython, play safe and enable frames support
    # (bugfix: post_interp is bytes, so the membership probes must be
    # bytes too -- 'str in bytes' raises TypeError on Python 3)
    if (sys.platform == 'cli' and b'-X:Frames' not in post_interp
        and b'-X:FullFrames' not in post_interp):
        post_interp += b' -X:Frames'
    shebang = b'#!' + executable + post_interp + b'\n'
    # Python parser starts to read a script using UTF-8 until
    # it gets a #coding:xxx cookie. The shebang has to be the
    # first line of a file, the #coding:xxx cookie cannot be
    # written before. So the shebang has to be decodable from
    # UTF-8.
    try:
        shebang.decode('utf-8')
    except UnicodeDecodeError:
        raise ValueError(
            'The shebang (%r) is not decodable from utf-8' % shebang)
    # If the script is encoded to a custom encoding (use a
    # #coding:xxx cookie), the shebang has to be decodable from
    # the script encoding too.
    if encoding != 'utf-8':
        try:
            shebang.decode(encoding)
        except UnicodeDecodeError:
            raise ValueError(
                'The shebang (%r) is not decodable '
                'from the script encoding (%r)' % (shebang, encoding))
    return shebang
def _get_script_text(self, entry):
return self.script_template % dict(module=entry.prefix,
func=entry.suffix)
manifest = _DEFAULT_MANIFEST
def get_manifest(self, exename):
    """Return the Windows manifest XML for *exename*.

    Only the basename of *exename* is interpolated into the manifest
    template; any directory components are ignored.
    """
    return self.manifest % os.path.basename(exename)
def _write_script(self, names, shebang, script_bytes, filenames, ext):
    """Write the generated script under each name in *names*.

    On Windows, when ``add_launchers`` is set, the payload written is an
    ``.exe``: a native launcher stub followed by the shebang and a zip
    archive whose ``__main__.py`` is the script body.  Otherwise a plain
    text script (shebang + body) is written.  Every path actually written
    is appended to *filenames*.
    """
    use_launcher = self.add_launchers and os.name == 'nt'
    linesep = os.linesep.encode('utf-8')
    if not use_launcher:
        script_bytes = shebang + linesep + script_bytes
    else:
        # 't' selects the console launcher, 'w' the GUI launcher
        # (no console window) for .pyw scripts.
        if ext == 'py':
            launcher = self._get_launcher('t')
        else:
            launcher = self._get_launcher('w')
        # Embed the script as a zipapp (__main__.py inside a zip) so the
        # launcher can hand it straight to the interpreter.
        stream = BytesIO()
        with ZipFile(stream, 'w') as zf:
            zf.writestr('__main__.py', script_bytes)
        zip_data = stream.getvalue()
        script_bytes = launcher + shebang + linesep + zip_data
    for name in names:
        outname = os.path.join(self.target_dir, name)
        if use_launcher:
            # Strip a .py/.pyw extension before appending .exe.
            n, e = os.path.splitext(outname)
            if e.startswith('.py'):
                outname = n
            outname = '%s.exe' % outname
            try:
                self._fileop.write_binary_file(outname, script_bytes)
            except Exception:
                # Failed writing an executable - it might be in use.
                logger.warning('Failed to write executable - trying to '
                               'use .deleteme logic')
                # Rename the busy file aside, write the new one, then try
                # to clean up the renamed copy (best effort).
                dfname = '%s.deleteme' % outname
                if os.path.exists(dfname):
                    os.remove(dfname)       # Not allowed to fail here
                os.rename(outname, dfname)  # nor here
                self._fileop.write_binary_file(outname, script_bytes)
                logger.debug('Able to replace executable using '
                             '.deleteme logic')
                try:
                    os.remove(dfname)
                except Exception:
                    pass    # still in use - ignore error
        else:
            if os.name == 'nt' and not outname.endswith('.' + ext):
                outname = '%s.%s' % (outname, ext)
            if os.path.exists(outname) and not self.clobber:
                logger.warning('Skipping existing file %s', outname)
                continue
            self._fileop.write_binary_file(outname, script_bytes)
            if self.set_mode:
                self._fileop.set_executable_mode([outname])
        filenames.append(outname)
def _make_script(self, entry, filenames, options=None):
post_interp = b''
if options:
args = options.get('interpreter_args', [])
if args:
args = ' %s' % ' '.join(args)
post_interp = args.encode('utf-8')
shebang = self._get_shebang('utf-8', post_interp, options=options)
script = self._get_script_text(entry).encode('utf-8')
name = entry.name
scriptnames = set()
if '' in self.variants:
scriptnames.add(name)
if 'X' in self.variants:
scriptnames.add('%s%s' % (name, sys.version[0]))
if 'X.Y' in self.variants:
scriptnames.add('%s-%s' % (name, sys.version[:3]))
if options and options.get('gui', False):
ext = 'pyw'
else:
ext = 'py'
self._write_script(scriptnames, shebang, script, filenames, ext)
def _copy_script(self, script, filenames):
    """Copy an existing script from ``source_dir`` into ``target_dir``.

    If the script's first line matches ``FIRST_LINE_RE`` (a shebang that
    invokes Python), the shebang is rewritten for the target interpreter
    via ``_write_script``; otherwise the file is copied verbatim.  Paths
    written are appended to *filenames*.
    """
    adjust = False
    script = os.path.join(self.source_dir, convert_path(script))
    outname = os.path.join(self.target_dir, os.path.basename(script))
    if not self.force and not self._fileop.newer(script, outname):
        logger.debug('not copying %s (up-to-date)', script)
        return
    # Always open the file, but ignore failures in dry-run mode --
    # that way, we'll get accurate feedback if we can read the
    # script.
    try:
        f = open(script, 'rb')
    except IOError:
        if not self.dry_run:
            raise
        f = None
    else:
        # Detect the script's source encoding so a rewritten shebang can
        # be validated against it, then rewind to inspect the first line.
        encoding, lines = detect_encoding(f.readline)
        f.seek(0)
        first_line = f.readline()
        if not first_line:
            logger.warning('%s: %s is an empty file (skipping)',
                           self.get_command_name(), script)
            return
        match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
        if match:
            adjust = True
            # Anything after the interpreter on the shebang line is kept.
            post_interp = match.group(1) or b''
    if not adjust:
        if f:
            f.close()
        self._fileop.copy_file(script, outname)
        if self.set_mode:
            self._fileop.set_executable_mode([outname])
        filenames.append(outname)
    else:
        logger.info('copying and adjusting %s -> %s', script,
                    self.target_dir)
        if not self._fileop.dry_run:
            shebang = self._get_shebang(encoding, post_interp)
            # A 'pythonw' shebang indicates a GUI script -> .pyw on Windows.
            if b'pythonw' in first_line:
                ext = 'pyw'
            else:
                ext = 'py'
            n = os.path.basename(outname)
            self._write_script([n], shebang, f.read(), filenames, ext)
        if f:
            f.close()
@property
def dry_run(self):
    # Dry-run state lives on the file operator, which performs (or, in
    # dry-run mode, skips) all filesystem writes; delegate to it so the
    # two can never disagree.
    return self._fileop.dry_run

@dry_run.setter
def dry_run(self, value):
    self._fileop.dry_run = value
if os.name == 'nt':
    # Executable launcher support.
    # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
    def _get_launcher(self, kind):
        """Return the launcher stub bytes for *kind* ('t' = console,
        'w' = GUI), matching the interpreter's pointer size (t32/t64/
        w32/w64 .exe resources shipped with the package)."""
        if struct.calcsize('P') == 8:   # 64-bit
            bits = '64'
        else:
            bits = '32'
        name = '%s%s.exe' % (kind, bits)
        # Issue 31: don't hardcode an absolute package name, but
        # determine it relative to the current package
        distlib_package = __name__.rsplit('.', 1)[0]
        result = finder(distlib_package).find(name).bytes
        return result
# Public API follows

def make(self, specification, options=None):
    """
    Make a script.

    :param specification: Either a valid export entry specification
                          (to make a script from a callable) or a
                          filename (to make a script by copying from
                          a source location).
    :param options: A dictionary of options controlling script generation.
    :return: A list of all absolute pathnames written to.
    """
    filenames = []
    entry = get_export_entry(specification)
    if entry is not None:
        self._make_script(entry, filenames, options=options)
    else:
        self._copy_script(specification, filenames)
    return filenames
def make_multiple(self, specifications, options=None):
    """
    Make scripts for each of several specifications.

    :param specifications: A list of specifications.
    :param options: A dictionary of options controlling script generation.
    :return: A list of all absolute pathnames written to.
    """
    written = []
    for spec in specifications:
        written.extend(self.make(spec, options))
    return written
| mit |
sekikn/incubator-airflow | airflow/secrets/base_secrets.py | 6 | 3356 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import warnings
from abc import ABC
from typing import TYPE_CHECKING, List, Optional
if TYPE_CHECKING:
from airflow.models.connection import Connection
class BaseSecretsBackend(ABC):
    """Abstract base class to retrieve Connection object given a conn_id or Variable given a key"""

    def __init__(self, **kwargs):
        pass

    @staticmethod
    def build_path(path_prefix: str, secret_id: str, sep: str = "/") -> str:
        """
        Given conn_id, build path for Secrets Backend

        :param path_prefix: Prefix of the path to get secret
        :param secret_id: Secret id
        :param sep: separator used to concatenate connections_prefix and conn_id. Default: "/"
        """
        return "%s%s%s" % (path_prefix, sep, secret_id)

    def get_conn_uri(self, conn_id: str) -> Optional[str]:
        """
        Get conn_uri from Secrets Backend

        :param conn_id: connection id
        """
        raise NotImplementedError()

    def get_connection(self, conn_id: str) -> Optional['Connection']:
        """
        Return connection object with a given ``conn_id``.

        :param conn_id: connection id
        """
        from airflow.models.connection import Connection

        conn_uri = self.get_conn_uri(conn_id=conn_id)
        if not conn_uri:
            return None
        return Connection(conn_id=conn_id, uri=conn_uri)

    def get_connections(self, conn_id: str) -> List['Connection']:
        """
        Deprecated wrapper around :meth:`get_connection`; returns the
        matching connection wrapped in a list (empty when there is none).

        :param conn_id: connection id
        """
        warnings.warn(
            "This method is deprecated. Please use "
            "`airflow.secrets.base_secrets.BaseSecretsBackend.get_connection`.",
            PendingDeprecationWarning,
            stacklevel=2,
        )
        conn = self.get_connection(conn_id=conn_id)
        return [conn] if conn else []

    def get_variable(self, key: str) -> Optional[str]:
        """
        Return value for Airflow Variable

        :param key: Variable Key
        :return: Variable Value
        """
        raise NotImplementedError()

    def get_config(self, key: str) -> Optional[str]:  # pylint: disable=unused-argument
        """
        Return value for Airflow Config Key

        :param key: Config Key
        :return: Config Value
        """
        return None
| apache-2.0 |
mtlchun/edx | common/djangoapps/util/milestones_helpers.py | 1 | 8678 | # pylint: disable=invalid-name
"""
Utility library for working with the edx-milestones app
"""
from django.conf import settings
from django.utils.translation import ugettext as _
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from courseware.models import StudentModule
from xmodule.modulestore.django import modulestore
from milestones.api import (
get_course_milestones,
add_milestone,
add_course_milestone,
remove_course_milestone,
get_course_milestones_fulfillment_paths,
add_user_milestone,
get_user_milestones,
)
from milestones.models import MilestoneRelationshipType
# Supported milestone namespace types; values are the suffixes used by
# generate_milestone_namespace() when building the namespace string.
NAMESPACE_CHOICES = {
    'ENTRANCE_EXAM': 'entrance_exams'
}
def add_prerequisite_course(course_key, prerequisite_course_key):
    """
    It would create a milestone, then it would set newly created
    milestones as requirement for course referred by `course_key`
    and it would set newly created milestone as fulfilment
    milestone for course referred by `prerequisite_course_key`.

    No-op when the MILESTONES_APP feature flag is off.
    """
    if settings.FEATURES.get('MILESTONES_APP', False):
        # create a milestone
        # NOTE(review): _() wraps an already-formatted string, so this name
        # is effectively untranslatable -- confirm the i18n intent here.
        milestone = add_milestone({
            'name': _('Course {} requires {}'.format(unicode(course_key), unicode(prerequisite_course_key))),
            'namespace': unicode(prerequisite_course_key),
            'description': _('System defined milestone'),
        })
        # add requirement course milestone
        add_course_milestone(course_key, 'requires', milestone)
        # add fulfillment course milestone
        add_course_milestone(prerequisite_course_key, 'fulfills', milestone)
def remove_prerequisite_course(course_key, milestone):
    """
    Remove the given prerequisite-course ``milestone`` from the course
    identified by ``course_key``.

    No-op when the MILESTONES_APP feature flag is off.
    """
    if not settings.FEATURES.get('MILESTONES_APP', False):
        return
    remove_course_milestone(course_key, milestone)
def set_prerequisite_courses(course_key, prerequisite_course_keys):
    """
    Replace the prerequisite-course milestones of ``course_key``.

    All existing 'requires' milestones are removed first, then one new
    milestone is created per entry in ``prerequisite_course_keys``.  Pass
    None or an empty list to merely clear the existing requirements.
    """
    if not settings.FEATURES.get('MILESTONES_APP', False):
        return
    # Drop whatever requirement milestones the course currently has.
    existing = get_course_milestones(course_key=course_key, relationship="requires")
    for milestone in existing or []:
        remove_prerequisite_course(course_key, milestone)
    # Recreate one milestone per requested prerequisite course.
    for key_string in prerequisite_course_keys or []:
        add_prerequisite_course(course_key, CourseKey.from_string(key_string))
def get_pre_requisite_courses_not_completed(user, enrolled_courses):
    """
    It would make dict of prerequisite courses not completed by user among courses
    user has enrolled in. It calls the fulfilment api of milestones app and
    iterates over all fulfilment milestones not achieved to make dict of
    prerequisite courses yet to be completed.

    Returns a dict mapping each enrolled course key to
    ``{'courses': [{'key': ..., 'display': ...}, ...]}`` for the
    prerequisites still outstanding (empty dict if the feature is off or
    nothing is outstanding).
    """
    pre_requisite_courses = {}
    if settings.FEATURES.get('ENABLE_PREREQUISITE_COURSES'):
        for course_key in enrolled_courses:
            required_courses = []
            # Fulfillment paths describe which milestones are still unmet
            # for this user; unmet course-type milestones are prerequisites.
            fulfilment_paths = get_course_milestones_fulfillment_paths(course_key, {'id': user.id})
            for milestone_key, milestone_value in fulfilment_paths.items():  # pylint: disable=unused-variable
                for key, value in milestone_value.items():
                    if key == 'courses' and value:
                        for required_course in value:
                            required_course_key = CourseKey.from_string(required_course)
                            required_course_descriptor = modulestore().get_course(required_course_key)
                            required_courses.append({
                                'key': required_course_key,
                                'display': get_course_display_name(required_course_descriptor)
                            })

            # if there are required courses add to dict
            if required_courses:
                pre_requisite_courses[course_key] = {'courses': required_courses}
    return pre_requisite_courses
def get_prerequisite_courses_display(course_descriptor):
    """
    Return the descriptor's prerequisite courses as a list of dicts, each
    carrying the course key under 'key' and its display name under
    'display'.  Empty when the feature flag is off or there are none.
    """
    courses = []
    feature_enabled = settings.FEATURES.get('ENABLE_PREREQUISITE_COURSES', False)
    if feature_enabled and course_descriptor.pre_requisite_courses:
        for course_id in course_descriptor.pre_requisite_courses:
            course_key = CourseKey.from_string(course_id)
            descriptor = modulestore().get_course(course_key)
            courses.append({
                'key': course_key,
                'display': get_course_display_name(descriptor),
            })
    return courses
def get_course_display_name(descriptor):
    """
    Return the display name for ``descriptor``: its default organization
    and course number, separated by a single space.
    """
    parts = [
        descriptor.display_org_with_default,
        descriptor.display_number_with_default,
    ]
    return ' '.join(parts)
def fulfill_course_milestone(course_key, user):
    """
    Marks the course specified by the given course_key as complete for the given user.
    If any other courses require this course as a prerequisite, their milestones will be appropriately updated.
    """
    if not settings.FEATURES.get('MILESTONES_APP', False):
        return
    for milestone in get_course_milestones(course_key=course_key, relationship="fulfills"):
        add_user_milestone({'id': user.id}, milestone)
def calculate_entrance_exam_score(user, course_descriptor, exam_modules):
    """
    Calculates the score (percent) of the entrance exam using the provided modules

    The score is the mean of per-module grade ratios over graded leaf
    modules; returns 0 when the user has no state for any exam module.
    """
    exam_module_ids = [exam_module.location for exam_module in exam_modules]
    student_modules = StudentModule.objects.filter(
        student=user,
        course_id=course_descriptor.id,
        module_state_key__in=exam_module_ids,
    )
    exam_pct = 0
    if student_modules:
        module_pcts = []
        # Container types carry no grades of their own; only leaf problems
        # contribute to the score.
        ignore_categories = ['course', 'chapter', 'sequential', 'vertical']
        for module in exam_modules:
            if module.graded and module.category not in ignore_categories:
                module_pct = 0
                try:
                    student_module = student_modules.get(module_state_key=module.location)
                    if student_module.max_grade:
                        # NOTE(review): grade / max_grade truncates under
                        # Python 2 if both are ints -- confirm grade is
                        # stored as a float.
                        module_pct = student_module.grade / student_module.max_grade
                    module_pcts.append(module_pct)
                except StudentModule.DoesNotExist:
                    # No saved state for this module: it simply doesn't
                    # contribute to the average.
                    pass
        if module_pcts:
            exam_pct = sum(module_pcts) / float(len(module_pcts))
    return exam_pct
def milestones_achieved_by_user(user, namespace):
    """
    Return the milestones the user has completed within ``namespace``
    (None when the milestones app is disabled).
    """
    if not settings.FEATURES.get('MILESTONES_APP', False):
        return None
    return get_user_milestones({'id': user.id}, namespace)
def is_valid_course_key(key):
    """
    Return True when ``key`` parses as (or already is) a CourseKey,
    False otherwise.
    """
    try:
        parsed = CourseKey.from_string(key)
    except InvalidKeyError:
        # Not parseable; the key may still itself be a CourseKey instance.
        parsed = key
    return isinstance(parsed, CourseKey)
def seed_milestone_relationship_types():
    """
    Helper method to pre-populate MRTs so the tests can run
    """
    if not settings.FEATURES.get('MILESTONES_APP', False):
        return
    for relationship in ('requires', 'fulfills'):
        MilestoneRelationshipType.objects.create(name=relationship)
def generate_milestone_namespace(namespace, course_key=None):
    """
    Returns a specifically-formatted namespace string for the specified type
    (None when the namespace is not one of NAMESPACE_CHOICES).
    """
    if namespace not in NAMESPACE_CHOICES.values():
        return None
    if namespace == 'entrance_exams':
        return '{}.{}'.format(unicode(course_key), NAMESPACE_CHOICES['ENTRANCE_EXAM'])
def serialize_user(user):
    """
    Returns a milestones-friendly representation of a user object
    (a dict carrying only the user's id).
    """
    return {'id': user.id}
| agpl-3.0 |
savoirfairelinux/rekishi | rekishi/api/query_builder.py | 1 | 2223 | class InfluxQueryHelper(object):
def __init__(self):
    # Clause fragments accumulate across build_query() calls; the helper
    # is intended for single-query use.
    self.where_clause = ''
    self.limit_clause = ''
    self.query = ''
def build_query(self, base_query, **kwargs):
    """Assemble ``base_query`` plus optional WHERE and LIMIT fragments,
    terminated with ';'.

    Recognized kwargs ('start', 'end', 'where', 'limit') carry
    request-style lists of string values.
    """
    where_args = {}
    for key in ('start', 'end', 'where'):
        if key in kwargs:
            where_args[key] = kwargs[key].pop()
    if where_args:
        self.where_clause = self.build_where_clause(where_args)
    if 'limit' in kwargs:
        self.limit_clause = self.build_limit_clause(kwargs['limit'][0])
    # e.g. SELECT * FROM serie_name WHERE time > xx limit 1;
    self.query = "%s%s%s;" % (base_query, self.where_clause, self.limit_clause)
    return self.query
def build_limit_clause(self, limit):
    """Return the ' limit N' query fragment for the given limit value."""
    return ' limit %s' % limit
def build_where_clause(self, where_dict):
    """Return the ' WHERE ...' fragment built from ``where_dict``.

    'start'/'end' become lower/upper time bounds; 'where' is a
    ';'-separated list of 'key,op,value' triples.  Raises ValueError on a
    malformed triple.

    NOTE(review): values are interpolated without quoting or escaping --
    this assumes trusted input; confirm before exposing to end users.
    """
    where_clause = ''
    for key, value in where_dict.iteritems():
        new_segment = ''
        # Where clause still empty
        if where_clause == '':
            new_segment += ' WHERE '
        else:
            new_segment += ' AND '
        if key == 'start':
            new_segment += 'time > %s' % value
            where_clause += new_segment
        elif key == 'end':
            new_segment += 'time < %s' % value
            where_clause += new_segment
        # Where list
        elif key == 'where':
            cond_list = value.split(';')
            for cond in cond_list:
                if where_clause == '':
                    new_segment = ' WHERE '
                else:
                    new_segment = ' AND '
                try:
                    wkey, wop, wval = cond.split(',')
                except ValueError:
                    # Only the unpacking error from a malformed triple is a
                    # user error; the previous bare `except:` also swallowed
                    # KeyboardInterrupt/SystemExit.
                    raise ValueError('Invalid WHERE clause.')
                new_segment += '%s %s %s' % (wkey, wop, wval)
                where_clause += new_segment
    return where_clause
| gpl-3.0 |
jotes/boto | tests/integration/__init__.py | 112 | 2340 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Base class to make checking the certs easier.
"""
# We subclass from ``object`` instead of ``TestCase`` here so that this doesn't
# add noise to the test suite (otherwise these no-ops would run on every
# import).
class ServiceCertVerificationTest(object):
    # Mixin for TestCase subclasses: connects to every region of a service
    # and makes one benign API call so an invalid SSL certificate surfaces
    # as a test failure.
    ssl = True

    # SUBCLASSES MUST OVERRIDE THIS!
    # Something like ``boto.sqs.regions()``...
    regions = []

    def test_certs(self):
        self.assertTrue(len(self.regions) > 0)
        for region in self.regions:
            # GovCloud and China partitions need special credentials, so
            # failures there are expected for ordinary accounts and are
            # not treated as certificate problems.
            special_access_required = False
            for snippet in ('gov', 'cn-'):
                if snippet in region.name:
                    special_access_required = True
                    break

            try:
                c = region.connect()
                self.sample_service_call(c)
            except:
                # This is bad (because the SSL cert failed). Re-raise the
                # exception.
                if not special_access_required:
                    raise

    def sample_service_call(self, conn):
        """
        Subclasses should override this method to do a service call that will
        always succeed (like fetch a list, even if it's empty).
        """
        pass
| mit |
hoppinghippo/HadoopMapReduce | contrib/hod/hodlib/Common/allocationManagerUtil.py | 182 | 1193 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""defines Allocation Manager Utilities"""
# -*- python -*-
from hodlib.allocationManagers.goldAllocationManager import goldAllocationManager
class allocationManagerUtil:
    """defines Allocation Manager Utilities"""

    @staticmethod
    def getAllocationManager(name, cfg, log):
        """returns a concrete instance of the specified AllocationManager

        Only the 'gold' allocation manager is currently supported; any
        other name yields None.
        """
        if name == 'gold':
            return goldAllocationManager(cfg, log)
        # Unknown manager names are not an error here; callers must handle
        # the None result.  (Replaces the legacy
        # ``staticmethod(getAllocationManager)`` assignment with the
        # @staticmethod decorator; behavior is unchanged.)
        return None
| apache-2.0 |
timoschwarzer/blendworks | BlendWorks Server/python/Lib/test/test_kqueue.py | 72 | 7619 | """
Tests for kqueue wrapper.
"""
import errno
import os
import select
import socket
import sys
import time
import unittest
from test import support
if not hasattr(select, "kqueue"):
raise unittest.SkipTest("test works only on BSD")
class TestKQueue(unittest.TestCase):
    """Exercise the select.kqueue()/select.kevent wrappers (BSD/macOS only;
    the module import is skipped elsewhere)."""

    def test_create_queue(self):
        kq = select.kqueue()
        self.assertTrue(kq.fileno() > 0, kq.fileno())
        self.assertTrue(not kq.closed)
        kq.close()
        self.assertTrue(kq.closed)
        # File descriptor access after close must fail.
        self.assertRaises(ValueError, kq.fileno)

    def test_create_event(self):
        from operator import lt, le, gt, ge

        fd = os.open(os.devnull, os.O_WRONLY)
        self.addCleanup(os.close, fd)

        # Default kevent: read filter, EV_ADD flag, zeroed data/udata.
        ev = select.kevent(fd)
        other = select.kevent(1000)
        self.assertEqual(ev.ident, fd)
        self.assertEqual(ev.filter, select.KQ_FILTER_READ)
        self.assertEqual(ev.flags, select.KQ_EV_ADD)
        self.assertEqual(ev.fflags, 0)
        self.assertEqual(ev.data, 0)
        self.assertEqual(ev.udata, 0)
        self.assertEqual(ev, ev)
        self.assertNotEqual(ev, other)
        # kevents are totally ordered among themselves; ordering against
        # non-kevent objects must raise TypeError.
        self.assertTrue(ev < other)
        self.assertTrue(other >= ev)
        for op in lt, le, gt, ge:
            self.assertRaises(TypeError, op, ev, None)
            self.assertRaises(TypeError, op, ev, 1)
            self.assertRaises(TypeError, op, ev, "ev")

        ev = select.kevent(fd, select.KQ_FILTER_WRITE)
        self.assertEqual(ev.ident, fd)
        self.assertEqual(ev.filter, select.KQ_FILTER_WRITE)
        self.assertEqual(ev.flags, select.KQ_EV_ADD)
        self.assertEqual(ev.fflags, 0)
        self.assertEqual(ev.data, 0)
        self.assertEqual(ev.udata, 0)
        self.assertEqual(ev, ev)
        self.assertNotEqual(ev, other)

        ev = select.kevent(fd, select.KQ_FILTER_WRITE, select.KQ_EV_ONESHOT)
        self.assertEqual(ev.ident, fd)
        self.assertEqual(ev.filter, select.KQ_FILTER_WRITE)
        self.assertEqual(ev.flags, select.KQ_EV_ONESHOT)
        self.assertEqual(ev.fflags, 0)
        self.assertEqual(ev.data, 0)
        self.assertEqual(ev.udata, 0)
        self.assertEqual(ev, ev)
        self.assertNotEqual(ev, other)

        # All six constructor fields are settable positionally.
        ev = select.kevent(1, 2, 3, 4, 5, 6)
        self.assertEqual(ev.ident, 1)
        self.assertEqual(ev.filter, 2)
        self.assertEqual(ev.flags, 3)
        self.assertEqual(ev.fflags, 4)
        self.assertEqual(ev.data, 5)
        self.assertEqual(ev.udata, 6)
        self.assertEqual(ev, ev)
        self.assertNotEqual(ev, other)

        # Large field values must round-trip without truncation.
        bignum = 0x7fff
        ev = select.kevent(bignum, 1, 2, 3, bignum - 1, bignum)
        self.assertEqual(ev.ident, bignum)
        self.assertEqual(ev.filter, 1)
        self.assertEqual(ev.flags, 2)
        self.assertEqual(ev.fflags, 3)
        self.assertEqual(ev.data, bignum - 1)
        self.assertEqual(ev.udata, bignum)
        self.assertEqual(ev, ev)
        self.assertNotEqual(ev, other)

    def test_queue_event(self):
        # Build a connected non-blocking socket pair via a listener.
        serverSocket = socket.socket()
        serverSocket.bind(('127.0.0.1', 0))
        serverSocket.listen(1)
        client = socket.socket()
        client.setblocking(False)
        try:
            client.connect(('127.0.0.1', serverSocket.getsockname()[1]))
        except OSError as e:
            self.assertEqual(e.args[0], errno.EINPROGRESS)
        else:
            #raise AssertionError("Connect should have raised EINPROGRESS")
            pass # FreeBSD doesn't raise an exception here
        server, addr = serverSocket.accept()

        # fromfd() must yield a queue sharing the same kernel object.
        kq = select.kqueue()
        kq2 = select.kqueue.fromfd(kq.fileno())

        ev = select.kevent(server.fileno(),
                           select.KQ_FILTER_WRITE,
                           select.KQ_EV_ADD | select.KQ_EV_ENABLE)
        kq.control([ev], 0)
        ev = select.kevent(server.fileno(),
                           select.KQ_FILTER_READ,
                           select.KQ_EV_ADD | select.KQ_EV_ENABLE)
        kq.control([ev], 0)
        ev = select.kevent(client.fileno(),
                           select.KQ_FILTER_WRITE,
                           select.KQ_EV_ADD | select.KQ_EV_ENABLE)
        kq2.control([ev], 0)
        ev = select.kevent(client.fileno(),
                           select.KQ_FILTER_READ,
                           select.KQ_EV_ADD | select.KQ_EV_ENABLE)
        kq2.control([ev], 0)

        # Before any data flows, only the write sides are ready.
        events = kq.control(None, 4, 1)
        events = set((e.ident, e.filter) for e in events)
        self.assertEqual(events, set([
            (client.fileno(), select.KQ_FILTER_WRITE),
            (server.fileno(), select.KQ_FILTER_WRITE)]))

        client.send(b"Hello!")
        server.send(b"world!!!")

        # We may need to call it several times
        for i in range(10):
            events = kq.control(None, 4, 1)
            if len(events) == 4:
                break
            time.sleep(1.0)
        else:
            self.fail('timeout waiting for event notifications')

        events = set((e.ident, e.filter) for e in events)
        self.assertEqual(events, set([
            (client.fileno(), select.KQ_FILTER_WRITE),
            (client.fileno(), select.KQ_FILTER_READ),
            (server.fileno(), select.KQ_FILTER_WRITE),
            (server.fileno(), select.KQ_FILTER_READ)]))

        # Remove completely client, and server read part
        ev = select.kevent(client.fileno(),
                           select.KQ_FILTER_WRITE,
                           select.KQ_EV_DELETE)
        kq.control([ev], 0)
        ev = select.kevent(client.fileno(),
                           select.KQ_FILTER_READ,
                           select.KQ_EV_DELETE)
        kq.control([ev], 0)
        ev = select.kevent(server.fileno(),
                           select.KQ_FILTER_READ,
                           select.KQ_EV_DELETE)
        kq.control([ev], 0, 0)
        # After deletion only the server write event should remain.
        events = kq.control([], 4, 0.99)
        events = set((e.ident, e.filter) for e in events)
        self.assertEqual(events, set([
            (server.fileno(), select.KQ_FILTER_WRITE)]))

        client.close()
        server.close()
        serverSocket.close()

    def testPair(self):
        kq = select.kqueue()
        a, b = socket.socketpair()

        a.send(b'foo')
        event1 = select.kevent(a, select.KQ_FILTER_READ, select.KQ_EV_ADD | select.KQ_EV_ENABLE)
        event2 = select.kevent(b, select.KQ_FILTER_READ, select.KQ_EV_ADD | select.KQ_EV_ENABLE)
        r = kq.control([event1, event2], 1, 1)

        self.assertTrue(r)
        self.assertFalse(r[0].flags & select.KQ_EV_ERROR)
        # The event's data field reports the number of bytes readable.
        self.assertEqual(b.recv(r[0].data), b'foo')

        a.close()
        b.close()
        kq.close()

    def test_close(self):
        open_file = open(__file__, "rb")
        self.addCleanup(open_file.close)
        fd = open_file.fileno()
        kqueue = select.kqueue()

        # test fileno() method and closed attribute
        self.assertIsInstance(kqueue.fileno(), int)
        self.assertFalse(kqueue.closed)

        # test close()
        kqueue.close()
        self.assertTrue(kqueue.closed)
        self.assertRaises(ValueError, kqueue.fileno)

        # close() can be called more than once
        kqueue.close()

        # operations must fail with ValueError("I/O operation on closed ...")
        self.assertRaises(ValueError, kqueue.control, None, 4)

    def test_fd_non_inheritable(self):
        # kqueue descriptors must not leak into child processes (PEP 446).
        kqueue = select.kqueue()
        self.addCleanup(kqueue.close)
        self.assertEqual(os.get_inheritable(kqueue.fileno()), False)
def test_main():
    # Run through the regrtest-compatible helper so resource handling and
    # reporting match the rest of the CPython test suite.
    support.run_unittest(TestKQueue)

if __name__ == "__main__":
    test_main()
| gpl-2.0 |
vatsala/python_koans | python3/koans/about_control_statements.py | 31 | 1925 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutControlStatements(Koan):
    # Koan exercises: each __ is a deliberate placeholder for the student
    # to replace so the assertion passes.  Do not "fix" them.

    def test_if_then_else_statements(self):
        if True:
            result = 'true value'
        else:
            result = 'false value'
        self.assertEqual(__, result)

    def test_if_then_statements(self):
        result = 'default value'
        if True:
            result = 'true value'
        self.assertEqual(__, result)

    def test_while_statement(self):
        i = 1
        result = 1
        while i <= 10:
            result = result * i
            i += 1
        self.assertEqual(__, result)

    def test_break_statement(self):
        i = 1
        result = 1
        while True:
            if i > 10: break
            result = result * i
            i += 1
        self.assertEqual(__, result)

    def test_continue_statement(self):
        i = 0
        result = []
        while i < 10:
            i += 1
            if (i % 2) == 0: continue
            result.append(i)
        self.assertEqual(__, result)

    def test_for_statement(self):
        phrase = ["fish", "and", "chips"]
        result = []
        for item in phrase:
            result.append(item.upper())
        self.assertEqual([__, __, __], result)

    def test_for_statement_with_tuples(self):
        round_table = [
            ("Lancelot", "Blue"),
            ("Galahad", "I don't know!"),
            ("Robin", "Blue! I mean Green!"),
            ("Arthur", "Is that an African Swallow or Amazonian Swallow?")
        ]
        result = []
        # Tuple unpacking in the for target splits each (knight, answer) pair.
        for knight, answer in round_table:
            result.append("Contestant: '" + knight + "' Answer: '" + answer + "'")

        text = __

        self.assertRegexpMatches(result[2], text)

        self.assertNoRegexpMatches(result[0], text)
        self.assertNoRegexpMatches(result[1], text)
        self.assertNoRegexpMatches(result[3], text)
| mit |
Zac-HD/home-assistant | homeassistant/components/sensor/openevse.py | 11 | 3419 | """
Support for monitoring an OpenEVSE Charger.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.openevse/
"""
import logging
from requests import RequestException
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import TEMP_CELSIUS, CONF_HOST
from homeassistant.const import CONF_MONITORED_VARIABLES
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['openevsewifi==0.4']
# sensor key -> [friendly display name, unit of measurement]
SENSOR_TYPES = {
    'status': ['Charging Status', None],
    'charge_time': ['Charge Time Elapsed', 'minutes'],
    # Fixed typo in the user-facing name ('Termperature').
    'ambient_temp': ['Ambient Temperature', TEMP_CELSIUS],
    'ir_temp': ['IR Temperature', TEMP_CELSIUS],
    'rtc_temp': ['RTC Temperature', TEMP_CELSIUS],
    'usage_session': ['Usage this Session', 'kWh'],
    'usage_total': ['Total Usage', 'kWh']
}

# Platform config: required charger host, optional list of sensor keys
# (defaults to just the charging status).
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_MONITORED_VARIABLES, default=['status']):
        vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the OpenEVSE sensor."""
    # Imported lazily so Home Assistant only needs the dependency when the
    # platform is actually configured.
    import openevsewifi

    host = config.get(CONF_HOST)
    monitored_variables = config.get(CONF_MONITORED_VARIABLES)

    charger = openevsewifi.Charger(host)

    dev = []
    # One sensor entity per requested variable, all sharing one charger
    # connection.
    for variable in monitored_variables:
        dev.append(OpenEVSESensor(variable, charger))

    add_devices(dev)
class OpenEVSESensor(Entity):
    """One monitored value (status, temperature, usage, ...) of an
    OpenEVSE charger, exposed as a Home Assistant sensor entity."""

    # pylint: disable=too-many-arguments
    def __init__(self, sensor_type, charger):
        """Initialize the sensor."""
        friendly_name, unit = SENSOR_TYPES[sensor_type]
        self._name = friendly_name
        self.type = sensor_type
        self._state = None
        self.charger = charger
        self._unit_of_measurement = unit

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this sensor."""
        return self._unit_of_measurement

    def update(self):
        """Get the monitored data from the charger."""
        readers = {
            'status': lambda: self.charger.getStatus(),
            'charge_time': lambda: self.charger.getChargeTimeElapsed() / 60,
            'ambient_temp': lambda: self.charger.getAmbientTemperature(),
            'ir_temp': lambda: self.charger.getIRTemperature(),
            'rtc_temp': lambda: self.charger.getRTCTemperature(),
            'usage_session': lambda: float(self.charger.getUsageSession()) / 1000,
            'usage_total': lambda: float(self.charger.getUsageTotal()) / 1000,
        }
        try:
            reader = readers.get(self.type)
            # Unrecognized sensor types report the literal state 'Unknown'.
            self._state = reader() if reader is not None else 'Unknown'
        except (RequestException, ValueError, KeyError):
            _LOGGER.warning("Could not update status for %s", self.name)
| apache-2.0 |
jank3/django | django/middleware/clickjacking.py | 284 | 1989 | """
Clickjacking Protection Middleware.
This module provides a middleware that implements protection against a
malicious site loading resources from your site in a hidden frame.
"""
from django.conf import settings
class XFrameOptionsMiddleware(object):
    """
    Middleware that sets the X-Frame-Options HTTP header in HTTP responses.

    The header is left untouched when the response already carries one, or
    when the view was marked with ``xframe_options_exempt``.  The value is
    taken from the ``X_FRAME_OPTIONS`` Django setting and defaults to
    'SAMEORIGIN' (the response may only be framed by pages on the same
    site); set it to 'DENY' to forbid framing everywhere.

    Note: older browsers quietly ignore this header, so additional
    clickjacking defenses are required if protection in those browsers
    matters.

    https://en.wikipedia.org/wiki/Clickjacking#Server_and_client
    """
    def process_response(self, request, response):
        # Respect a header that was set explicitly elsewhere.
        if response.get('X-Frame-Options') is not None:
            return response

        # Respect views decorated with @xframe_options_exempt.
        if getattr(response, 'xframe_options_exempt', False):
            return response

        value = self.get_xframe_options_value(request, response)
        response['X-Frame-Options'] = value
        return response

    def get_xframe_options_value(self, request, response):
        """
        Return the value to use for the X-Frame-Options header.

        Defaults to the ``X_FRAME_OPTIONS`` setting (uppercased), falling
        back to 'SAMEORIGIN' when the setting is absent.  Override this
        method to vary the value per request or response.
        """
        configured = getattr(settings, 'X_FRAME_OPTIONS', 'SAMEORIGIN')
        return configured.upper()
| bsd-3-clause |
dc3-plaso/dfvfs | dfvfs/resolver/resolver.py | 1 | 7222 | # -*- coding: utf-8 -*-
"""The path specification resolver."""
from dfvfs.credentials import keychain
from dfvfs.lib import definitions
from dfvfs.lib import errors
from dfvfs.mount import manager as mount_manager
from dfvfs.path import path_spec
from dfvfs.resolver import context
class Resolver(object):
  """Class that implements the path specification resolver."""

  # Process-wide fallback resolver context, used whenever the caller does
  # not pass its own; as the method docstrings note, it is not multi
  # process safe.
  _resolver_context = context.Context()
  # Maps a path specification type indicator to its ResolverHelper.
  _resolver_helpers = {}
  # Shared credential store consulted when opening protected containers.
  key_chain = keychain.KeyChain()

  @classmethod
  def DeregisterHelper(cls, resolver_helper):
    """Deregisters a path specification resolver helper.

    Args:
      resolver_helper (ResolverHelper): resolver helper.

    Raises:
      KeyError: if resolver helper object is not set for the corresponding
          type indicator.
    """
    if resolver_helper.type_indicator not in cls._resolver_helpers:
      raise KeyError(
          u'Resolver helper object not set for type indicator: {0:s}.'.format(
              resolver_helper.type_indicator))
    del cls._resolver_helpers[resolver_helper.type_indicator]

  @classmethod
  def OpenFileEntry(cls, path_spec_object, resolver_context=None):
    """Opens a file entry object defined by path specification.

    Args:
      path_spec_object (PathSpec): path specification.
      resolver_context (Optional[Context]): resolver context, where None
          represents the built in context which is not multi process safe.

    Returns:
      FileEntry: file entry or None if the path specification could not be
          resolved.
    """
    file_system = cls.OpenFileSystem(
        path_spec_object, resolver_context=resolver_context)
    if resolver_context is None:
      resolver_context = cls._resolver_context
    file_entry = file_system.GetFileEntryByPathSpec(path_spec_object)
    # Release the file system so it will be removed from the cache
    # when the file entry is destroyed.
    resolver_context.ReleaseFileSystem(file_system)
    return file_entry

  @classmethod
  def OpenFileObject(cls, path_spec_object, resolver_context=None):
    """Opens a file-like object defined by path specification.

    Args:
      path_spec_object (PathSpec): path specification.
      resolver_context (Optional[Context]): resolver context, where None
          represents the built in context which is not multi process safe.

    Returns:
      FileIO: file-like object or None if the path specification could not
          be resolved.

    Raises:
      KeyError: if resolver helper object is not set for the corresponding
          type indicator.
      PathSpecError: if the path specification is incorrect.
      TypeError: if the path specification type is unsupported.
    """
    if not isinstance(path_spec_object, path_spec.PathSpec):
      raise TypeError(u'Unsupported path specification type.')
    if resolver_context is None:
      resolver_context = cls._resolver_context
    # A mount path specification is an alias: translate it into the path
    # specification that was registered for that mount point.
    if path_spec_object.type_indicator == definitions.TYPE_INDICATOR_MOUNT:
      if path_spec_object.HasParent():
        raise errors.PathSpecError(
            u'Unsupported mount path specification with parent.')
      mount_point = getattr(path_spec_object, u'identifier', None)
      if not mount_point:
        raise errors.PathSpecError(
            u'Unsupported path specification without mount point identifier.')
      path_spec_object = mount_manager.MountPointManager.GetMountPoint(
          mount_point)
      if not path_spec_object:
        raise errors.MountPointError(
            u'No such mount point: {0:s}'.format(mount_point))
    # The resolver context doubles as a cache; only create a new file-like
    # object when no cached one exists for this path specification.
    file_object = resolver_context.GetFileObject(path_spec_object)
    if not file_object:
      if path_spec_object.type_indicator not in cls._resolver_helpers:
        raise KeyError((
            u'Resolver helper object not set for type indicator: '
            u'{0:s}.').format(path_spec_object.type_indicator))
      resolver_helper = cls._resolver_helpers[path_spec_object.type_indicator]
      file_object = resolver_helper.NewFileObject(resolver_context)
    # NOTE(review): open() appears to be invoked on cached objects as well —
    # confirm repeated opens are safe for the file object implementations.
    file_object.open(path_spec=path_spec_object)
    return file_object

  @classmethod
  def OpenFileSystem(cls, path_spec_object, resolver_context=None):
    """Opens a file system object defined by path specification.

    Args:
      path_spec_object (PathSpec): path specification.
      resolver_context (Optional[Context]): resolver context, where None
          represents the built in context which is not multi process safe.

    Returns:
      FileSystem: file system or None if the path specification could not
          be resolved or has no file system object.

    Raises:
      AccessError: if the access to open the file system was denied.
      BackEndError: if the file system cannot be opened.
      KeyError: if resolver helper object is not set for the corresponding
          type indicator.
      PathSpecError: if the path specification is incorrect.
      TypeError: if the path specification type is unsupported.
    """
    if not isinstance(path_spec_object, path_spec.PathSpec):
      raise TypeError(u'Unsupported path specification type.')
    if resolver_context is None:
      resolver_context = cls._resolver_context
    # Same mount point aliasing as in OpenFileObject.
    if path_spec_object.type_indicator == definitions.TYPE_INDICATOR_MOUNT:
      if path_spec_object.HasParent():
        raise errors.PathSpecError(
            u'Unsupported mount path specification with parent.')
      mount_point = getattr(path_spec_object, u'identifier', None)
      if not mount_point:
        raise errors.PathSpecError(
            u'Unsupported path specification without mount point identifier.')
      path_spec_object = mount_manager.MountPointManager.GetMountPoint(
          mount_point)
      if not path_spec_object:
        raise errors.MountPointError(
            u'No such mount point: {0:s}'.format(mount_point))
    file_system = resolver_context.GetFileSystem(path_spec_object)
    if not file_system:
      if path_spec_object.type_indicator not in cls._resolver_helpers:
        raise KeyError((
            u'Resolver helper object not set for type indicator: '
            u'{0:s}.').format(path_spec_object.type_indicator))
      resolver_helper = cls._resolver_helpers[path_spec_object.type_indicator]
      file_system = resolver_helper.NewFileSystem(resolver_context)
    try:
      file_system.Open(path_spec_object)
    except (errors.AccessError, errors.PathSpecError):
      # Deliberate pass-through: these already carry a precise diagnosis.
      raise
    except (IOError, ValueError) as exception:
      # Normalize low-level failures into the dfvfs back-end error type.
      raise errors.BackEndError(
          u'Unable to open file system with error: {0:s}'.format(exception))
    return file_system

  @classmethod
  def RegisterHelper(cls, resolver_helper):
    """Registers a path specification resolver helper.

    Args:
      resolver_helper (ResolverHelper): resolver helper.

    Raises:
      KeyError: if resolver helper object is already set for the corresponding
          type indicator.
    """
    if resolver_helper.type_indicator in cls._resolver_helpers:
      raise KeyError((
          u'Resolver helper object already set for type indicator: '
          u'{0!s}.').format(resolver_helper.type_indicator))
    cls._resolver_helpers[resolver_helper.type_indicator] = resolver_helper
| apache-2.0 |
darkleons/lama | addons/hr_attendance/__openerp__.py | 52 | 2119 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Attendances',
'version': '1.1',
'category': 'Human Resources',
'description': """
This module aims to manage employee's attendances.
==================================================
Keeps account of the attendances of the employees on the basis of the
actions(Sign in/Sign out) performed by them.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/employees',
'images': ['images/hr_attendances.jpeg'],
'depends': ['hr', 'report'],
'data': [
'security/ir_rule.xml',
'security/ir.model.access.csv',
'hr_attendance_view.xml',
'hr_attendance_report.xml',
'wizard/hr_attendance_error_view.xml',
'res_config_view.xml',
'views/report_attendanceerrors.xml',
'views/hr_attendance.xml',
],
'demo': ['hr_attendance_demo.xml'],
'test': [
'test/attendance_process.yml',
'test/hr_attendance_report.yml',
],
'installable': True,
'auto_install': False,
#web
'qweb': ["static/src/xml/attendance.xml"],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zofuthan/edx-platform | lms/djangoapps/commerce/signals.py | 58 | 8652 | """
Signal handling functions for use with external commerce service.
"""
import json
import logging
from urlparse import urljoin
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.dispatch import receiver
from django.utils.translation import ugettext as _
from ecommerce_api_client.exceptions import HttpClientError
import requests
from microsite_configuration import microsite
from request_cache.middleware import RequestCache
from student.models import UNENROLL_DONE
from commerce import ecommerce_api_client, is_commerce_service_configured
log = logging.getLogger(__name__)
@receiver(UNENROLL_DONE)
def handle_unenroll_done(sender, course_enrollment=None, skip_refund=False,
                         **kwargs):  # pylint: disable=unused-argument
    """
    Signal receiver for unenrollments, used to automatically initiate refunds
    when applicable.

    N.B. this signal is also consumed by lms.djangoapps.shoppingcart.
    """
    # Nothing to do when the commerce service is absent or the caller
    # explicitly asked to skip the refund.
    if not is_commerce_service_configured() or skip_refund:
        return

    if not (course_enrollment and course_enrollment.refundable()):
        return

    try:
        requester = get_request_user() or course_enrollment.user
        if isinstance(requester, AnonymousUser):
            # Assume the request was initiated via server-to-server
            # api call (presumably Otto). In this case we cannot
            # construct a client to call Otto back anyway, because
            # the client does not work anonymously, and furthermore,
            # there's certainly no need to inform Otto about this request.
            return
        refund_seat(course_enrollment, requester)
    except:  # pylint: disable=bare-except
        # don't assume the signal was fired with `send_robust`.
        # avoid blowing up other signal handlers by gracefully
        # trapping the Exception and logging an error.
        log.exception(
            "Unexpected exception while attempting to initiate refund for user [%s], course [%s]",
            course_enrollment.user.id,
            course_enrollment.course_id,
        )
def get_request_user():
    """
    Helper to get the authenticated user from the current HTTP request (if
    applicable).

    If the requester of an unenrollment is not the same person as the student
    being unenrolled, we authenticate to the commerce service as the requester.
    """
    current_request = RequestCache.get_current_request()
    if current_request is None:
        return None
    return getattr(current_request, 'user', None)
def refund_seat(course_enrollment, request_user):
    """
    Attempt to initiate a refund for any orders associated with the seat being
    unenrolled, using the commerce service.

    Arguments:
        course_enrollment (CourseEnrollment): a student enrollment
        request_user: the user as whom to authenticate to the commerce service
            when attempting to initiate the refund.

    Returns:
        A list of the external service's IDs for any refunds that were initiated
        (may be empty).

    Raises:
        exceptions.SlumberBaseException: for any unhandled HTTP error during
            communication with the commerce service.
        exceptions.Timeout: if the attempt to reach the commerce service timed
            out.
    """
    course_key_str = unicode(course_enrollment.course_id)
    unenrolled_user = course_enrollment.user
    # Authenticate as the requester when one exists; otherwise fall back to
    # the student being unenrolled.
    try:
        refund_ids = ecommerce_api_client(request_user or unenrolled_user).refunds.post(
            {'course_id': course_key_str, 'username': unenrolled_user.username}
        )
    except HttpClientError, exc:
        if exc.response.status_code == 403 and request_user != unenrolled_user:
            # this is a known limitation; commerce service does not presently
            # support the case of a non-superusers initiating a refund on
            # behalf of another user.
            log.warning("User [%s] was not authorized to initiate a refund for user [%s] "
                        "upon unenrollment from course [%s]", request_user.id, unenrolled_user.id, course_key_str)
            return []
        else:
            # no other error is anticipated, so re-raise the Exception
            raise exc
    if refund_ids:
        # at least one refundable order was found.
        log.info(
            "Refund successfully opened for user [%s], course [%s]: %r",
            unenrolled_user.id,
            course_key_str,
            refund_ids,
        )
        # XCOM-371: this is a temporary measure to suppress refund-related email
        # notifications to students and support@) for free enrollments. This
        # condition should be removed when the CourseEnrollment.refundable() logic
        # is updated to be more correct, or when we implement better handling (and
        # notifications) in Otto for handling reversal of $0 transactions.
        if course_enrollment.mode != 'verified':
            # 'verified' is the only enrollment mode that should presently
            # result in opening a refund request.
            log.info(
                "Skipping refund email notification for non-verified mode for user [%s], course [%s], mode: [%s]",
                course_enrollment.user.id,
                course_enrollment.course_id,
                course_enrollment.mode,
            )
        else:
            # Notification failures must not roll back the refund itself.
            try:
                send_refund_notification(course_enrollment, refund_ids)
            except: # pylint: disable=bare-except
                # don't break, just log a warning
                log.warning("Could not send email notification for refund.", exc_info=True)
    else:
        # no refundable orders were found.
        log.debug("No refund opened for user [%s], course [%s]", unenrolled_user.id, course_key_str)
    return refund_ids
def create_zendesk_ticket(requester_name, requester_email, subject, body, tags=None):
    """ Create a Zendesk ticket via API. """
    if not (settings.ZENDESK_URL and settings.ZENDESK_USER and settings.ZENDESK_API_KEY):
        log.debug('Zendesk is not configured. Cannot create a ticket.')
        return

    # Copy the caller's tags, add the 'LMS' marker, and de-duplicate
    # without mutating the original list.
    ticket_tags = list(set(list(tags or []) + ['LMS']))

    # Encode the ticket as a JSON payload.
    payload = json.dumps({
        'ticket': {
            'requester': {
                'name': requester_name,
                'email': requester_email
            },
            'subject': subject,
            'comment': {'body': body},
            'tags': ticket_tags
        }
    })

    # Zendesk expects token-based basic auth: "<user>/token" + API key.
    url = urljoin(settings.ZENDESK_URL, '/api/v2/tickets.json')
    auth = ('{}/token'.format(settings.ZENDESK_USER), settings.ZENDESK_API_KEY)
    headers = {'content-type': 'application/json'}
    try:
        response = requests.post(url, data=payload, auth=auth, headers=headers)
        # Check for HTTP codes other than 201 (Created)
        if response.status_code != 201:
            log.error(u'Failed to create ticket. Status: [%d], Body: [%s]', response.status_code, response.content)
        else:
            log.debug('Successfully created ticket.')
    except Exception:  # pylint: disable=broad-except
        log.exception('Failed to create ticket.')
        return
def generate_refund_notification_body(student, refund_ids):  # pylint: disable=invalid-name
    """ Returns a refund notification message body. """
    msg = _(
        "A refund request has been initiated for {username} ({email}). "
        "To process this request, please visit the link(s) below."
    ).format(username=student.username, email=student.email)

    # One processing link per refund, rooted at the ecommerce dashboard.
    refund_urls = []
    for refund_id in refund_ids:
        refund_urls.append(
            urljoin(settings.ECOMMERCE_PUBLIC_URL_ROOT,
                    '/dashboard/refunds/{}/'.format(refund_id)))

    return '{msg}\n\n{urls}'.format(msg=msg, urls='\n'.join(refund_urls))
def send_refund_notification(course_enrollment, refund_ids):
    """ Notify the support team of the refund request. """
    if microsite.is_request_in_microsite():
        # this is not presently supported with the external service.
        raise NotImplementedError("Unable to send refund processing emails to microsite teams.")

    student = course_enrollment.user
    requester_name = student.profile.name or student.username
    create_zendesk_ticket(
        requester_name,
        student.email,
        _("[Refund] User-Requested Refund"),
        generate_refund_notification_body(student, refund_ids),
        ['auto_refund'],
    )
| agpl-3.0 |
bitemyapp/electron | script/bootstrap.py | 73 | 5824 | #!/usr/bin/env python
import argparse
import os
import subprocess
import sys
from lib.config import LIBCHROMIUMCONTENT_COMMIT, BASE_URL, PLATFORM, \
enable_verbose_mode, is_verbose_mode, get_target_arch
from lib.util import execute_stdout, get_atom_shell_version, scoped_cwd
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
VENDOR_DIR = os.path.join(SOURCE_ROOT, 'vendor')
PYTHON_26_URL = 'https://chromium.googlesource.com/chromium/deps/python_26'
if os.environ.has_key('CI'):
NPM = os.path.join(SOURCE_ROOT, 'node_modules', '.bin', 'npm')
else:
NPM = 'npm'
if sys.platform in ['win32', 'cygwin']:
NPM += '.cmd'
def main():
  """Entry point: prepare the tree for building Electron.

  Steps run in dependency order — submodules and python libs must exist
  before brightray is bootstrapped, and the sysroot/clang downloads must
  precede the final update step.
  """
  os.chdir(SOURCE_ROOT)
  args = parse_args()
  if not args.yes and PLATFORM != 'win32':
    check_root()
  if args.verbose:
    enable_verbose_mode()
  if sys.platform == 'cygwin':
    # cygwin needs the bundled win32 Python 2.6 checkout.
    update_win32_python()
  if PLATFORM != 'win32':
    update_clang()
  update_submodules()
  setup_python_libs()
  update_node_modules('.')
  bootstrap_brightray(args.dev, args.url, args.target_arch)
  if args.target_arch in ['arm', 'ia32'] and PLATFORM == 'linux':
    # Cross builds on Linux need a matching sysroot.
    download_sysroot(args.target_arch)
  create_chrome_version_h()
  touch_config_gypi()
  run_update()
  update_electron_modules('spec', args.target_arch)
def parse_args():
  """Parse and return the bootstrap command line options."""
  parser = argparse.ArgumentParser(description='Bootstrap this project')
  parser.add_argument('-u', '--url',
                      help='The base URL from which to download '
                      'libchromiumcontent (i.e., the URL you passed to '
                      'libchromiumcontent\'s script/upload script',
                      default=BASE_URL,
                      required=False)
  parser.add_argument('-v', '--verbose',
                      action='store_true',
                      help='Prints the output of the subprocesses')
  parser.add_argument('-d', '--dev', action='store_true',
                      help='Do not download static_library build')
  parser.add_argument('-y', '--yes', '--assume-yes',
                      action='store_true',
                      help='Run non-interactively by assuming "yes" to all ' \
                           'prompts.')
  parser.add_argument('--target_arch', default=get_target_arch(),
                      help='Manually specify the arch to build for')
  return parser.parse_args()
def check_root():
if os.geteuid() == 0:
print "We suggest not running this as root, unless you're really sure."
choice = raw_input("Do you want to continue? [y/N]: ")
if choice not in ('y', 'Y'):
sys.exit(0)
def update_submodules():
  """Sync and recursively update all git submodules."""
  for command in (['git', 'submodule', 'sync'],
                  ['git', 'submodule', 'update', '--init', '--recursive']):
    execute_stdout(command)
def setup_python_libs():
  """Build the vendored pure-Python dependencies in place."""
  for lib in ('requests', 'boto'):
    with scoped_cwd(os.path.join(VENDOR_DIR, lib)):
      execute_stdout([sys.executable, 'setup.py', 'build'])
def bootstrap_brightray(is_dev, url, target_arch):
  """Run brightray's bootstrap script for the given arch and download URL.

  When is_dev is set, '--dev' is passed so no static_library build is
  downloaded.
  """
  bootstrap = os.path.join(VENDOR_DIR, 'brightray', 'script', 'bootstrap')
  args = ['--dev'] if is_dev else []
  args += [
    '--commit', LIBCHROMIUMCONTENT_COMMIT,
    '--target_arch', target_arch,
    url,
  ]
  execute_stdout([sys.executable, bootstrap] + args)
def update_node_modules(dirname, env=None):
  """Run `npm install` inside *dirname* with the given environment.

  NOTE(review): when env is None this assigns into os.environ directly on
  Linux, mutating the process environment — confirm that is intended.
  """
  if env is None:
    env = os.environ
  if PLATFORM == 'linux':
    # Use prebuilt clang for building native modules.
    llvm_dir = os.path.join(SOURCE_ROOT, 'vendor', 'llvm-build',
                            'Release+Asserts', 'bin')
    env['CC'] = os.path.join(llvm_dir, 'clang')
    env['CXX'] = os.path.join(llvm_dir, 'clang++')
    env['npm_config_clang'] = '1'
  with scoped_cwd(dirname):
    args = [NPM, 'install']
    if is_verbose_mode():
      args += ['--verbose']
    # Ignore npm install errors when running in CI.
    if os.environ.has_key('CI'):
      try:
        execute_stdout(args, env)
      except subprocess.CalledProcessError:
        pass
    else:
      execute_stdout(args, env)
def update_electron_modules(dirname, target_arch):
  """Install node modules for *dirname* built against the Electron headers."""
  env = os.environ.copy()
  # Point node-gyp at the atom-shell headers for the right version/arch.
  env.update({
      'npm_config_arch': target_arch,
      'npm_config_target': get_atom_shell_version(),
      'npm_config_disturl': 'https://atom.io/download/atom-shell',
  })
  update_node_modules(dirname, env)
def update_win32_python():
  """Clone the bundled Python 2.6 checkout (used under cygwin) if missing."""
  with scoped_cwd(VENDOR_DIR):
    if not os.path.exists('python_26'):
      execute_stdout(['git', 'clone', PYTHON_26_URL])
def update_clang():
  """Fetch/refresh the prebuilt clang toolchain via script/update-clang.sh."""
  execute_stdout([os.path.join(SOURCE_ROOT, 'script', 'update-clang.sh')])
def download_sysroot(target_arch):
  """Download the Linux sysroot for cross builds.

  The sysroot script names the 32-bit x86 sysroot 'i386', so 'ia32' is
  translated before invoking it.
  """
  sysroot_arch = 'i386' if target_arch == 'ia32' else target_arch
  execute_stdout([os.path.join(SOURCE_ROOT, 'script', 'install-sysroot.py'),
                  '--arch', sysroot_arch])
def create_chrome_version_h():
  """Generate atom/common/chrome_version.h from the libchromiumcontent VERSION.

  Only rewrites the target when its content would actually change, so an
  unchanged header keeps its mtime and does not trigger needless rebuilds.
  (The previous implementation opened the target with 'w+', which truncates
  the file before the read-back comparison — the guard always compared
  against an empty string and the file was rewritten on every run.)
  """
  version_file = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor',
                              'libchromiumcontent', 'VERSION')
  target_file = os.path.join(SOURCE_ROOT, 'atom', 'common', 'chrome_version.h')
  template_file = os.path.join(SOURCE_ROOT, 'script', 'chrome_version.h.in')

  with open(version_file, 'r') as f:
    version = f.read()
  with open(template_file, 'r') as f:
    template = f.read()
  content = template.replace('{PLACEHOLDER}', version.strip())

  # Binary mode on Windows preserves the template's line endings verbatim.
  if sys.platform in ['win32', 'cygwin']:
    read_mode, write_mode = 'rb', 'wb'
  else:
    read_mode, write_mode = 'r', 'w'

  existing = None
  if os.path.isfile(target_file):
    with open(target_file, read_mode) as f:
      existing = f.read()
  if existing != content:
    with open(target_file, write_mode) as f:
      f.write(content)
def touch_config_gypi():
  """Ensure vendor/node/config.gypi exists with its placeholder content.

  Writes only when the content differs, preserving the mtime of an
  up-to-date file. (The previous implementation opened with 'w+', which
  truncates before the read-back comparison, so the guard never held and
  the file was rewritten on every run.)
  """
  config_gypi = os.path.join(SOURCE_ROOT, 'vendor', 'node', 'config.gypi')
  content = '\n{}'
  existing = None
  if os.path.isfile(config_gypi):
    with open(config_gypi, 'r') as f:
      existing = f.read()
  if existing != content:
    with open(config_gypi, 'w') as f:
      f.write(content)
def run_update():
  """Invoke script/update.py to apply the remaining repo updates."""
  update = os.path.join(SOURCE_ROOT, 'script', 'update.py')
  execute_stdout([sys.executable, update])
# Propagate main()'s return value as the process exit code.
if __name__ == '__main__':
  sys.exit(main())
| mit |
eduNEXT/edx-platform | openedx/core/djangoapps/video_pipeline/config/waffle.py | 3 | 2708 | """
This module contains configuration settings via waffle flags
for the Video Pipeline app.
"""
from edx_toggles.toggles import LegacyWaffleFlag, LegacyWaffleFlagNamespace
from openedx.core.djangoapps.waffle_utils import CourseWaffleFlag
# Videos Namespace
WAFFLE_NAMESPACE = 'videos'
# .. toggle_name: videos.deprecate_youtube
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: Waffle flag telling whether youtube is deprecated. When enabled, videos are no longer uploaded
# to YouTube as part of the video pipeline.
# .. toggle_use_cases: open_edx
# .. toggle_creation_date: 2018-08-03
# .. toggle_tickets: https://github.com/edx/edx-platform/pull/18765
# TODO: Replace with CourseWaffleFlag() from waffle_flags().
DEPRECATE_YOUTUBE = 'deprecate_youtube'
# .. toggle_name: videos.enable_devstack_video_uploads
# .. toggle_implementation: WaffleFlag
# .. toggle_default: False
# .. toggle_description: When enabled, use Multi-Factor Authentication (MFA) for authenticating to AWS. These short-
# lived access tokens are well suited for development (probably?). [At the time of annotation, the exact consequences
# of enabling this feature toggle are uncertain.]
# .. toggle_use_cases: open_edx
# .. toggle_creation_date: 2020-03-12
# .. toggle_warnings: Enabling this feature requires that the ROLE_ARN, MFA_SERIAL_NUMBER, MFA_TOKEN settings are
# properly defined.
# .. toggle_tickets: https://github.com/edx/edx-platform/pull/23375
# TODO: Replace with WaffleFlag() from waffle_flags().
ENABLE_DEVSTACK_VIDEO_UPLOADS = 'enable_devstack_video_uploads'
# TODO: Replace with CourseWaffleFlag() from waffle_flags().
ENABLE_VEM_PIPELINE = 'enable_vem_pipeline'
def waffle_flags():
    """
    Returns the namespaced, cached, audited Waffle flags dictionary for Videos.

    IMPORTANT: Do NOT copy this dict pattern and do NOT add new flags to this dict.
    Instead, replace the string constants above with the actual flag instances.
    """
    namespace = LegacyWaffleFlagNamespace(name=WAFFLE_NAMESPACE, log_prefix='Videos: ')
    # Flag name -> flag class; course-scoped flags use CourseWaffleFlag.
    flag_classes = (
        (DEPRECATE_YOUTUBE, CourseWaffleFlag),
        (ENABLE_DEVSTACK_VIDEO_UPLOADS, LegacyWaffleFlag),
        # lint-amnesty, pylint: disable=toggle-missing-annotation
        (ENABLE_VEM_PIPELINE, CourseWaffleFlag),
    )
    return {
        flag_name: flag_class(
            waffle_namespace=namespace,
            flag_name=flag_name,
            module_name=__name__,
        )
        for flag_name, flag_class in flag_classes
    }
| agpl-3.0 |
nicko96/Chrome-Infra | appengine/chromium_build_logs/third_party/apiclient/discovery.py | 22 | 24745 | # Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for discovery based APIs
A client library for Google's discovery based APIs.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = [
'build', 'build_from_document'
]
import copy
import httplib2
import logging
import os
import random
import re
import uritemplate
import urllib
import urlparse
import mimeparse
import mimetypes
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
from apiclient.errors import HttpError
from apiclient.errors import InvalidJsonError
from apiclient.errors import MediaUploadSizeError
from apiclient.errors import UnacceptableMimeTypeError
from apiclient.errors import UnknownApiNameOrVersion
from apiclient.errors import UnknownLinkType
from apiclient.http import HttpRequest
from apiclient.http import MediaFileUpload
from apiclient.http import MediaUpload
from apiclient.model import JsonModel
from apiclient.model import RawModel
from apiclient.schema import Schemas
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from oauth2client.anyjson import simplejson
URITEMPLATE = re.compile('{[^}]*}')
VARNAME = re.compile('[a-zA-Z0-9_-]+')
DISCOVERY_URI = ('https://www.googleapis.com/discovery/v1/apis/'
'{api}/{apiVersion}/rest')
DEFAULT_METHOD_DOC = 'A description of how to use this function'
# Query parameters that work, but don't appear in discovery
STACK_QUERY_PARAMETERS = ['trace', 'fields', 'pp', 'prettyPrint', 'userIp',
'userip', 'strict']
RESERVED_WORDS = ['and', 'assert', 'break', 'class', 'continue', 'def', 'del',
'elif', 'else', 'except', 'exec', 'finally', 'for', 'from',
'global', 'if', 'import', 'in', 'is', 'lambda', 'not', 'or',
'pass', 'print', 'raise', 'return', 'try', 'while' ]
def _fix_method_name(name):
  """Append a trailing underscore when *name* is a Python reserved word.

  Keeps generated method names legal as Python identifiers.
  """
  return name + '_' if name in RESERVED_WORDS else name
def _write_headers(self):
  # Utility no-op method for multipart media handling.
  # NOTE(review): presumably substituted onto MIME part objects so their
  # headers are not emitted — confirm at the point where it is attached.
  pass
def _add_query_parameter(url, name, value):
  """Adds a query parameter to a url.

  Args:
    url: string, url to add the query parameter to.
    name: string, query parameter name.
    value: string, query parameter value.

  Returns:
    Updated url. Does not update the url if value is None.
  """
  if value is None:
    return url
  # Split the url, append the pair to its query component, and reassemble.
  parts = list(urlparse.urlparse(url))
  query = parse_qsl(parts[4])
  query.append((name, value))
  parts[4] = urllib.urlencode(query)
  return urlparse.urlunparse(parts)
def key2param(key):
  """Converts key names into parameter names.

  For example, converting "max-results" -> "max_results". A leading 'x' is
  prepended when the key does not start with a letter, so the result is a
  valid Python identifier.
  """
  chars = [c if c.isalnum() else '_' for c in key]
  if not key[0].isalpha():
    chars.insert(0, 'x')
  return ''.join(chars)
def build(serviceName, version,
          http=None,
          discoveryServiceUrl=DISCOVERY_URI,
          developerKey=None,
          model=None,
          requestBuilder=HttpRequest):
  """Construct a Resource for interacting with an API.

  Construct a Resource object for interacting with
  an API. The serviceName and version are the
  names from the Discovery service.

  Args:
    serviceName: string, name of the service
    version: string, the version of the service
    http: httplib2.Http, transport used to fetch the discovery document;
        a fresh instance is created when None.
    discoveryServiceUrl: string, a URI Template that points to
      the location of the discovery service. It should have two
      parameters {api} and {apiVersion} that when filled in
      produce an absolute URI to the discovery document for
      that service.
    developerKey: string, key obtained
      from https://code.google.com/apis/console
    model: apiclient.Model, converts to and from the wire format
    requestBuilder: apiclient.http.HttpRequest, encapsulator for
      an HTTP request

  Returns:
    A Resource object with methods for interacting with
    the service.

  Raises:
    UnknownApiNameOrVersion: the discovery service returned 404.
    HttpError: any other HTTP error while fetching the document.
    InvalidJsonError: the fetched document is not valid JSON.
  """
  params = {
      'api': serviceName,
      'apiVersion': version
      }
  if http is None:
    http = httplib2.Http()
  requested_url = uritemplate.expand(discoveryServiceUrl, params)
  # REMOTE_ADDR is defined by the CGI spec [RFC3875] as the environment
  # variable that contains the network address of the client sending the
  # request. If it exists then add that to the request for the discovery
  # document to avoid exceeding the quota on discovery requests.
  if 'REMOTE_ADDR' in os.environ:
    requested_url = _add_query_parameter(requested_url, 'userIp',
                                         os.environ['REMOTE_ADDR'])
  logging.info('URL being requested: %s' % requested_url)
  resp, content = http.request(requested_url)
  # 404 specifically means the name/version pair is unknown to discovery.
  if resp.status == 404:
    raise UnknownApiNameOrVersion("name: %s version: %s" % (serviceName,
                                                            version))
  if resp.status >= 400:
    raise HttpError(resp, content, requested_url)
  try:
    service = simplejson.loads(content)
  except ValueError, e:
    logging.error('Failed to parse as JSON: ' + content)
    raise InvalidJsonError()
  # An optional per-service 'future.json' describes experimental
  # capabilities; a missing file is silently ignored.
  filename = os.path.join(os.path.dirname(__file__), 'contrib',
                          serviceName, 'future.json')
  try:
    f = file(filename, 'r')
    future = f.read()
    f.close()
  except IOError:
    future = None
  return build_from_document(content, discoveryServiceUrl, future,
                             http, developerKey, model, requestBuilder)
def build_from_document(
    service,
    base,
    future=None,
    http=None,
    developerKey=None,
    model=None,
    requestBuilder=HttpRequest):
  """Create a Resource for interacting with an API.

  Same as `build()`, but constructs the Resource object
  from a discovery document that is it given, as opposed to
  retrieving one over HTTP.

  Args:
    service: string, JSON text of the discovery document
    base: string, base URI for all HTTP requests, usually the discovery URI
    future: string, JSON text of the discovery document with future
        capabilities (may be None)
    http: httplib2.Http, An instance of httplib2.Http or something that acts
      like it that HTTP requests will be made through.
    developerKey: string, Key for controlling API usage, generated
      from the API Console.
    model: Model class instance that serializes and
      de-serializes requests and responses.
    requestBuilder: Takes an http request and packages it up to be executed.

  Returns:
    A Resource object with methods for interacting with
    the service.
  """
  service = simplejson.loads(service)
  # Resolve the service's basePath against the discovery URI.
  base = urlparse.urljoin(base, service['basePath'])
  if future:
    future = simplejson.loads(future)
    auth_discovery = future.get('auth', {})
  else:
    future = {}
    auth_discovery = {}
  schema = Schemas(service)
  if model is None:
    # 'dataWrapper' means responses are nested under a 'data' envelope.
    features = service.get('features', [])
    model = JsonModel('dataWrapper' in features)
  resource = createResource(http, base, model, requestBuilder, developerKey,
                            service, future, schema)
  def auth_method():
    """Discovery information about the authentication the API uses."""
    return auth_discovery
  setattr(resource, 'auth_discovery', auth_method)
  return resource
def _cast(value, schema_type):
"""Convert value to a string based on JSON Schema type.
See http://tools.ietf.org/html/draft-zyp-json-schema-03 for more details on
JSON Schema.
Args:
value: any, the value to convert
schema_type: string, the type that value should be interpreted as
Returns:
A string representation of 'value' based on the schema_type.
"""
if schema_type == 'string':
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
elif schema_type == 'integer':
return str(int(value))
elif schema_type == 'number':
return str(float(value))
elif schema_type == 'boolean':
return str(bool(value)).lower()
else:
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
MULTIPLIERS = {
"KB": 2 ** 10,
"MB": 2 ** 20,
"GB": 2 ** 30,
"TB": 2 ** 40,
}
def _media_size_to_long(maxSize):
"""Convert a string media size, such as 10GB or 3TB into an integer."""
if len(maxSize) < 2:
return 0
units = maxSize[-2:].upper()
multiplier = MULTIPLIERS.get(units, 0)
if multiplier:
return int(maxSize[:-2]) * multiplier
else:
return int(maxSize)
def createResource(http, baseUrl, model, requestBuilder,
                   developerKey, resourceDesc, futureDesc, schema):
  """Build a Resource object for interacting with one API resource.

  Dynamically constructs a Resource class, attaches one method per entry in
  resourceDesc['methods'], one sub-resource accessor per entry in
  resourceDesc['resources'], and *_next pagination helpers, then returns an
  instance of it.

  Args:
    http: httplib2.Http-like object HTTP requests are made through.
    baseUrl: string, base URI that method paths are joined to.
    model: Model instance that serializes requests / de-serializes responses.
    requestBuilder: callable that packages up an HTTP request for execution.
    developerKey: string, API key added to every request as the 'key' query
      parameter (may be None/empty).
    resourceDesc: dict, discovery description of this resource.
    futureDesc: dict or None, legacy 'future' discovery data used only for
      the old-style _next methods.
    schema: Schemas object, used for $ref lookup and docstring generation.

  Returns:
    An instance of the dynamically built Resource class.
  """

  class Resource(object):
    """A class for interacting with a resource."""

    def __init__(self):
      self._http = http
      self._baseUrl = baseUrl
      self._model = model
      self._developerKey = developerKey
      self._requestBuilder = requestBuilder

  def createMethod(theclass, methodName, methodDesc, futureDesc):
    # Build a bound API method from its discovery description and attach it
    # to 'theclass' under 'methodName'.
    methodName = _fix_method_name(methodName)
    pathUrl = methodDesc['path']
    httpMethod = methodDesc['httpMethod']
    methodId = methodDesc['id']

    mediaPathUrl = None
    accept = []
    maxSize = 0
    if 'mediaUpload' in methodDesc:
      mediaUpload = methodDesc['mediaUpload']
      mediaPathUrl = mediaUpload['protocols']['simple']['path']
      mediaResumablePathUrl = mediaUpload['protocols']['resumable']['path']
      accept = mediaUpload['accept']
      maxSize = _media_size_to_long(mediaUpload.get('maxSize', ''))

    # Mutate the description in place: add the implicit stack-level query
    # parameters, plus synthetic 'body'/'media_body' parameters so they are
    # validated and documented like ordinary ones.
    if 'parameters' not in methodDesc:
      methodDesc['parameters'] = {}
    for name in STACK_QUERY_PARAMETERS:
      methodDesc['parameters'][name] = {
          'type': 'string',
          'location': 'query'
          }
    if httpMethod in ['PUT', 'POST', 'PATCH'] and 'request' in methodDesc:
      methodDesc['parameters']['body'] = {
          'description': 'The request body.',
          'type': 'object',
          'required': True,
          }
      if 'request' in methodDesc:
        methodDesc['parameters']['body'].update(methodDesc['request'])
      else:
        methodDesc['parameters']['body']['type'] = 'object'
    if 'mediaUpload' in methodDesc:
      methodDesc['parameters']['media_body'] = {
          'description': 'The filename of the media request body.',
          'type': 'string',
          'required': False,
          }
      # When a media body may be supplied instead, the request body becomes
      # optional.
      if 'body' in methodDesc['parameters']:
        methodDesc['parameters']['body']['required'] = False

    argmap = {}  # Map from method parameter name to query parameter name
    required_params = []  # Required parameters
    repeated_params = []  # Repeated parameters
    pattern_params = {}  # Parameters that must match a regex
    query_params = []  # Parameters that will be used in the query string
    path_params = {}  # Parameters that will be used in the base URL
    param_type = {}  # The type of the parameter
    enum_params = {}  # Allowable enumeration values for each parameter

    if 'parameters' in methodDesc:
      for arg, desc in methodDesc['parameters'].iteritems():
        param = key2param(arg)
        argmap[param] = arg

        if desc.get('pattern', ''):
          pattern_params[param] = desc['pattern']
        if desc.get('enum', ''):
          enum_params[param] = desc['enum']
        if desc.get('required', False):
          required_params.append(param)
        if desc.get('repeated', False):
          repeated_params.append(param)
        if desc.get('location') == 'query':
          query_params.append(param)
        if desc.get('location') == 'path':
          path_params[param] = param
        param_type[param] = desc.get('type', 'string')

    # URI-template variables in the path are path parameters, even if the
    # description also listed them as query parameters.
    for match in URITEMPLATE.finditer(pathUrl):
      for namematch in VARNAME.finditer(match.group(0)):
        name = key2param(namematch.group(0))
        path_params[name] = name
        if name in query_params:
          query_params.remove(name)

    def method(self, **kwargs):
      # Validate keyword arguments against the collected parameter metadata.
      for name in kwargs.iterkeys():
        if name not in argmap:
          raise TypeError('Got an unexpected keyword argument "%s"' % name)

      for name in required_params:
        if name not in kwargs:
          raise TypeError('Missing required parameter "%s"' % name)

      for name, regex in pattern_params.iteritems():
        if name in kwargs:
          if isinstance(kwargs[name], basestring):
            pvalues = [kwargs[name]]
          else:
            pvalues = kwargs[name]
          for pvalue in pvalues:
            if re.match(regex, pvalue) is None:
              raise TypeError(
                  'Parameter "%s" value "%s" does not match the pattern "%s"' %
                  (name, pvalue, regex))

      for name, enums in enum_params.iteritems():
        if name in kwargs:
          if kwargs[name] not in enums:
            raise TypeError(
                'Parameter "%s" value "%s" is not an allowed value in "%s"' %
                (name, kwargs[name], str(enums)))

      # Split validated arguments into query vs. path parameters, casting
      # each value to its JSON-Schema string form.
      actual_query_params = {}
      actual_path_params = {}
      for key, value in kwargs.iteritems():
        to_type = param_type.get(key, 'string')
        # For repeated parameters we cast each member of the list.
        if key in repeated_params and type(value) == type([]):
          cast_value = [_cast(x, to_type) for x in value]
        else:
          cast_value = _cast(value, to_type)
        if key in query_params:
          actual_query_params[argmap[key]] = cast_value
        if key in path_params:
          actual_path_params[argmap[key]] = cast_value
      body_value = kwargs.get('body', None)
      media_filename = kwargs.get('media_body', None)

      if self._developerKey:
        actual_query_params['key'] = self._developerKey

      model = self._model
      # If there is no schema for the response then presume a binary blob.
      if 'response' not in methodDesc:
        model = RawModel()

      headers = {}
      headers, params, query, body = model.request(headers,
          actual_path_params, actual_query_params, body_value)

      expanded_url = uritemplate.expand(pathUrl, params)
      url = urlparse.urljoin(self._baseUrl, expanded_url + query)

      resumable = None
      multipart_boundary = ''

      if media_filename:
        # Ensure we end up with a valid MediaUpload object.
        if isinstance(media_filename, basestring):
          (media_mime_type, encoding) = mimetypes.guess_type(media_filename)
          if media_mime_type is None:
            raise UnknownFileType(media_filename)
          if not mimeparse.best_match([media_mime_type], ','.join(accept)):
            raise UnacceptableMimeTypeError(media_mime_type)
          media_upload = MediaFileUpload(media_filename, media_mime_type)
        elif isinstance(media_filename, MediaUpload):
          media_upload = media_filename
        else:
          raise TypeError('media_filename must be str or MediaUpload.')

        # Check the maxSize
        if maxSize > 0 and media_upload.size() > maxSize:
          raise MediaUploadSizeError("Media larger than: %s" % maxSize)

        # Use the media path uri for media uploads
        if media_upload.resumable():
          expanded_url = uritemplate.expand(mediaResumablePathUrl, params)
        else:
          expanded_url = uritemplate.expand(mediaPathUrl, params)
        url = urlparse.urljoin(self._baseUrl, expanded_url + query)

        if media_upload.resumable():
          # This is all we need to do for resumable, if the body exists it gets
          # sent in the first request, otherwise an empty body is sent.
          resumable = media_upload
        else:
          # A non-resumable upload
          if body is None:
            # This is a simple media upload
            headers['content-type'] = media_upload.mimetype()
            body = media_upload.getbytes(0, media_upload.size())
          else:
            # This is a multipart/related upload.
            msgRoot = MIMEMultipart('related')
            # msgRoot should not write out it's own headers
            setattr(msgRoot, '_write_headers', lambda self: None)

            # attach the body as one part
            msg = MIMENonMultipart(*headers['content-type'].split('/'))
            msg.set_payload(body)
            msgRoot.attach(msg)

            # attach the media as the second part
            msg = MIMENonMultipart(*media_upload.mimetype().split('/'))
            msg['Content-Transfer-Encoding'] = 'binary'
            payload = media_upload.getbytes(0, media_upload.size())
            msg.set_payload(payload)
            msgRoot.attach(msg)

            body = msgRoot.as_string()

            multipart_boundary = msgRoot.get_boundary()
            headers['content-type'] = ('multipart/related; '
                                       'boundary="%s"') % multipart_boundary

      logging.info('URL being requested: %s' % url)
      return self._requestBuilder(self._http,
                                  model.response,
                                  url,
                                  method=httpMethod,
                                  body=body,
                                  headers=headers,
                                  methodId=methodId,
                                  resumable=resumable)

    # Assemble the generated method's docstring from the discovery data.
    docs = [methodDesc.get('description', DEFAULT_METHOD_DOC), '\n\n']
    if len(argmap) > 0:
      docs.append('Args:\n')
      for arg in argmap.iterkeys():
        if arg in STACK_QUERY_PARAMETERS:
          continue
        repeated = ''
        if arg in repeated_params:
          repeated = ' (repeated)'
        required = ''
        if arg in required_params:
          required = ' (required)'
        paramdesc = methodDesc['parameters'][argmap[arg]]
        paramdoc = paramdesc.get('description', 'A parameter')
        if '$ref' in paramdesc:
          docs.append(
              (' %s: object, %s%s%s\n The object takes the'
               ' form of:\n\n%s\n\n') % (arg, paramdoc, required, repeated,
                                         schema.prettyPrintByName(
                                             paramdesc['$ref'])))
        else:
          paramtype = paramdesc.get('type', 'string')
          docs.append(' %s: %s, %s%s%s\n' % (arg, paramtype, paramdoc, required,
                                             repeated))
        enum = paramdesc.get('enum', [])
        enumDesc = paramdesc.get('enumDescriptions', [])
        if enum and enumDesc:
          docs.append(' Allowed values\n')
          for (name, desc) in zip(enum, enumDesc):
            docs.append(' %s - %s\n' % (name, desc))
    if 'response' in methodDesc:
      docs.append('\nReturns:\n An object of the form\n\n ')
      docs.append(schema.prettyPrintSchema(methodDesc['response']))

    setattr(method, '__doc__', ''.join(docs))
    setattr(theclass, methodName, method)

  def createNextMethodFromFuture(theclass, methodName, methodDesc, futureDesc):
    """ This is a legacy method, as only Buzz and Moderator use the future.json
    functionality for generating _next methods. It will be kept around as long
    as those API versions are around, but no new APIs should depend upon it.
    """
    methodName = _fix_method_name(methodName)
    methodId = methodDesc['id'] + '.next'

    def methodNext(self, previous):
      """Retrieve the next page of results.

      Takes a single argument, 'body', which is the results
      from the last call, and returns the next set of items
      in the collection.

      Returns:
        None if there are no more items in the collection.
      """
      if futureDesc['type'] != 'uri':
        raise UnknownLinkType(futureDesc['type'])

      # Walk futureDesc['location'] into the previous response to find the
      # next-page URL; a missing key means the collection is exhausted.
      try:
        p = previous
        for key in futureDesc['location']:
          p = p[key]
        url = p
      except (KeyError, TypeError):
        return None

      url = _add_query_parameter(url, 'key', self._developerKey)

      headers = {}
      headers, params, query, body = self._model.request(headers, {}, {}, None)

      logging.info('URL being requested: %s' % url)
      # NOTE(review): this issues a GET here *and* returns a request object
      # built for the same URL; presumably the eager request is vestigial —
      # preserved as-is since this is legacy Buzz/Moderator-only code.
      resp, content = self._http.request(url, method='GET', headers=headers)

      return self._requestBuilder(self._http,
                                  self._model.response,
                                  url,
                                  method='GET',
                                  headers=headers,
                                  methodId=methodId)

    setattr(theclass, methodName, methodNext)

  def createNextMethod(theclass, methodName, methodDesc, futureDesc):
    # Attach a pageToken-based *_next method driven purely by
    # nextPageToken/pageToken in the discovery schema.
    methodName = _fix_method_name(methodName)
    methodId = methodDesc['id'] + '.next'

    def methodNext(self, previous_request, previous_response):
      """Retrieves the next page of results.

      Args:
        previous_request: The request for the previous page.
        previous_response: The response from the request for the previous page.

      Returns:
        A request object that you can call 'execute()' on to request the next
        page. Returns None if there are no more items in the collection.
      """
      # Retrieve nextPageToken from previous_response
      # Use as pageToken in previous_request to create new request.

      if 'nextPageToken' not in previous_response:
        return None

      request = copy.copy(previous_request)

      pageToken = previous_response['nextPageToken']
      parsed = list(urlparse.urlparse(request.uri))
      q = parse_qsl(parsed[4])

      # Find and remove old 'pageToken' value from URI
      newq = [(key, value) for (key, value) in q if key != 'pageToken']
      newq.append(('pageToken', pageToken))
      parsed[4] = urllib.urlencode(newq)
      uri = urlparse.urlunparse(parsed)

      request.uri = uri

      logging.info('URL being requested: %s' % uri)

      return request

    setattr(theclass, methodName, methodNext)

  # Add basic methods to Resource
  if 'methods' in resourceDesc:
    for methodName, methodDesc in resourceDesc['methods'].iteritems():
      if futureDesc:
        future = futureDesc['methods'].get(methodName, {})
      else:
        future = None
      createMethod(Resource, methodName, methodDesc, future)

  # Add in nested resources
  if 'resources' in resourceDesc:

    def createResourceMethod(theclass, methodName, methodDesc, futureDesc):
      # Attach an accessor that lazily builds the nested Resource on call.
      methodName = _fix_method_name(methodName)

      def methodResource(self):
        return createResource(self._http, self._baseUrl, self._model,
                              self._requestBuilder, self._developerKey,
                              methodDesc, futureDesc, schema)

      setattr(methodResource, '__doc__', 'A collection resource.')
      setattr(methodResource, '__is_resource__', True)
      setattr(theclass, methodName, methodResource)

    for methodName, methodDesc in resourceDesc['resources'].iteritems():
      if futureDesc and 'resources' in futureDesc:
        future = futureDesc['resources'].get(methodName, {})
      else:
        future = {}
      createResourceMethod(Resource, methodName, methodDesc, future)

  # Add <m>_next() methods to Resource
  if futureDesc and 'methods' in futureDesc:
    for methodName, methodDesc in futureDesc['methods'].iteritems():
      if 'next' in methodDesc and methodName in resourceDesc['methods']:
        createNextMethodFromFuture(Resource, methodName + '_next',
                                   resourceDesc['methods'][methodName],
                                   methodDesc['next'])
  # Add _next() methods
  # Look for response bodies in schema that contain nextPageToken, and methods
  # that take a pageToken parameter.
  if 'methods' in resourceDesc:
    for methodName, methodDesc in resourceDesc['methods'].iteritems():
      if 'response' in methodDesc:
        responseSchema = methodDesc['response']
        if '$ref' in responseSchema:
          responseSchema = schema.get(responseSchema['$ref'])
        hasNextPageToken = 'nextPageToken' in responseSchema.get('properties',
                                                                 {})
        hasPageToken = 'pageToken' in methodDesc.get('parameters', {})
        if hasNextPageToken and hasPageToken:
          # NOTE(review): 'methodName' (a string) is passed where the other
          # call sites pass a future/desc dict; createNextMethod ignores the
          # argument, so this is harmless but worth confirming upstream.
          createNextMethod(Resource, methodName + '_next',
                           resourceDesc['methods'][methodName],
                           methodName)

  return Resource()
| bsd-3-clause |
aquilesC/UUTrack | UUTrack/View/Monitor/workerThread.py | 1 | 1460 | """
UUTrack.View.Camera.workerThread
================================
Thread that continuously acquires data until a variable is changed. This makes it possible to acquire at any frame rate without freezing the GUI or overloading it with data that is acquired too fast.
"""
from pyqtgraph.Qt import QtCore
class workThread(QtCore.QThread):
    """Thread for acquiring from the camera. If the exposure time is long, this is
    needed to avoid freezing the GUI.
    """
    def __init__(self, _session, camera):
        QtCore.QThread.__init__(self)
        self._session = _session  # session/settings object (unused here; kept for callers)
        self.camera = camera      # camera driver with setAcquisitionMode/trigger/read/stop
        self.origin = None        # set externally, e.g. 'snap' for a single frame
        self.keep_acquiring = True  # cleared externally (or by 'snap') to stop the loop

    def __del__(self):
        # Block until the thread's run() has finished before tearing down.
        self.wait()

    def run(self):
        """ Triggers the Monitor to acquire a new Image.
        the QThread defined .start() method is a special method that sets up the thread and
        calls our implementation of the run() method.
        """
        first = True
        while self.keep_acquiring:
            # In 'snap' mode a single frame is read and the loop exits.
            if self.origin == 'snap':
                self.keep_acquiring = False
            if first:
                # Continuous mode is configured and triggered once, on the
                # first pass; subsequent iterations only read frames.
                self.camera.setAcquisitionMode(self.camera.MODE_CONTINUOUS)
                self.camera.triggerCamera()  # Triggers the camera only once
                first = False
            img = self.camera.readCamera()
            # NOTE(review): old-style (SIGNAL-string) emit — PyQt4-era API;
            # confirm the bundled pyqtgraph.Qt still supports it.
            self.emit(QtCore.SIGNAL('image'), img, self.origin)
        # Loop ended: stop the acquisition on the hardware side.
        self.camera.stopAcq()
        return
| mit |
raghavtan/youtube-dl | youtube_dl/extractor/soompi.py | 102 | 4948 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .crunchyroll import CrunchyrollIE
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
ExtractorError,
int_or_none,
remove_start,
xpath_text,
)
class SoompiBaseIE(InfoExtractor):
    """Shared helper for Soompi extractors: episode-list parsing."""

    def _get_episodes(self, webpage, episode_filter=None):
        """Parse the VIDEOS JSON array embedded in the page.

        With no filter, truthy episode entries are returned (mirroring
        the semantics of filter(None, ...)); otherwise only entries the
        filter accepts.
        """
        episodes_json = self._search_regex(
            r'VIDEOS\s*=\s*(\[.+?\]);', webpage, 'episodes JSON')
        episodes = self._parse_json(episodes_json, None)
        if episode_filter is None:
            return [episode for episode in episodes if episode]
        return [episode for episode in episodes if episode_filter(episode)]
class SoompiIE(SoompiBaseIE, CrunchyrollIE):
    """Extractor for a single Soompi TV episode page."""
    IE_NAME = 'soompi'
    _VALID_URL = r'https?://tv\.soompi\.com/(?:en/)?watch/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://tv.soompi.com/en/watch/29235',
        'info_dict': {
            'id': '29235',
            'ext': 'mp4',
            'title': 'Episode 1096',
            'description': '2015-05-20'
        },
        'params': {
            'skip_download': True,
        },
    }]

    def _get_episode(self, webpage, video_id):
        # First (and expected only) episode entry matching this video id.
        return self._get_episodes(webpage, lambda x: x['id'] == video_id)[0]

    def _get_subtitles(self, config, video_id):
        """Decrypt and extract subtitles from the episode config XML."""
        # Map subtitle track id -> language title.
        sub_langs = {}
        for subtitle in config.findall('./{default}preload/subtitles/subtitle'):
            sub_langs[subtitle.attrib['id']] = subtitle.attrib['title']

        subtitles = {}
        for s in config.findall('./{default}preload/subtitle'):
            lang_code = sub_langs.get(s.attrib['id'])
            if not lang_code:
                continue
            sub_id = s.get('id')
            data = xpath_text(s, './data', 'data')
            iv = xpath_text(s, './iv', 'iv')
            # Bug fix: the original checked `not id` — the *builtin* id
            # function, which is always truthy — instead of the sub_id just
            # extracted, so entries lacking an id were still passed to the
            # decrypter. Skip any entry missing id, iv, or data.
            if not sub_id or not iv or not data:
                continue
            subtitle = self._decrypt_subtitles(data, iv, sub_id).decode('utf-8')
            subtitles[lang_code] = self._extract_subtitles(subtitle)
        return subtitles

    def _real_extract(self, url):
        video_id = self._match_id(url)

        try:
            webpage = self._download_webpage(
                url, video_id, 'Downloading episode page')
        except ExtractorError as ee:
            # A 403 may carry a human-readable geo/availability block message.
            if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
                webpage = ee.cause.read()
                block_message = self._html_search_regex(
                    r'(?s)<div class="block-message">(.+?)</div>', webpage,
                    'block message', default=None)
                if block_message:
                    raise ExtractorError(block_message, expected=True)
            raise

        formats = []
        config = None
        for format_id in re.findall(r'\?quality=([0-9a-zA-Z]+)', webpage):
            config = self._download_xml(
                'http://tv.soompi.com/en/show/_/%s-config.xml?mode=hls&quality=%s' % (video_id, format_id),
                video_id, 'Downloading %s XML' % format_id)
            m3u8_url = xpath_text(
                config, './{default}preload/stream_info/file',
                '%s m3u8 URL' % format_id)
            if not m3u8_url:
                continue
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, video_id, 'mp4', m3u8_id=format_id))
        self._sort_formats(formats)

        episode = self._get_episode(webpage, video_id)

        title = episode['name']
        description = episode.get('description')
        duration = int_or_none(episode.get('duration'))

        thumbnails = [{
            'id': thumbnail_id,
            'url': thumbnail_url,
        } for thumbnail_id, thumbnail_url in episode.get('img_url', {}).items()]

        # NOTE(review): if no quality links were found, config is still None
        # here and subtitle extraction will fail — presumably every episode
        # page has at least one quality; confirm before hardening.
        subtitles = self.extract_subtitles(config, video_id)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnails': thumbnails,
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles
        }
class SoompiShowIE(SoompiBaseIE):
    """Extractor for a Soompi TV show page: yields a playlist of episodes."""
    IE_NAME = 'soompi:show'
    _VALID_URL = r'https?://tv\.soompi\.com/en/shows/(?P<id>[0-9a-zA-Z\-_]+)'
    _TESTS = [{
        'url': 'http://tv.soompi.com/en/shows/liar-game',
        'info_dict': {
            'id': 'liar-game',
            'title': 'Liar Game',
            'description': 'md5:52c02bce0c1a622a95823591d0589b66',
        },
        'playlist_count': 14,
    }]

    def _real_extract(self, url):
        show_id = self._match_id(url)

        webpage = self._download_webpage(
            url, show_id, 'Downloading show page')

        # The og:title carries a 'SoompiTV | ' prefix that is not part of
        # the show name.
        title = remove_start(self._og_search_title(webpage), 'SoompiTV | ')
        description = self._og_search_description(webpage)

        entries = []
        for episode in self._get_episodes(webpage):
            entries.append(self.url_result(
                'http://tv.soompi.com/en/watch/%s' % episode['id'], 'Soompi'))

        return self.playlist_result(entries, show_id, title, description)
| unlicense |
joone/chromium-crosswalk | third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py | 8 | 22282 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit testing base class for Port implementations."""
import collections
import errno
import logging
import os
import socket
import sys
import time
import unittest
from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.platforminfo_mock import MockPlatformInfo
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.port.base import Port, TestConfiguration
from webkitpy.layout_tests.port.server_process_mock import MockServerProcess
from webkitpy.tool.mocktool import MockOptions
# FIXME: get rid of this fixture
class TestWebKitPort(Port):
    """Minimal concrete Port used as a fixture by PortTestCase-derived tests."""
    port_name = "testwebkitport"

    def __init__(self, port_name=None, symbols_string=None,
                 expectations_file=None, skips_file=None, host=None, config=None,
                 **kwargs):
        port_name = port_name or TestWebKitPort.port_name
        # Passing "" disables all staticly-detectable features.
        self.symbols_string = symbols_string
        host = host or MockSystemHost()
        super(TestWebKitPort, self).__init__(host, port_name=port_name, **kwargs)

    def all_test_configurations(self):
        # Only the single current configuration is reported by this fixture.
        return [self.test_configuration()]

    def _symbols_string(self):
        # Returns the canned symbols string supplied at construction time.
        return self.symbols_string

    def _tests_for_disabled_features(self):
        return ["accessibility", ]
class FakePrinter(object):
    """No-op stand-in for the test-runner printer: accepts and discards updates."""

    def write_update(self, msg):
        """Discard a progress update message."""

    def write_throttled_update(self, msg):
        """Discard a throttled progress update message."""
class PortTestCase(unittest.TestCase):
    """Tests that all Port implementations must pass."""
    # Ports the HTTP and websocket test servers are expected to listen on.
    HTTP_PORTS = (8000, 8080, 8443)
    WEBSOCKET_PORTS = (8880,)

    # Subclasses override this to point to their Port subclass.
    os_name = None
    os_version = None
    port_maker = TestWebKitPort
    port_name = None
    full_port_name = None
    def make_port(self, host=None, port_name=None, options=None, os_name=None, os_version=None, **kwargs):
        """Construct a Port of the subclass under test with mocked host/options defaults."""
        host = host or MockSystemHost(os_name=(os_name or self.os_name), os_version=(os_version or self.os_version))
        options = options or MockOptions(configuration='Release')
        port_name = port_name or self.port_name
        port_name = self.port_maker.determine_full_port_name(host, options, port_name)
        port = self.port_maker(host, port_name, options=options, **kwargs)
        # Stub the build directory so tests do not depend on a real checkout layout.
        port._config.build_directory = lambda configuration: '/mock-build'
        return port
    def make_wdiff_available(self, port):
        """Force the port to believe the external 'wdiff' tool is installed."""
        port._wdiff_available = True
    def test_check_build(self):
        """check_build() succeeds with all checks mocked true, fails when files/build are missing."""
        port = self.make_port()
        port._check_file_exists = lambda path, desc: True
        if port._dump_reader:
            port._dump_reader.check_is_functional = lambda: True
        port._options.build = True
        port._check_driver_build_up_to_date = lambda config: True
        port.check_httpd = lambda: True
        oc = OutputCapture()
        try:
            oc.capture_output()
            self.assertEqual(port.check_build(needs_http=True, printer=FakePrinter()),
                             test_run_results.OK_EXIT_STATUS)
        finally:
            out, err, logs = oc.restore_output()
            self.assertIn('pretty patches', logs)  # We should get a warning about PrettyPatch being missing,
            self.assertNotIn('build requirements', logs)  # but not the driver itself.

        # Now fail the file-existence and up-to-date checks.
        port._check_file_exists = lambda path, desc: False
        port._check_driver_build_up_to_date = lambda config: False
        try:
            oc.capture_output()
            self.assertEqual(port.check_build(needs_http=True, printer=FakePrinter()),
                             test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
        finally:
            out, err, logs = oc.restore_output()
            self.assertIn('pretty patches', logs)  # And, here we should get warnings about both.
            self.assertIn('build requirements', logs)
    def test_default_batch_size(self):
        """Sanitizer builds must report a finite (non-None) batch size."""
        port = self.make_port()

        # Test that we set a finite batch size for sanitizer builds.
        port._options.enable_sanitizer = True
        sanitized_batch_size = port.default_batch_size()
        self.assertIsNotNone(sanitized_batch_size)
    def test_default_child_processes(self):
        """There is always at least one worker process."""
        port = self.make_port()
        num_workers = port.default_child_processes()
        self.assertGreaterEqual(num_workers, 1)
    def test_default_max_locked_shards(self):
        """Locked-shard count scales with the (mocked) child-process count."""
        port = self.make_port()
        port.default_child_processes = lambda: 16
        self.assertEqual(port.default_max_locked_shards(), 4)
        port.default_child_processes = lambda: 2
        self.assertEqual(port.default_max_locked_shards(), 1)
    def test_default_timeout_ms(self):
        """Debug builds get a 3x longer per-test timeout than Release builds."""
        self.assertEqual(self.make_port(options=MockOptions(configuration='Release')).default_timeout_ms(), 6000)
        self.assertEqual(self.make_port(options=MockOptions(configuration='Debug')).default_timeout_ms(), 18000)
    def test_default_pixel_tests(self):
        """Pixel tests are enabled by default."""
        self.assertEqual(self.make_port().default_pixel_tests(), True)
    def test_driver_cmd_line(self):
        """--additional-driver-flag values must appear in the driver command line."""
        port = self.make_port()
        self.assertTrue(len(port.driver_cmd_line()))

        options = MockOptions(additional_driver_flag=['--foo=bar', '--foo=baz'])
        port = self.make_port(options=options)
        cmd_line = port.driver_cmd_line()
        self.assertTrue('--foo=bar' in cmd_line)
        self.assertTrue('--foo=baz' in cmd_line)
    def assert_servers_are_down(self, host, ports):
        """Assert that connecting to each (host, port) fails with refused/reset."""
        for port in ports:
            try:
                test_socket = socket.socket()
                test_socket.connect((host, port))
                self.fail()
            except IOError, e:
                # Either refusal (nothing listening) or reset counts as down.
                self.assertTrue(e.errno in (errno.ECONNREFUSED, errno.ECONNRESET))
            finally:
                test_socket.close()
    def assert_servers_are_up(self, host, ports):
        """Assert that a TCP connection to each (host, port) succeeds."""
        for port in ports:
            try:
                test_socket = socket.socket()
                test_socket.connect((host, port))
            except IOError, e:
                self.fail('failed to connect to %s:%d' % (host, port))
            finally:
                test_socket.close()
    def test_diff_image__missing_both(self):
        """diff_image() returns (None, None) when both images are missing/empty."""
        port = self.make_port()
        self.assertEqual(port.diff_image(None, None), (None, None))
        self.assertEqual(port.diff_image(None, ''), (None, None))
        self.assertEqual(port.diff_image('', None), (None, None))
        self.assertEqual(port.diff_image('', ''), (None, None))
    def test_diff_image__missing_actual(self):
        """With no actual image, the expected image is returned as the diff."""
        port = self.make_port()
        self.assertEqual(port.diff_image(None, 'foo'), ('foo', None))
        self.assertEqual(port.diff_image('', 'foo'), ('foo', None))
    def test_diff_image__missing_expected(self):
        """With no expected image, the actual image is returned as the diff."""
        port = self.make_port()
        self.assertEqual(port.diff_image('foo', None), ('foo', None))
        self.assertEqual(port.diff_image('foo', ''), ('foo', None))
    def test_diff_image(self):
        """diff_image(): diff bytes when different, None when same, no exception on tool error."""
        def _path_to_image_diff():
            return "/path/to/image_diff"

        port = self.make_port()
        port._path_to_image_diff = _path_to_image_diff

        mock_image_diff = "MOCK Image Diff"

        def mock_run_command(args):
            # Simulate image_diff writing its output file (args[4]) and
            # exiting 1 ("images differ").
            port._filesystem.write_binary_file(args[4], mock_image_diff)
            return 1

        # Images are different.
        port._executive = MockExecutive2(run_command_fn=mock_run_command)
        self.assertEqual(mock_image_diff, port.diff_image("EXPECTED", "ACTUAL")[0])

        # Images are the same.
        port._executive = MockExecutive2(exit_code=0)
        self.assertEqual(None, port.diff_image("EXPECTED", "ACTUAL")[0])

        # There was some error running image_diff.
        port._executive = MockExecutive2(exit_code=2)
        exception_raised = False
        try:
            port.diff_image("EXPECTED", "ACTUAL")
        except ValueError, e:
            exception_raised = True
        self.assertFalse(exception_raised)
    def test_diff_image_crashed(self):
        """A crashing image_diff yields an error string, not an exception."""
        port = self.make_port()
        port._executive = MockExecutive2(exit_code=2)
        self.assertEqual(port.diff_image("EXPECTED", "ACTUAL"), (None, 'Image diff returned an exit code of 2. See http://crbug.com/278596'))
    def test_check_wdiff(self):
        """check_wdiff() must not raise on a freshly constructed port."""
        port = self.make_port()
        port.check_wdiff()
    def test_wdiff_text_fails(self):
        """A failing wdiff invocation returns the error HTML and disables wdiff."""
        host = MockSystemHost(os_name=self.os_name, os_version=self.os_version)
        host.executive = MockExecutive(should_throw=True)
        port = self.make_port(host=host)
        port._executive = host.executive  # AndroidPortTest.make_port sets its own executive, so reset that as well.

        # This should raise a ScriptError that gets caught and turned into the
        # error text, and also mark wdiff as not available.
        self.make_wdiff_available(port)
        self.assertTrue(port.wdiff_available())
        diff_txt = port.wdiff_text("/tmp/foo.html", "/tmp/bar.html")
        self.assertEqual(diff_txt, port._wdiff_error_html)
        self.assertFalse(port.wdiff_available())
    def test_missing_symbol_to_skipped_tests(self):
        """The missing-symbol skip map contains codec entries, not WebKit defaults."""
        # Test that we get the chromium skips and not the webkit default skips
        port = self.make_port()
        skip_dict = port._missing_symbol_to_skipped_tests()
        if port.PORT_HAS_AUDIO_CODECS_BUILT_IN:
            self.assertEqual(skip_dict, {})
        else:
            self.assertTrue('ff_mp3_decoder' in skip_dict)
        self.assertFalse('WebGLShader' in skip_dict)
    def test_test_configuration(self):
        """Every port must report a truthy current test configuration."""
        port = self.make_port()
        self.assertTrue(port.test_configuration())
    def test_all_test_configurations(self):
        """Validate the complete set of configurations this port knows about."""
        port = self.make_port()
        self.assertEqual(set(port.all_test_configurations()), set([
            TestConfiguration('snowleopard', 'x86', 'debug'),
            TestConfiguration('snowleopard', 'x86', 'release'),
            TestConfiguration('lion', 'x86', 'debug'),
            TestConfiguration('lion', 'x86', 'release'),
            TestConfiguration('retina', 'x86', 'debug'),
            TestConfiguration('retina', 'x86', 'release'),
            TestConfiguration('mountainlion', 'x86', 'debug'),
            TestConfiguration('mountainlion', 'x86', 'release'),
            TestConfiguration('mavericks', 'x86', 'debug'),
            TestConfiguration('mavericks', 'x86', 'release'),
            TestConfiguration('mac10.10', 'x86', 'debug'),
            TestConfiguration('mac10.10', 'x86', 'release'),
            TestConfiguration('xp', 'x86', 'debug'),
            TestConfiguration('xp', 'x86', 'release'),
            TestConfiguration('win7', 'x86', 'debug'),
            TestConfiguration('win7', 'x86', 'release'),
            TestConfiguration('win10', 'x86', 'debug'),
            TestConfiguration('win10', 'x86', 'release'),
            TestConfiguration('linux32', 'x86', 'debug'),
            TestConfiguration('linux32', 'x86', 'release'),
            TestConfiguration('precise', 'x86_64', 'debug'),
            TestConfiguration('precise', 'x86_64', 'release'),
            TestConfiguration('trusty', 'x86_64', 'debug'),
            TestConfiguration('trusty', 'x86_64', 'release'),
            TestConfiguration('icecreamsandwich', 'x86', 'debug'),
            TestConfiguration('icecreamsandwich', 'x86', 'release'),
        ]))
    def test_get_crash_log(self):
        """_get_crash_log() formats stdout/stderr and replaces undecodable bytes."""
        port = self.make_port()
        # No process info at all: placeholder name/pid and empty streams.
        self.assertEqual(port._get_crash_log(None, None, None, None, newer_than=None),
                         (None,
                          'crash log for <unknown process name> (pid <unknown>):\n'
                          'STDOUT: <empty>\n'
                          'STDERR: <empty>\n'))

        self.assertEqual(port._get_crash_log('foo', 1234, 'out bar\nout baz', 'err bar\nerr baz\n', newer_than=None),
                         ('err bar\nerr baz\n',
                          'crash log for foo (pid 1234):\n'
                          'STDOUT: out bar\n'
                          'STDOUT: out baz\n'
                          'STDERR: err bar\n'
                          'STDERR: err baz\n'))

        # Invalid byte \xa6 must be replaced with U+FFFD in the formatted log.
        self.assertEqual(port._get_crash_log('foo', 1234, 'foo\xa6bar', 'foo\xa6bar', newer_than=None),
                         ('foo\xa6bar',
                          u'crash log for foo (pid 1234):\n'
                          u'STDOUT: foo\ufffdbar\n'
                          u'STDERR: foo\ufffdbar\n'))

        self.assertEqual(port._get_crash_log('foo', 1234, 'foo\xa6bar', 'foo\xa6bar', newer_than=1.0),
                         ('foo\xa6bar',
                          u'crash log for foo (pid 1234):\n'
                          u'STDOUT: foo\ufffdbar\n'
                          u'STDERR: foo\ufffdbar\n'))
    def assert_build_path(self, options, dirs, expected_path):
        """Assert the port resolves its build path to expected_path after creating dirs."""
        port = self.make_port(options=options)
        for directory in dirs:
            port.host.filesystem.maybe_make_directory(directory)
        self.assertEqual(port._build_path(), expected_path)
    def test_expectations_files(self):
        """expectations_files() returns the same ordered list regardless of builder name."""
        port = self.make_port()

        generic_path = port.path_to_generic_test_expectations_file()
        never_fix_tests_path = port._filesystem.join(port.layout_tests_dir(), 'NeverFixTests')
        stale_tests_path = port._filesystem.join(port.layout_tests_dir(), 'StaleTestExpectations')
        slow_tests_path = port._filesystem.join(port.layout_tests_dir(), 'SlowTests')
        skia_overrides_path = port.path_from_chromium_base(
            'skia', 'skia_test_expectations.txt')
        # The skia overrides file must exist for it to be listed.
        port._filesystem.write_text_file(skia_overrides_path, 'dummy text')

        port._options.builder_name = 'DUMMY_BUILDER_NAME'
        self.assertEqual(port.expectations_files(),
                         [generic_path, skia_overrides_path,
                          never_fix_tests_path, stale_tests_path, slow_tests_path])

        port._options.builder_name = 'builder (deps)'
        self.assertEqual(port.expectations_files(),
                         [generic_path, skia_overrides_path,
                          never_fix_tests_path, stale_tests_path, slow_tests_path])

        # A builder which does NOT observe the Chromium test_expectations,
        # but still observes the Skia test_expectations...
        port._options.builder_name = 'builder'
        self.assertEqual(port.expectations_files(),
                         [generic_path, skia_overrides_path,
                          never_fix_tests_path, stale_tests_path, slow_tests_path])
    def test_check_sys_deps(self):
        """check_sys_deps() maps the helper binary's exit code to a run-result status."""
        port = self.make_port()
        port._executive = MockExecutive2(exit_code=0)
        self.assertEqual(port.check_sys_deps(needs_http=False), test_run_results.OK_EXIT_STATUS)
        port._executive = MockExecutive2(exit_code=1, output='testing output failure')
        self.assertEqual(port.check_sys_deps(needs_http=False), test_run_results.SYS_DEPS_EXIT_STATUS)
    def test_expectations_ordering(self):
        """expectations_dict() must be ordered: the generic file first, any
        --additional-expectations files last (in the order given)."""
        port = self.make_port()
        for path in port.expectations_files():
            port._filesystem.write_text_file(path, '')
        ordered_dict = port.expectations_dict()
        self.assertEqual(port.path_to_generic_test_expectations_file(), ordered_dict.keys()[0])
        options = MockOptions(additional_expectations=['/tmp/foo', '/tmp/bar'])
        port = self.make_port(options=options)
        for path in port.expectations_files():
            port._filesystem.write_text_file(path, '')
        port._filesystem.write_text_file('/tmp/foo', 'foo')
        port._filesystem.write_text_file('/tmp/bar', 'bar')
        ordered_dict = port.expectations_dict()
        # Keys keep the command-line order; values are the file contents.
        self.assertEqual(ordered_dict.keys()[-2:], options.additional_expectations)  # pylint: disable=E1101
        self.assertEqual(ordered_dict.values()[-2:], ['foo', 'bar'])
    def test_skipped_directories_for_symbols(self):
        """Directories for unsupported codecs are skipped based on the symbols
        found in the built binary (via an nm-style listing)."""
        # This first test confirms that the commonly found symbols result in the expected skipped directories.
        symbols_string = " ".join(["fooSymbol"])
        # 'fooSymbol' implies neither mp3 nor aac support, so both are skipped.
        expected_directories = set([
            "webaudio/codec-tests/mp3",
            "webaudio/codec-tests/aac",
        ])
        result_directories = set(TestWebKitPort(symbols_string=symbols_string)._skipped_tests_for_unsupported_features(test_list=['webaudio/codec-tests/mp3/foo.html']))
        self.assertEqual(result_directories, expected_directories)
        # Test that the nm string parsing actually works:
        symbols_string = """
000000000124f498 s __ZZN7WebCore13ff_mp3_decoder12replaceChildEPS0_S1_E19__PRETTY_FUNCTION__
000000000124f500 s __ZZN7WebCore13ff_mp3_decoder13addChildAboveEPS0_S1_E19__PRETTY_FUNCTION__
000000000124f670 s __ZZN7WebCore13ff_mp3_decoder13addChildBelowEPS0_S1_E19__PRETTY_FUNCTION__
"""
        # Note 'compositing' is not in the list of skipped directories (hence the parsing of GraphicsLayer worked):
        expected_directories = set([
            "webaudio/codec-tests/aac",
        ])
        result_directories = set(TestWebKitPort(symbols_string=symbols_string)._skipped_tests_for_unsupported_features(test_list=['webaudio/codec-tests/mp3/foo.html']))
        self.assertEqual(result_directories, expected_directories)
    def _assert_config_file_for_platform(self, port, platform, config_file):
        """Pretend *port* runs on OS *platform*; check the apache conf name."""
        port.host.platform = MockPlatformInfo(os_name=platform)
        self.assertEqual(port._apache_config_file_name_for_platform(), config_file)
    def _assert_config_file_for_linux_distribution(self, port, distribution, config_file):
        """Pretend *port* runs on the given Linux distro; check the conf name."""
        port.host.platform = MockPlatformInfo(os_name='linux', linux_distribution=distribution)
        self.assertEqual(port._apache_config_file_name_for_platform(), config_file)
    def test_apache_config_file_name_for_platform(self):
        """Each platform/distribution maps to its apache config file; unknown
        platforms fall back to the generic apache2 config."""
        port = TestWebKitPort()
        self._assert_config_file_for_platform(port, 'cygwin', 'cygwin-httpd.conf')
        # Pin the detected apache version so the expected names are stable.
        port._apache_version = lambda: '2.2'
        self._assert_config_file_for_platform(port, 'linux', 'apache2-httpd-2.2.conf')
        self._assert_config_file_for_linux_distribution(port, 'arch', 'arch-httpd-2.2.conf')
        self._assert_config_file_for_linux_distribution(port, 'debian', 'debian-httpd-2.2.conf')
        self._assert_config_file_for_linux_distribution(port, 'slackware', 'apache2-httpd-2.2.conf')
        self._assert_config_file_for_linux_distribution(port, 'redhat', 'redhat-httpd-2.2.conf')
        self._assert_config_file_for_platform(port, 'mac', 'apache2-httpd-2.2.conf')
        self._assert_config_file_for_platform(port, 'win32', 'apache2-httpd-2.2.conf')  # win32 isn't a supported sys.platform. AppleWin/WinCairo/WinCE ports all use cygwin.
        self._assert_config_file_for_platform(port, 'barf', 'apache2-httpd-2.2.conf')
def test_path_to_apache_config_file(self):
port = TestWebKitPort()
saved_environ = os.environ.copy()
try:
os.environ['WEBKIT_HTTP_SERVER_CONF_PATH'] = '/path/to/httpd.conf'
self.assertRaises(IOError, port.path_to_apache_config_file)
port._filesystem.write_text_file('/existing/httpd.conf', 'Hello, world!')
os.environ['WEBKIT_HTTP_SERVER_CONF_PATH'] = '/existing/httpd.conf'
self.assertEqual(port.path_to_apache_config_file(), '/existing/httpd.conf')
finally:
os.environ = saved_environ.copy()
# Mock out _apache_config_file_name_for_platform to avoid mocking platform info
port._apache_config_file_name_for_platform = lambda: 'httpd.conf'
self.assertEqual(port.path_to_apache_config_file(), '/mock-checkout/third_party/WebKit/LayoutTests/http/conf/httpd.conf')
# Check that even if we mock out _apache_config_file_name, the environment variable takes precedence.
saved_environ = os.environ.copy()
try:
os.environ['WEBKIT_HTTP_SERVER_CONF_PATH'] = '/existing/httpd.conf'
self.assertEqual(port.path_to_apache_config_file(), '/existing/httpd.conf')
finally:
os.environ = saved_environ.copy()
def test_additional_platform_directory(self):
port = self.make_port(options=MockOptions(additional_platform_directory=['/tmp/foo']))
self.assertEqual(port.baseline_search_path()[0], '/tmp/foo')
    def test_virtual_test_suites(self):
        # We test that we can load the real LayoutTests/VirtualTestSuites file properly, so we
        # use a real SystemHost(). We don't care what virtual_test_suites() returns as long
        # as it is iterable.
        # NOTE(review): collections.Iterable is removed in Python 3.10+
        # (moved to collections.abc) -- fine for the Python 2 era this file
        # targets, but worth confirming before any interpreter upgrade.
        port = self.make_port(host=SystemHost(), port_name=self.full_port_name)
        self.assertTrue(isinstance(port.virtual_test_suites(), collections.Iterable))
| bsd-3-clause |
soarpenguin/python-scripts | stylechecker.py | 1 | 2336 | #!/usr/bin/python
"""usage: python stylechecker.py /path/to/the/c/code"""
import os
import sys
import string
import re
WHITE = '\033[97m'
CYAN = '\033[96m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
ENDC = '\033[0m'
def check_file(file):
if re.search('\.[c|h]$', file) == None:
return
f = open(file)
i = 1
file_name_printed = False
for line in f:
line = line.replace('\n', '')
# check the number of columns greater than 80
if len(line) > 80:
if not file_name_printed:
print RED + file + ':' + ENDC
file_name_printed = True
print (GREEN + ' [>80]:' + BLUE + ' #%d(%d)' + WHITE + ':%s') % (i, len(line), line) + ENDC
# check the last space in the end of line
if re.match(r'.*\s$', line):
if not file_name_printed:
print RED + file + ':' + ENDC
file_name_printed = True
print (GREEN + ' [SPACE]:' + BLUE + ' #%d(%d)' + WHITE + ':%s') % (i, len(line), line) + ENDC
# check the TAB key
if string.find(line, '\t') >= 0:
if not file_name_printed:
print RED + file + ':' + ENDC
file_name_printed = True
print (YELLOW + ' [TAB]:' + BLUE + ' #%d(%d)' + WHITE + ':%s') % (i, len(line), line) + ENDC
# check blank lines
if line.isspace():
if not file_name_printed:
print RED + file + ':' + ENDC
file_name_printed = True
print (CYAN + ' [BLK]:' + BLUE + ' #%d(%d)' + WHITE + ':%s') % (i, len(line), line) + ENDC
i = i + 1
f.close()
def walk_dir(dir):
    """Run check_file() on every file below *dir*, recursively.

    os.walk() already descends into subdirectories, so no explicit
    recursion is needed.  The previous ``for d in dirs: walk_dir(d)``
    re-walked each subdirectory using its bare name relative to the
    current working directory (not to *root*), producing duplicate or
    missing reports depending on where the script was launched from.
    """
    for root, dirs, files in os.walk(dir):
        for f in files:
            # os.path.join is correct whether or not root ends in '/'.
            check_file(os.path.join(root, f))
def usage():
    # Print the help text and terminate with a non-zero exit status.
    # (Python 2 print statement; this script targets Python 2 only.)
    print """
Usage: stylechecker.py file or dir
    python stylechecker.py /path/to/the/c/code
    or
    python stylechecker.py /file/of/c/code """
    sys.exit(1)
### main
# Entry point: expects exactly one argument, a C file or a directory tree.
if len(sys.argv) == 2:
    PATH = sys.argv[1]
    if os.path.isfile(PATH):
        check_file(PATH)
    elif os.path.isdir(PATH):
        walk_dir(PATH)
    else:
        # The argument exists but is neither a regular file nor a directory
        # (e.g. a dangling symlink or a device node).
        print RED + "Check the %s is file or dir" % PATH + ENDC
else:
    usage()
| gpl-3.0 |
dims/cinder | cinder/volume/drivers/prophetstor/dpl_iscsi.py | 1 | 7014 | # Copyright (c) 2014 ProphetStor, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _, _LI, _LW
import cinder.volume.driver
from cinder.volume.drivers.prophetstor import dplcommon
LOG = logging.getLogger(__name__)
class DPLISCSIDriver(dplcommon.DPLCOMMONDriver,
                     cinder.volume.driver.ISCSIDriver):
    """ProphetStor DPL volume driver exposing volumes over iSCSI.

    All array communication goes through ``self.dpl`` (provided by
    DPLCOMMONDriver).  Array calls may complete asynchronously: they return
    errno.EAGAIN together with an event id that must then be polled via
    ``_wait_event`` until the operation succeeds or reports an error state.
    """
    def __init__(self, *args, **kwargs):
        super(DPLISCSIDriver, self).__init__(*args, **kwargs)
    def initialize_connection(self, volume, connector):
        """Allow connection to connector and return connection info."""
        properties = {}
        properties['target_lun'] = None
        properties['target_discovered'] = True
        properties['target_portal'] = ''
        properties['target_iqn'] = None
        properties['volume_id'] = volume['id']
        dpl_server = self.configuration.san_ip
        dpl_iscsi_port = self.configuration.iscsi_port
        # Ask the array to export the volume to this initiator.
        # (_conver_uuid2hex (sic) is the project's UUID normalizer.)
        ret, output = self.dpl.assign_vdev(self._conver_uuid2hex(
            volume['id']), connector['initiator'].lower(), volume['id'],
            '%s:%d' % (dpl_server, dpl_iscsi_port), 0)
        if ret == errno.EAGAIN:
            # Asynchronous completion: poll the returned event until done.
            ret, event_uuid = self._get_event_uuid(output)
            if len(event_uuid):
                ret = 0
                status = self._wait_event(
                    self.dpl.get_vdev_status, self._conver_uuid2hex(
                        volume['id']), event_uuid)
                if status['state'] == 'error':
                    ret = errno.EFAULT
                    msg = _('Flexvisor failed to assign volume %(id)s: '
                            '%(status)s.') % {'id': volume['id'],
                                              'status': status}
                    raise exception.VolumeBackendAPIException(data=msg)
            else:
                ret = errno.EFAULT
                msg = _('Flexvisor failed to assign volume %(id)s due to '
                        'unable to query status by event '
                        'id.') % {'id': volume['id']}
                raise exception.VolumeBackendAPIException(data=msg)
        elif ret != 0:
            msg = _('Flexvisor assign volume failed.:%(id)s:'
                    '%(status)s.') % {'id': volume['id'], 'status': ret}
            raise exception.VolumeBackendAPIException(data=msg)
        if ret == 0:
            # Re-read the vdev to discover the portal/IQN/LUN we were given.
            ret, output = self.dpl.get_vdev(
                self._conver_uuid2hex(volume['id']))
        if ret == 0:
            for tgInfo in output['exports']['Network/iSCSI']:
                # 'permissions' comes in two shapes: a list of
                # {initiator: lun} dicts, or a flat list of initiator names.
                if tgInfo['permissions'] and \
                        isinstance(tgInfo['permissions'][0], dict):
                    for assign in tgInfo['permissions']:
                        if connector['initiator'].lower() in assign.keys():
                            for tgportal in tgInfo.get('portals', {}):
                                # Only the first portal is used.
                                properties['target_portal'] = tgportal
                                break
                            properties['target_lun'] = \
                                assign[connector['initiator'].lower()]
                            break
                    if properties['target_portal'] != '':
                        properties['target_iqn'] = tgInfo['target_identifier']
                        break
                else:
                    if connector['initiator'].lower() in tgInfo['permissions']:
                        for tgportal in tgInfo.get('portals', {}):
                            properties['target_portal'] = tgportal
                            break
                        if properties['target_portal'] != '':
                            properties['target_lun'] = \
                                tgInfo['logical_unit_number']
                            properties['target_iqn'] = \
                                tgInfo['target_identifier']
                            break
        if not (ret == 0 or properties['target_portal']):
            msg = _('Flexvisor failed to assign volume %(volume)s '
                    'iqn %(iqn)s.') % {'volume': volume['id'],
                                       'iqn': connector['initiator']}
            raise exception.VolumeBackendAPIException(data=msg)
        return {'driver_volume_type': 'iscsi', 'data': properties}
    def terminate_connection(self, volume, connector, **kwargs):
        """Disallow connection from connector."""
        ret, output = self.dpl.unassign_vdev(
            self._conver_uuid2hex(volume['id']),
            connector['initiator'])
        if ret == errno.EAGAIN:
            # Asynchronous completion: poll the event for the final state.
            ret, event_uuid = self._get_event_uuid(output)
            if ret == 0:
                status = self._wait_event(
                    self.dpl.get_vdev_status, volume['id'], event_uuid)
                if status['state'] == 'error':
                    ret = errno.EFAULT
                    msg = _('Flexvisor failed to unassign volume %(id)s:'
                            ' %(status)s.') % {'id': volume['id'],
                                               'status': status}
                    raise exception.VolumeBackendAPIException(data=msg)
            else:
                msg = _('Flexvisor failed to unassign volume (get event) '
                        '%(id)s.') % {'id': volume['id']}
                raise exception.VolumeBackendAPIException(data=msg)
        elif ret == errno.ENODATA:
            # Already unassigned: not an error, just log it.
            LOG.info(_LI('Flexvisor already unassigned volume '
                         '%(id)s.'), {'id': volume['id']})
        elif ret != 0:
            msg = _('Flexvisor failed to unassign volume:%(id)s:'
                    '%(status)s.') % {'id': volume['id'], 'status': ret}
            raise exception.VolumeBackendAPIException(data=msg)
    def get_volume_stats(self, refresh=False):
        """Return cached backend stats; refresh them from the array if asked.

        Failures during refresh are logged and the previous (possibly
        stale) ``self._stats`` is returned rather than raising.
        """
        if refresh:
            try:
                data = super(DPLISCSIDriver, self).get_volume_stats(refresh)
                if data:
                    data['storage_protocol'] = 'iSCSI'
                    backend_name = \
                        self.configuration.safe_get('volume_backend_name')
                    data['volume_backend_name'] = \
                        (backend_name or 'DPLISCSIDriver')
                    self._stats = data
            except Exception as exc:
                LOG.warning(_LW('Cannot get volume status '
                                '%(exc)s.'), {'exc': exc})
        return self._stats
| apache-2.0 |
everaldo/example-code | 21-class-metaprog/bulkfood/model_v6.py | 7 | 1563 | import abc
class AutoStorage:
    """Descriptor that transparently stores values on the managed instance.

    Each descriptor instance picks a unique storage slot named
    '_<DescriptorClass>#<serial>' so that several descriptors attached to
    the same managed class never collide.
    """
    __counter = 0
    def __init__(self):
        cls = self.__class__
        self.storage_name = '_{}#{}'.format(cls.__name__, cls.__counter)
        cls.__counter += 1
    def __get__(self, instance, owner):
        # Class-level access returns the descriptor itself, which lets
        # tools introspect it (and mirrors the behavior of property).
        if instance is None:
            return self
        return getattr(instance, self.storage_name)
    def __set__(self, instance, value):
        setattr(instance, self.storage_name, value)
class Validated(abc.ABC, AutoStorage):
    """Abstract descriptor: validate on assignment, then store via AutoStorage."""
    def __set__(self, instance, value):
        # Store whatever validate() returns -- subclasses may normalize
        # the value (e.g. strip whitespace) as well as reject it.
        value = self.validate(instance, value)
        super().__set__(instance, value)
    @abc.abstractmethod
    def validate(self, instance, value):
        """return validated value or raise ValueError"""
class Quantity(Validated):
    """a number greater than zero"""
    def validate(self, instance, value):
        """Reject zero and negative values; return *value* unchanged."""
        # Note: the guard is deliberately `value <= 0` (not `not value > 0`)
        # so the behavior for odd inputs is preserved exactly.
        if value <= 0:
            raise ValueError('value must be > 0')
        return value
class NonBlank(Validated):
    """a string with at least one non-space character"""
    def validate(self, instance, value):
        """Strip surrounding whitespace; reject strings that end up empty."""
        stripped = value.strip()
        if not stripped:
            raise ValueError('value cannot be empty or blank')
        # The stripped text (not the original) is what gets stored.
        return stripped
# BEGIN MODEL_V6
def entity(cls):  # <1>
    """Class decorator: rename every Validated descriptor's storage slot to
    the readable form '_<DescriptorClass>#<managed attribute name>'."""
    for attr_name, descriptor in cls.__dict__.items():  # <2>
        if isinstance(descriptor, Validated):  # <3>
            descriptor.storage_name = '_{}#{}'.format(
                type(descriptor).__name__, attr_name)  # <4>
    return cls  # <5>
# END MODEL_V6
| mit |
mu-editor/mu | utils/adafruit_api.py | 3 | 5749 | """
The scrapy package must be installed.
Usage:
scrapy runspider adafruit_api.py -o adafruit.json
"""
import scrapy
from bs4 import BeautifulSoup
URL = "https://circuitpython.readthedocs.io/en/3.x/shared-bindings/index.html"
class AdafruitSpider(scrapy.Spider):
    """Scrapy spider that scrapes the CircuitPython shared-bindings docs and
    yields one dict per documented function, class, method and attribute."""
    name = "AdafruitSpider"
    start_urls = [URL]
    def parse(self, response):
        """
        Scrapes the list of modules associated with CircuitPython. Causes
        scrapy to follow the links to the module docs and uses a different
        parser to extract the API information contained therein.
        """
        for next_page in response.css("div.toctree-wrapper li a"):
            yield response.follow(next_page, self.parse_api)
    def to_dict(self, name, args, description):
        """
        Returns a dictionary representation of the API element if valid, else
        returns None.
        """
        # Dunder names (ending in '__') are considered private API and skipped.
        if name.endswith("__"):
            return None
        return {"name": name, "args": args, "description": description}
    def parse_api(self, response):
        """
        Parses a *potential* API documentation page.
        """
        # Find all the function definitions on the page:
        for func in response.css("dl.function"):
            # Class details are always first items in dl.
            func_spec = func.css("dt")[0]
            func_doc = func.css("dd")[0]
            # Function name is always first dt
            # Full name = <module prefix> + <bare name>.
            fn1 = BeautifulSoup(
                func_spec.css("code.descclassname").extract()[0], "html.parser"
            ).text
            fn2 = BeautifulSoup(
                func_spec.css("code.descname").extract()[0], "html.parser"
            ).text
            func_name = fn1 + fn2
            # Args into function
            args = []
            for ems in func_spec.css("em"):
                args.append(
                    ems.extract().replace("<em>", "").replace("</em>", "")
                )
            # Function description.
            soup = BeautifulSoup(func_doc.extract(), "html.parser")
            d = self.to_dict(func_name, args, soup.text)
            if d:
                yield d
        # Find all the class definitions on the page:
        for classes in response.css("dl.class"):
            # Class details are always first items in dl.
            class_spec = classes.css("dt")[0]
            class_doc = classes.css("dd")[0]
            # Class name is always first dt
            cn1 = BeautifulSoup(
                class_spec.css("code.descclassname").extract()[0],
                "html.parser",
            ).text
            cn2 = BeautifulSoup(
                class_spec.css("code.descname").extract()[0], "html.parser"
            ).text
            class_name = cn1 + cn2
            # Args into __init__
            init_args = []
            for ems in class_spec.css("em"):
                # <em class="property"> marks keywords like 'class', not args.
                props = "property" in ems.css("::attr(class)").extract()
                if not props:
                    init_args.append(
                        ems.extract().replace("<em>", "").replace("</em>", "")
                    )
            # Class description. Everything up to and including the field-list.
            soup = BeautifulSoup(class_doc.extract(), "html.parser")
            contents = soup.contents[0].contents
            description = ""
            for child in contents:
                if child.name == "p":
                    description += child.text + "\n\n"
                if child.name == "table":
                    raw = child.text
                    rows = [r.strip() for r in raw.split("/n") if r.strip()]
                    description += "\n"
                    description += "\n".join(rows)
                    break
                if child.name == "dl":
                    break
            d = self.to_dict(class_name, init_args, description)
            if d:
                yield d
            # Remaining dt are methods or attributes
            for methods in classes.css("dl.method"):
                # Parse and yield methods.
                method_name = BeautifulSoup(
                    methods.css("code.descname").extract()[0], "html.parser"
                ).text
                # NOTE(review): this 'break' abandons ALL remaining methods of
                # the class once the first dunder method is seen; 'continue'
                # (skip just this one) looks like the intent -- confirm before
                # changing, as output contents would grow.
                if method_name.startswith("__"):
                    break
                method_name = class_name + "." + method_name
                method_args = []
                for ems in methods.css("em"):
                    method_args.append(
                        ems.extract().replace("<em>", "").replace("</em>", "")
                    )
                description = BeautifulSoup(
                    methods.css("dd")[0].extract(), "html.parser"
                ).text
                d = self.to_dict(method_name, method_args, description)
                if d:
                    yield d
            for data in classes.css("dl.attribute"):
                name = BeautifulSoup(
                    data.css("code.descname").extract()[0], "html.parser"
                ).text
                name = class_name + "." + name
                description = BeautifulSoup(
                    data.css("dd")[0].extract(), "html.parser"
                ).text
                # Attributes have no call signature, hence args=None.
                d = self.to_dict(name, None, description)
                if d:
                    yield d
            for data in classes.css("dl.data"):
                name = BeautifulSoup(
                    data.css("code.descname").extract()[0], "html.parser"
                ).text
                name = class_name + "." + name
                description = BeautifulSoup(
                    data.css("dd")[0].extract(), "html.parser"
                ).text
                d = self.to_dict(name, None, description)
                if d:
                    yield d
| gpl-3.0 |
GaussDing/django | tests/update_only_fields/tests.py | 296 | 9780 | from __future__ import unicode_literals
from django.db.models.signals import post_save, pre_save
from django.test import TestCase
from .models import Account, Employee, Person, Profile, ProxyEmployee
class UpdateOnlyFieldsTests(TestCase):
    """Tests for Model.save(update_fields=...): which columns are written,
    how it interacts with deferred fields, inheritance, proxies, FKs, M2Ms
    and the pre_save/post_save signals."""
    def test_update_fields_basic(self):
        # Only the named field is persisted; other in-memory changes are not.
        s = Person.objects.create(name='Sara', gender='F')
        self.assertEqual(s.gender, 'F')
        s.gender = 'M'
        s.name = 'Ian'
        s.save(update_fields=['name'])
        s = Person.objects.get(pk=s.pk)
        self.assertEqual(s.gender, 'F')
        self.assertEqual(s.name, 'Ian')
    def test_update_fields_deferred(self):
        # Saving a deferred instance only writes the loaded/modified fields
        # (a single UPDATE query).
        s = Person.objects.create(name='Sara', gender='F', pid=22)
        self.assertEqual(s.gender, 'F')
        s1 = Person.objects.defer("gender", "pid").get(pk=s.pk)
        s1.name = "Emily"
        s1.gender = "M"
        with self.assertNumQueries(1):
            s1.save()
        s2 = Person.objects.get(pk=s1.pk)
        self.assertEqual(s2.name, "Emily")
        self.assertEqual(s2.gender, "M")
    def test_update_fields_only_1(self):
        s = Person.objects.create(name='Sara', gender='F')
        self.assertEqual(s.gender, 'F')
        s1 = Person.objects.only('name').get(pk=s.pk)
        s1.name = "Emily"
        s1.gender = "M"
        with self.assertNumQueries(1):
            s1.save()
        s2 = Person.objects.get(pk=s1.pk)
        self.assertEqual(s2.name, "Emily")
        self.assertEqual(s2.gender, "M")
    def test_update_fields_only_2(self):
        # update_fields on a deferred field forces a fetch of that field
        # first (2 queries), and unrelated edits are discarded.
        s = Person.objects.create(name='Sara', gender='F', pid=22)
        self.assertEqual(s.gender, 'F')
        s1 = Person.objects.only('name').get(pk=s.pk)
        s1.name = "Emily"
        s1.gender = "M"
        with self.assertNumQueries(2):
            s1.save(update_fields=['pid'])
        s2 = Person.objects.get(pk=s1.pk)
        self.assertEqual(s2.name, "Sara")
        self.assertEqual(s2.gender, "F")
    def test_update_fields_only_repeated(self):
        s = Person.objects.create(name='Sara', gender='F')
        self.assertEqual(s.gender, 'F')
        s1 = Person.objects.only('name').get(pk=s.pk)
        s1.gender = 'M'
        with self.assertNumQueries(1):
            s1.save()
        # Test that the deferred class does not remember that gender was
        # set, instead the instance should remember this.
        s1 = Person.objects.only('name').get(pk=s.pk)
        with self.assertNumQueries(1):
            s1.save()
    def test_update_fields_inheritance_defer(self):
        profile_boss = Profile.objects.create(name='Boss', salary=3000)
        e1 = Employee.objects.create(name='Sara', gender='F',
                                     employee_num=1, profile=profile_boss)
        e1 = Employee.objects.only('name').get(pk=e1.pk)
        e1.name = 'Linda'
        with self.assertNumQueries(1):
            e1.save()
        self.assertEqual(Employee.objects.get(pk=e1.pk).name,
                         'Linda')
    def test_update_fields_fk_defer(self):
        # Both assigning the related object and assigning the raw *_id
        # attribute should produce a single UPDATE.
        profile_boss = Profile.objects.create(name='Boss', salary=3000)
        profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
        e1 = Employee.objects.create(name='Sara', gender='F',
                                     employee_num=1, profile=profile_boss)
        e1 = Employee.objects.only('profile').get(pk=e1.pk)
        e1.profile = profile_receptionist
        with self.assertNumQueries(1):
            e1.save()
        self.assertEqual(Employee.objects.get(pk=e1.pk).profile, profile_receptionist)
        e1.profile_id = profile_boss.pk
        with self.assertNumQueries(1):
            e1.save()
        self.assertEqual(Employee.objects.get(pk=e1.pk).profile, profile_boss)
    def test_select_related_only_interaction(self):
        profile_boss = Profile.objects.create(name='Boss', salary=3000)
        e1 = Employee.objects.create(name='Sara', gender='F',
                                     employee_num=1, profile=profile_boss)
        e1 = Employee.objects.only('profile__salary').select_related('profile').get(pk=e1.pk)
        profile_boss.name = 'Clerk'
        profile_boss.salary = 1000
        profile_boss.save()
        # The loaded salary of 3000 gets saved, the name of 'Clerk' isn't
        # overwritten.
        with self.assertNumQueries(1):
            e1.profile.save()
        reloaded_profile = Profile.objects.get(pk=profile_boss.pk)
        self.assertEqual(reloaded_profile.name, profile_boss.name)
        self.assertEqual(reloaded_profile.salary, 3000)
    def test_update_fields_m2m(self):
        # Naming an M2M field in update_fields is rejected: M2M rows live in
        # a separate table and are not written by save().
        profile_boss = Profile.objects.create(name='Boss', salary=3000)
        e1 = Employee.objects.create(name='Sara', gender='F',
                                     employee_num=1, profile=profile_boss)
        a1 = Account.objects.create(num=1)
        a2 = Account.objects.create(num=2)
        e1.accounts = [a1, a2]
        with self.assertRaises(ValueError):
            e1.save(update_fields=['accounts'])
    def test_update_fields_inheritance(self):
        profile_boss = Profile.objects.create(name='Boss', salary=3000)
        profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
        e1 = Employee.objects.create(name='Sara', gender='F',
                                     employee_num=1, profile=profile_boss)
        e1.name = 'Ian'
        e1.gender = 'M'
        e1.save(update_fields=['name'])
        e2 = Employee.objects.get(pk=e1.pk)
        self.assertEqual(e2.name, 'Ian')
        self.assertEqual(e2.gender, 'F')
        self.assertEqual(e2.profile, profile_boss)
        e2.profile = profile_receptionist
        e2.name = 'Sara'
        e2.save(update_fields=['profile'])
        e3 = Employee.objects.get(pk=e1.pk)
        self.assertEqual(e3.name, 'Ian')
        self.assertEqual(e3.profile, profile_receptionist)
        with self.assertNumQueries(1):
            e3.profile = profile_boss
            e3.save(update_fields=['profile_id'])
        e4 = Employee.objects.get(pk=e3.pk)
        self.assertEqual(e4.profile, profile_boss)
        self.assertEqual(e4.profile_id, profile_boss.pk)
    def test_update_fields_inheritance_with_proxy_model(self):
        profile_boss = Profile.objects.create(name='Boss', salary=3000)
        profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
        e1 = ProxyEmployee.objects.create(name='Sara', gender='F',
                                          employee_num=1, profile=profile_boss)
        e1.name = 'Ian'
        e1.gender = 'M'
        e1.save(update_fields=['name'])
        e2 = ProxyEmployee.objects.get(pk=e1.pk)
        self.assertEqual(e2.name, 'Ian')
        self.assertEqual(e2.gender, 'F')
        self.assertEqual(e2.profile, profile_boss)
        e2.profile = profile_receptionist
        e2.name = 'Sara'
        e2.save(update_fields=['profile'])
        e3 = ProxyEmployee.objects.get(pk=e1.pk)
        self.assertEqual(e3.name, 'Ian')
        self.assertEqual(e3.profile, profile_receptionist)
    def test_update_fields_signals(self):
        # The update_fields kwarg is forwarded verbatim to both signals.
        p = Person.objects.create(name='Sara', gender='F')
        pre_save_data = []
        def pre_save_receiver(**kwargs):
            pre_save_data.append(kwargs['update_fields'])
        pre_save.connect(pre_save_receiver)
        post_save_data = []
        def post_save_receiver(**kwargs):
            post_save_data.append(kwargs['update_fields'])
        post_save.connect(post_save_receiver)
        p.save(update_fields=['name'])
        self.assertEqual(len(pre_save_data), 1)
        self.assertEqual(len(pre_save_data[0]), 1)
        self.assertIn('name', pre_save_data[0])
        self.assertEqual(len(post_save_data), 1)
        self.assertEqual(len(post_save_data[0]), 1)
        self.assertIn('name', post_save_data[0])
        pre_save.disconnect(pre_save_receiver)
        post_save.disconnect(post_save_receiver)
    def test_update_fields_incorrect_params(self):
        # Unknown field names and non-iterable-of-strings values both raise.
        s = Person.objects.create(name='Sara', gender='F')
        with self.assertRaises(ValueError):
            s.save(update_fields=['first_name'])
        with self.assertRaises(ValueError):
            s.save(update_fields="name")
    def test_empty_update_fields(self):
        s = Person.objects.create(name='Sara', gender='F')
        pre_save_data = []
        def pre_save_receiver(**kwargs):
            pre_save_data.append(kwargs['update_fields'])
        pre_save.connect(pre_save_receiver)
        post_save_data = []
        def post_save_receiver(**kwargs):
            post_save_data.append(kwargs['update_fields'])
        post_save.connect(post_save_receiver)
        # Save is skipped.
        with self.assertNumQueries(0):
            s.save(update_fields=[])
        # Signals were skipped, too...
        self.assertEqual(len(pre_save_data), 0)
        self.assertEqual(len(post_save_data), 0)
        pre_save.disconnect(pre_save_receiver)
        post_save.disconnect(post_save_receiver)
    def test_num_queries_inheritance(self):
        # With multi-table inheritance, only the table owning the named
        # field(s) is written: one query per touched table.
        s = Employee.objects.create(name='Sara', gender='F')
        s.employee_num = 1
        s.name = 'Emily'
        with self.assertNumQueries(1):
            s.save(update_fields=['employee_num'])
        s = Employee.objects.get(pk=s.pk)
        self.assertEqual(s.employee_num, 1)
        self.assertEqual(s.name, 'Sara')
        s.employee_num = 2
        s.name = 'Emily'
        with self.assertNumQueries(1):
            s.save(update_fields=['name'])
        s = Employee.objects.get(pk=s.pk)
        self.assertEqual(s.name, 'Emily')
        self.assertEqual(s.employee_num, 1)
        # A little sanity check that we actually did updates...
        self.assertEqual(Employee.objects.count(), 1)
        self.assertEqual(Person.objects.count(), 1)
        with self.assertNumQueries(2):
            s.save(update_fields=['name', 'employee_num'])
| bsd-3-clause |
loli/medpy | doc/numpydoc/numpydoc/docscrape_sphinx.py | 1 | 9399 |
import sys, re, inspect, textwrap, pydoc
import sphinx
import collections
from .docscrape import NumpyDocString, FunctionDoc, ClassDoc
# Py2/Py3 compat shim: sixu() yields a unicode string from a literal that
# may contain '\uXXXX' escapes (no-op on Python 3).
if sys.version_info[0] >= 3:
    sixu = lambda s: s
else:
    sixu = lambda s: str(s, 'unicode_escape')
class SphinxDocString(NumpyDocString):
    """Render a parsed numpydoc docstring as Sphinx/reST source lines.

    Each _str_* method returns a list of reST lines for one docstring
    section; __str__ concatenates them in the canonical section order.
    """
    def __init__(self, docstring, config={}):
        NumpyDocString.__init__(self, docstring, config=config)
        self.load_config(config)
    def load_config(self, config):
        # use_plots: wrap matplotlib examples in a .. plot:: directive.
        # class_members_toctree: emit :toctree: under autosummary tables.
        self.use_plots = config.get('use_plots', False)
        self.class_members_toctree = config.get('class_members_toctree', True)
    # string conversion routines
    def _str_header(self, name, symbol='`'):
        return ['.. rubric:: ' + name, '']
    def _str_field_list(self, name):
        return [':' + name + ':']
    def _str_indent(self, doc, indent=4):
        out = []
        for line in doc:
            out += [' '*indent + line]
        return out
    def _str_signature(self):
        # NOTE(review): the unconditional return makes the code below
        # unreachable, so signatures are never rendered -- presumably a
        # deliberate way to suppress them (Sphinx shows its own); confirm
        # before "fixing".
        return ['']
        if self['Signature']:
            return ['``%s``' % self['Signature']] + ['']
        else:
            return ['']
    def _str_summary(self):
        return self['Summary'] + ['']
    def _str_extended_summary(self):
        return self['Extended Summary'] + ['']
    def _str_returns(self):
        # Returns entries may be '<name> : <type>' or just a type.
        out = []
        if self['Returns']:
            out += self._str_field_list('Returns')
            out += ['']
            for param, param_type, desc in self['Returns']:
                if param_type:
                    out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                              param_type)])
                else:
                    out += self._str_indent([param.strip()])
                if desc:
                    out += ['']
                    out += self._str_indent(desc, 8)
                out += ['']
        return out
    def _str_param_list(self, name):
        out = []
        if self[name]:
            out += self._str_field_list(name)
            out += ['']
            for param, param_type, desc in self[name]:
                if param_type:
                    out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                              param_type)])
                else:
                    out += self._str_indent(['**%s**' % param.strip()])
                if desc:
                    out += ['']
                    out += self._str_indent(desc, 8)
                out += ['']
        return out
    @property
    def _obj(self):
        # The wrapped object: _cls for classes, _f for functions/objects.
        if hasattr(self, '_cls'):
            return self._cls
        elif hasattr(self, '_f'):
            return self._f
        return None
    def _str_member_list(self, name):
        """
        Generate a member listing, autosummary:: table where possible,
        and a table where not.
        """
        out = []
        if self[name]:
            out += ['.. rubric:: %s' % name, '']
            prefix = getattr(self, '_name', '')
            if prefix:
                prefix = '~%s.' % prefix
            autosum = []
            others = []
            for param, param_type, desc in self[name]:
                param = param.strip()
                # Check if the referenced member can have a docstring or not
                param_obj = getattr(self._obj, param, None)
                if not (isinstance(param_obj, collections.Callable)
                        or isinstance(param_obj, property)
                        or inspect.isgetsetdescriptor(param_obj)):
                    param_obj = None
                if param_obj and (pydoc.getdoc(param_obj) or not desc):
                    # Referenced object has a docstring
                    autosum += ["   %s%s" % (prefix, param)]
                else:
                    others.append((param, param_type, desc))
            if autosum:
                out += ['.. autosummary::']
                if self.class_members_toctree:
                    out += ['   :toctree:']
                out += [''] + autosum
            if others:
                # Render members without docstrings as a simple reST table.
                maxlen_0 = max(3, max([len(x[0]) for x in others]))
                hdr = sixu("=")*maxlen_0 + sixu("  ") + sixu("=")*10
                fmt = sixu('%%%ds  %%s  ') % (maxlen_0,)
                out += ['', hdr]
                for param, param_type, desc in others:
                    desc = sixu(" ").join(x.strip() for x in desc).strip()
                    if param_type:
                        desc = "(%s) %s" % (param_type, desc)
                    out += [fmt % (param.strip(), desc)]
                out += [hdr]
            out += ['']
        return out
    def _str_section(self, name):
        out = []
        if self[name]:
            out += self._str_header(name)
            out += ['']
            content = textwrap.dedent("\n".join(self[name])).split("\n")
            out += content
            out += ['']
        return out
    def _str_see_also(self, func_role):
        out = []
        if self['See Also']:
            see_also = super(SphinxDocString, self)._str_see_also(func_role)
            out = ['.. seealso::', '']
            out += self._str_indent(see_also[2:])
        return out
    def _str_warnings(self):
        out = []
        if self['Warnings']:
            out = ['.. warning::', '']
            out += self._str_indent(self['Warnings'])
        return out
    def _str_index(self):
        idx = self['index']
        out = []
        if len(idx) == 0:
            return out
        out += ['.. index:: %s' % idx.get('default','')]
        for section, references in list(idx.items()):
            if section == 'default':
                continue
            elif section == 'refguide':
                out += ['   single: %s' % (', '.join(references))]
            else:
                out += ['   %s: %s' % (section, ','.join(references))]
        return out
    def _str_references(self):
        out = []
        if self['References']:
            out += self._str_header('References')
            if isinstance(self['References'], str):
                self['References'] = [self['References']]
            out.extend(self['References'])
            out += ['']
            # Latex collects all references to a separate bibliography,
            # so we need to insert links to it
            if sphinx.__version__ >= "0.6":
                out += ['.. only:: latex','']
            else:
                out += ['.. latexonly::','']
            items = []
            for line in self['References']:
                m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
                if m:
                    items.append(m.group(1))
            out += ['   ' + ", ".join(["[%s]_" % item for item in items]), '']
        return out
    def _str_examples(self):
        # Wrap matplotlib examples in .. plot:: when use_plots is on and
        # the author did not already add the directive.
        examples_str = "\n".join(self['Examples'])
        if (self.use_plots and 'import matplotlib' in examples_str
                and 'plot::' not in examples_str):
            out = []
            out += self._str_header('Examples')
            out += ['.. plot::', '']
            out += self._str_indent(self['Examples'])
            out += ['']
            return out
        else:
            return self._str_section('Examples')
    def __str__(self, indent=0, func_role="obj"):
        # Assemble all sections in canonical numpydoc order.
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        out += self._str_param_list('Parameters')
        out += self._str_returns()
        for param_list in ('Other Parameters', 'Raises', 'Warns'):
            out += self._str_param_list(param_list)
        out += self._str_warnings()
        out += self._str_see_also(func_role)
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_examples()
        for param_list in ('Attributes', 'Methods'):
            out += self._str_member_list(param_list)
        out = self._str_indent(out,indent)
        return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    """FunctionDoc parser combined with the Sphinx/reST renderer."""
    def __init__(self, obj, doc=None, config={}):
        # load_config must run before FunctionDoc triggers any rendering.
        self.load_config(config)
        FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
    """ClassDoc parser combined with the Sphinx/reST renderer."""
    def __init__(self, obj, doc=None, func_doc=None, config={}):
        self.load_config(config)
        # NOTE(review): the func_doc argument is accepted but not forwarded;
        # None is always passed to ClassDoc -- confirm whether callers rely
        # on passing a custom func_doc here.
        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
    """Renderer for plain objects: the docstring is supplied directly."""
    def __init__(self, obj, doc=None, config={}):
        self._f = obj
        self.load_config(config)
        SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config=None):
    """Return the appropriate Sphinx doc wrapper for *obj*.

    Parameters
    ----------
    obj : object
        Object to document (class, module, callable, or anything else).
    what : str, optional
        Force the kind ('class', 'module', 'function', 'method', 'object');
        autodetected from *obj* when None.
    doc : str, optional
        Explicit docstring text; otherwise taken from *obj*.
    config : dict, optional
        Configuration forwarded to the doc classes.
    """
    # Fresh dict per call -- avoids the shared mutable-default pitfall.
    if config is None:
        config = {}
    if what is None:
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            # callable() works on both Python 2 and 3; the previous
            # isinstance(obj, collections.Callable) breaks on Python >= 3.10
            # where Callable lives only in collections.abc.
            what = 'function'
        else:
            what = 'object'
    if what == 'class':
        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
                              config=config)
    elif what in ('function', 'method'):
        return SphinxFunctionDoc(obj, doc=doc, config=config)
    else:
        if doc is None:
            doc = pydoc.getdoc(obj)
        return SphinxObjDoc(obj, doc, config=config)
| gpl-3.0 |
jrabbit/ubotu-fr | plugins/plugins/Topic/config.py | 6 | 3589 | ###
# Copyright (c) 2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
    # This will be called by supybot to configure this module.  advanced is
    # a bool that specifies whether the user identified himself as an advanced
    # user or not.  You should effect your configuration by manipulating the
    # registry as appropriate.
    # (The questions helpers are imported for use in interactive prompts.)
    from supybot.questions import expect, anything, something, yn
    # Register the plugin; the second argument marks it as enabled by default.
    conf.registerPlugin('Topic', True)
class TopicFormat(registry.String):
    # NOTE(review): this docstring appears to double as the user-visible
    # validation message shown when setValue rejects input -- keep its
    # wording intact (confirm against supybot's registry error handling).
    "Value must include $topic, otherwise the actual topic would be left out."
    def setValue(self, v):
        # Accept either substitution spelling; reject any format string that
        # would drop the topic text entirely.
        if '$topic' not in v and '${topic}' not in v:
            self.error()
        else:
            registry.String.setValue(self, v)
Topic = conf.registerPlugin('Topic')
conf.registerChannelValue(Topic, 'separator',
registry.StringSurroundedBySpaces(' || ', """Determines what separator is
used between individually added topics in the channel topic."""))
conf.registerChannelValue(Topic, 'format',
TopicFormat('$topic ($nick)', """Determines what format is used to add
topics in the topic. All the standard substitutes apply, in addition to
"$topic" for the topic itself."""))
conf.registerChannelValue(Topic, 'recognizeTopiclen',
registry.Boolean(True, """Determines whether the bot will recognize the
TOPICLEN value sent to it by the server and thus refuse to send TOPICs
longer than the TOPICLEN. These topics are likely to be truncated by the
server anyway, so this defaults to True."""))
conf.registerChannelValue(Topic, 'default',
registry.String('', """Determines what the default topic for the channel
is. This is used by the default command to set this topic."""))
conf.registerGroup(Topic, 'undo')
conf.registerChannelValue(Topic.undo, 'max',
registry.NonNegativeInteger(10, """Determines the number of previous
topics to keep around in case the undo command is called."""))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause |
chubin/wttr.in | lib/buttons.py | 1 | 1958 | TWITTER_BUTTON = """
<a href="https://twitter.com/igor_chubin?ref_src=twsrc%5Etfw" class="twitter-follow-button" data-show-count="false">Follow @igor_chubin</a><script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
"""
GITHUB_BUTTON = """
<a aria-label="Star chubin/wttr.in on GitHub" data-count-aria-label="# stargazers on GitHub" data-count-api="/repos/chubin/wttr.in#stargazers_count" data-count-href="/chubin/wttr.in/stargazers" data-show-count="true" data-icon="octicon-star" href="https://github.com/chubin/wttr.in" class="github-button">wttr.in</a>
"""
GITHUB_BUTTON_2 = """
<!-- Place this tag where you want the button to render. -->
<a aria-label="Star schachmat/wego on GitHub" data-count-aria-label="# stargazers on GitHub" data-count-api="/repos/schachmat/wego#stargazers_count" data-count-href="/schachmat/wego/stargazers" data-show-count="true" data-icon="octicon-star" href="https://github.com/schachmat/wego" class="github-button">wego</a>
"""
GITHUB_BUTTON_3 = """
<!-- Place this tag where you want the button to render. -->
<a aria-label="Star chubin/pyphoon on GitHub" data-count-aria-label="# stargazers on GitHub" data-count-api="/repos/chubin/pyphoon#stargazers_count" data-count-href="/chubin/pyphoon/stargazers" data-show-count="true" data-icon="octicon-star" href="https://github.com/chubin/pyphoon" class="github-button">pyphoon</a>
"""
GITHUB_BUTTON_FOOTER = """
<!-- Place this tag right after the last button or just before your close body tag. -->
<script async defer id="github-bjs" src="https://buttons.github.io/buttons.js"></script>
"""
def add_buttons(output):
    """
    Inject the social-widget markup just before the closing </body> tag.
    """
    widgets = (TWITTER_BUTTON
               + GITHUB_BUTTON
               + GITHUB_BUTTON_3
               + GITHUB_BUTTON_2
               + GITHUB_BUTTON_FOOTER)
    return output.replace('</body>', widgets + '</body>')
| apache-2.0 |
AesopMatt/PersephoneWiki | skins/vector/cssjanus/cssjanus.py | 165 | 21248 | #!/usr/bin/python
#
# Copyright 2008 Google Inc. All Rights Reserved.
"""Converts a LeftToRight Cascading Style Sheet into a RightToLeft one.
This is a utility script for replacing "left" oriented things in a CSS file
like float, padding, margin with "right" oriented values.
It also does the opposite.
The goal is to be able to conditionally serve one large, cat'd, compiled CSS
file appropriate for LeftToRight oriented languages and RightToLeft ones.
This utility will hopefully help your structural layout done in CSS in
terms of its RTL compatibility. It will not help with some of the more
complicated bidirectional text issues.
"""
__author__ = 'elsigh@google.com (Lindsey Simon)'
__version__ = '0.1'
import logging
import re
import sys
import getopt
import os
import csslex
logging.getLogger().setLevel(logging.INFO)
# Global for the command line flags.
SWAP_LTR_RTL_IN_URL_DEFAULT = False
SWAP_LEFT_RIGHT_IN_URL_DEFAULT = False
FLAGS = {'swap_ltr_rtl_in_url': SWAP_LTR_RTL_IN_URL_DEFAULT,
'swap_left_right_in_url': SWAP_LEFT_RIGHT_IN_URL_DEFAULT}
# Generic token delimiter character.
TOKEN_DELIMITER = '~'
# This is a temporary match token we use when swapping strings.
TMP_TOKEN = '%sTMP%s' % (TOKEN_DELIMITER, TOKEN_DELIMITER)
# Token to be used for joining lines.
TOKEN_LINES = '%sJ%s' % (TOKEN_DELIMITER, TOKEN_DELIMITER)
# Global constant text strings for CSS value matches.
LTR = 'ltr'
RTL = 'rtl'
LEFT = 'left'
RIGHT = 'right'
# This is a lookbehind match to ensure that we don't replace instances
# of our string token (left, rtl, etc...) if there's a letter in front of it.
# Specifically, this prevents replacements like 'background: url(bright.png)'.
LOOKBEHIND_NOT_LETTER = r'(?<![a-zA-Z])'
# This is a lookahead match to make sure we don't replace left and right
# in actual classnames, so that we don't break the HTML/CSS dependencies.
# Read literally, it says ignore cases where the word left, for instance, is
# directly followed by valid classname characters and a curly brace.
# ex: .column-left {float: left} will become .column-left {float: right}
LOOKAHEAD_NOT_OPEN_BRACE = (r'(?!(?:%s|%s|%s|#|\:|\.|\,|\+|>)*?{)' %
(csslex.NMCHAR, TOKEN_LINES, csslex.SPACE))
# These two lookaheads are to test whether or not we are within a
# background: url(HERE) situation.
# Ref: http://www.w3.org/TR/CSS21/syndata.html#uri
VALID_AFTER_URI_CHARS = r'[\'\"]?%s' % csslex.WHITESPACE
LOOKAHEAD_NOT_CLOSING_PAREN = r'(?!%s?%s\))' % (csslex.URL_CHARS,
VALID_AFTER_URI_CHARS)
LOOKAHEAD_FOR_CLOSING_PAREN = r'(?=%s?%s\))' % (csslex.URL_CHARS,
VALID_AFTER_URI_CHARS)
# Compile a regex to swap left and right values in 4 part notations.
# We need to match negatives and decimal numeric values.
# ex. 'margin: .25em -2px 3px 0' becomes 'margin: .25em 0 3px -2px'.
POSSIBLY_NEGATIVE_QUANTITY = r'((?:-?%s)|(?:inherit|auto))' % csslex.QUANTITY
POSSIBLY_NEGATIVE_QUANTITY_SPACE = r'%s%s%s' % (POSSIBLY_NEGATIVE_QUANTITY,
csslex.SPACE,
csslex.WHITESPACE)
FOUR_NOTATION_QUANTITY_RE = re.compile(r'%s%s%s%s' %
(POSSIBLY_NEGATIVE_QUANTITY_SPACE,
POSSIBLY_NEGATIVE_QUANTITY_SPACE,
POSSIBLY_NEGATIVE_QUANTITY_SPACE,
POSSIBLY_NEGATIVE_QUANTITY),
re.I)
COLOR = r'(%s|%s)' % (csslex.NAME, csslex.HASH)
COLOR_SPACE = r'%s%s' % (COLOR, csslex.SPACE)
FOUR_NOTATION_COLOR_RE = re.compile(r'(-color%s:%s)%s%s%s(%s)' %
(csslex.WHITESPACE,
csslex.WHITESPACE,
COLOR_SPACE,
COLOR_SPACE,
COLOR_SPACE,
COLOR),
re.I)
# Compile the cursor resize regexes
CURSOR_EAST_RE = re.compile(LOOKBEHIND_NOT_LETTER + '([ns]?)e-resize')
CURSOR_WEST_RE = re.compile(LOOKBEHIND_NOT_LETTER + '([ns]?)w-resize')
# Matches the condition where we need to replace the horizontal component
# of a background-position value when expressed in horizontal percentage.
# Had to make two regexes because in the case of position-x there is only
# one quantity, and otherwise we don't want to match and change cases with only
# one quantity.
BG_HORIZONTAL_PERCENTAGE_RE = re.compile(r'background(-position)?(%s:%s)'
'([^%%]*?)(%s)%%'
'(%s(?:%s|%s))' % (csslex.WHITESPACE,
csslex.WHITESPACE,
csslex.NUM,
csslex.WHITESPACE,
csslex.QUANTITY,
csslex.IDENT))
BG_HORIZONTAL_PERCENTAGE_X_RE = re.compile(r'background-position-x(%s:%s)'
'(%s)%%' % (csslex.WHITESPACE,
csslex.WHITESPACE,
csslex.NUM))
# Matches the opening of a body selector.
BODY_SELECTOR = r'body%s{%s' % (csslex.WHITESPACE, csslex.WHITESPACE)
# Matches anything up until the closing of a selector.
CHARS_WITHIN_SELECTOR = r'[^\}]*?'
# Matches the direction property in a selector.
DIRECTION_RE = r'direction%s:%s' % (csslex.WHITESPACE, csslex.WHITESPACE)
# These allow us to swap "ltr" with "rtl" and vice versa ONLY within the
# body selector and on the same line.
BODY_DIRECTION_LTR_RE = re.compile(r'(%s)(%s)(%s)(ltr)' %
(BODY_SELECTOR, CHARS_WITHIN_SELECTOR,
DIRECTION_RE),
re.I)
BODY_DIRECTION_RTL_RE = re.compile(r'(%s)(%s)(%s)(rtl)' %
(BODY_SELECTOR, CHARS_WITHIN_SELECTOR,
DIRECTION_RE),
re.I)
# Allows us to swap "direction:ltr" with "direction:rtl" and
# vice versa anywhere in a line.
DIRECTION_LTR_RE = re.compile(r'%s(ltr)' % DIRECTION_RE)
DIRECTION_RTL_RE = re.compile(r'%s(rtl)' % DIRECTION_RE)
# We want to be able to switch left with right and vice versa anywhere
# we encounter left/right strings, EXCEPT inside the background:url(). The next
# two regexes are for that purpose. We have alternate IN_URL versions of the
# regexes compiled in case the user passes the flag that they do
# actually want to have left and right swapped inside of background:urls.
LEFT_RE = re.compile('%s(%s)%s%s' % (LOOKBEHIND_NOT_LETTER,
LEFT,
LOOKAHEAD_NOT_CLOSING_PAREN,
LOOKAHEAD_NOT_OPEN_BRACE),
re.I)
RIGHT_RE = re.compile('%s(%s)%s%s' % (LOOKBEHIND_NOT_LETTER,
RIGHT,
LOOKAHEAD_NOT_CLOSING_PAREN,
LOOKAHEAD_NOT_OPEN_BRACE),
re.I)
LEFT_IN_URL_RE = re.compile('%s(%s)%s' % (LOOKBEHIND_NOT_LETTER,
LEFT,
LOOKAHEAD_FOR_CLOSING_PAREN),
re.I)
RIGHT_IN_URL_RE = re.compile('%s(%s)%s' % (LOOKBEHIND_NOT_LETTER,
RIGHT,
LOOKAHEAD_FOR_CLOSING_PAREN),
re.I)
LTR_IN_URL_RE = re.compile('%s(%s)%s' % (LOOKBEHIND_NOT_LETTER,
LTR,
LOOKAHEAD_FOR_CLOSING_PAREN),
re.I)
RTL_IN_URL_RE = re.compile('%s(%s)%s' % (LOOKBEHIND_NOT_LETTER,
RTL,
LOOKAHEAD_FOR_CLOSING_PAREN),
re.I)
COMMENT_RE = re.compile('(%s)' % csslex.COMMENT, re.I)
NOFLIP_TOKEN = r'\@noflip'
# The NOFLIP_TOKEN inside of a comment. For now, this requires that comments
# be in the input, which means users of a css compiler would have to run
# this script first if they want this functionality.
NOFLIP_ANNOTATION = r'/\*%s%s%s\*/' % (csslex.WHITESPACE,
NOFLIP_TOKEN,
csslex. WHITESPACE)
# After a NOFLIP_ANNOTATION, and within a class selector, we want to be able
# to set aside a single rule not to be flipped. We can do this by matching
# our NOFLIP annotation and then using a lookahead to make sure there is not
# an opening brace before the match.
NOFLIP_SINGLE_RE = re.compile(r'(%s%s[^;}]+;?)' % (NOFLIP_ANNOTATION,
LOOKAHEAD_NOT_OPEN_BRACE),
re.I)
# After a NOFLIP_ANNOTATION, we want to grab anything up until the next } which
# means the entire following class block. This will prevent all of its
# declarations from being flipped.
NOFLIP_CLASS_RE = re.compile(r'(%s%s})' % (NOFLIP_ANNOTATION,
CHARS_WITHIN_SELECTOR),
re.I)
class Tokenizer:
  """Replaces any CSS comments with string tokens and vice versa."""

  def __init__(self, token_re, token_string):
    """Constructor for the Tokenizer.

    Args:
      token_re: A regex for the string to be replace by a token.
      token_string: The string to put between token delimiters when tokenizing.
    """
    logging.debug('Tokenizer::init token_string=%s' % token_string)
    self.token_re = token_re
    self.token_string = token_string
    # Original matched strings, in match order; index i corresponds to
    # token number i + 1.
    self.originals = []

  def Tokenize(self, line):
    """Replaces any string matching token_re in line with string tokens.

    Passing a callable to re.sub means *every* occurrence is replaced:
    TokenizeMatches is invoked once per match and records each original.

    Args:
      line: A line to replace token_re matches in.

    Returns:
      line: A line with token_re matches tokenized.
    """
    line = self.token_re.sub(self.TokenizeMatches, line)
    logging.debug('Tokenizer::Tokenize returns: %s' % line)
    return line

  def DeTokenize(self, line):
    """Replaces tokens with the original string.

    Args:
      line: A line with tokens.

    Returns:
      line with any tokens replaced by the original string.
    """
    # Rebuild each token exactly as TokenizeMatches emitted it (1-based).
    index = 0
    for original in self.originals:
      index += 1
      token = '%s%s_%s%s' % (TOKEN_DELIMITER, self.token_string, index,
                             TOKEN_DELIMITER)
      line = line.replace(token, original)
      logging.debug('Tokenizer::DeTokenize i:%s w/%s' % (index - 1, token))
    logging.debug('Tokenizer::DeTokenize returns: %s' % line)
    return line

  def TokenizeMatches(self, m):
    """Replaces matches with tokens and stores the originals.

    Args:
      m: A match object.

    Returns:
      A string token which replaces the CSS comment.
    """
    logging.debug('Tokenizer::TokenizeMatches %s' % m.group(1))
    self.originals.append(m.group(1))
    return '%s%s_%s%s' % (TOKEN_DELIMITER,
                          self.token_string,
                          len(self.originals),
                          TOKEN_DELIMITER)
def FixBodyDirectionLtrAndRtl(line):
  """Replaces ltr with rtl and vice versa ONLY in the body direction.

  Args:
    line: A string to replace instances of ltr with rtl.

  Returns:
    line with direction: ltr and direction: rtl swapped only in body selector.
    e.g. 'body { direction:ltr }' becomes 'body { direction:rtl }'.
  """
  # Swap via a temporary token so the two replacements can't clobber
  # each other: ltr -> TMP, rtl -> ltr, TMP -> rtl.
  swapped = BODY_DIRECTION_RTL_RE.sub(
      '\\1\\2\\3%s' % LTR,
      BODY_DIRECTION_LTR_RE.sub('\\1\\2\\3%s' % TMP_TOKEN, line))
  swapped = swapped.replace(TMP_TOKEN, RTL)
  logging.debug('FixBodyDirectionLtrAndRtl returns: %s' % swapped)
  return swapped
def FixLeftAndRight(line):
  """Replaces left with right and vice versa in line.

  Args:
    line: A string in which to perform the replacement.

  Returns:
    line with left and right swapped, e.g.
    'padding-left: 2px; margin-right: 1px;' becomes
    'padding-right: 2px; margin-left: 1px;'.
  """
  # Three-step swap via TMP_TOKEN: left -> TMP, right -> left, TMP -> right.
  swapped = RIGHT_RE.sub(LEFT, LEFT_RE.sub(TMP_TOKEN, line))
  swapped = swapped.replace(TMP_TOKEN, RIGHT)
  logging.debug('FixLeftAndRight returns: %s' % swapped)
  return swapped
def FixLeftAndRightInUrl(line):
  """Replaces left with right and vice versa ONLY within background urls.

  Args:
    line: A string in which to replace left with right and vice versa.

  Returns:
    line with left and right swapped inside url(...) strings, e.g.
    'background:url(right.png)' becomes 'background:url(left.png)'.
  """
  # Three-step swap via TMP_TOKEN so the two directions don't collide.
  swapped = RIGHT_IN_URL_RE.sub(LEFT, LEFT_IN_URL_RE.sub(TMP_TOKEN, line))
  swapped = swapped.replace(TMP_TOKEN, RIGHT)
  logging.debug('FixLeftAndRightInUrl returns: %s' % swapped)
  return swapped
def FixLtrAndRtlInUrl(line):
  """Replaces ltr with rtl and vice versa ONLY within background urls.

  Args:
    line: A string in which to replace ltr with rtl and vice versa.

  Returns:
    line with the two direction names swapped inside url(...) strings, e.g.
    'background:url(rtl.png)' becomes 'background:url(ltr.png)'.
  """
  # Three-step swap via TMP_TOKEN so the two directions don't collide.
  swapped = RTL_IN_URL_RE.sub(LTR, LTR_IN_URL_RE.sub(TMP_TOKEN, line))
  swapped = swapped.replace(TMP_TOKEN, RTL)
  logging.debug('FixLtrAndRtlInUrl returns: %s' % swapped)
  return swapped
def FixCursorProperties(line):
  """Fixes directional CSS cursor properties.

  Args:
    line: A string to fix CSS cursor properties in.

  Returns:
    line with east/west mirrored in resize cursors, e.g.
    'cursor: ne-resize' becomes 'cursor: nw-resize'.
  """
  # Swap via TMP_TOKEN so the e->w and w->e replacements don't collide.
  swapped = CURSOR_EAST_RE.sub('\\1' + TMP_TOKEN, line)
  swapped = CURSOR_WEST_RE.sub('\\1e-resize', swapped)
  swapped = swapped.replace(TMP_TOKEN, 'w-resize')
  logging.debug('FixCursorProperties returns: %s' % swapped)
  return swapped
def FixFourPartNotation(line):
  """Fixes the second and fourth positions in 4 part CSS notation.

  Args:
    line: A string to fix 4 part CSS notation in.

  Returns:
    line with the 2nd and 4th values exchanged, e.g.
    'padding: 1px 2px 3px 4px' becomes 'padding: 1px 4px 3px 2px'.
  """
  flipped = FOUR_NOTATION_QUANTITY_RE.sub('\\1 \\4 \\3 \\2', line)
  flipped = FOUR_NOTATION_COLOR_RE.sub('\\1\\2 \\5 \\4 \\3', flipped)
  logging.debug('FixFourPartNotation returns: %s' % flipped)
  return flipped
def FixBackgroundPosition(line):
  """Fixes horizontal background percentage values in line.

  Args:
    line: A string to fix horizontal background position values in.

  Returns:
    line with horizontal percentages mirrored around 100%.
  """
  flipped = BG_HORIZONTAL_PERCENTAGE_RE.sub(CalculateNewBackgroundPosition,
                                            line)
  flipped = BG_HORIZONTAL_PERCENTAGE_X_RE.sub(CalculateNewBackgroundPositionX,
                                              flipped)
  logging.debug('FixBackgroundPosition returns: %s' % flipped)
  return flipped
def CalculateNewBackgroundPosition(m):
  """re.sub callback that flips a horizontal background-position percentage.

  This function should be used as an argument to re.sub since it needs to
  perform replacement specific calculations.

  Args:
    m: A match object (from BG_HORIZONTAL_PERCENTAGE_RE).

  Returns:
    A string with the horizontal background position percentage fixed, e.g.
    'background-position: 75% 50%' becomes 'background-position: 25% 50%'.
  """
  # Mirror the offset: an LTR x% becomes (100 - x)% under RTL.
  flipped_x = str(100 - int(m.group(4)))
  # group(1) is the optional '-position' part and may be None; coerce to ''.
  position_string = m.group(1) or ''
  return 'background%s%s%s%s%%%s' % (position_string, m.group(2), m.group(3),
                                     flipped_x, m.group(5))
def CalculateNewBackgroundPositionX(m):
  """re.sub callback that flips a background-position-x percentage.

  This function should be used as an argument to re.sub since it needs to
  perform replacement specific calculations.

  Args:
    m: A match object (from BG_HORIZONTAL_PERCENTAGE_X_RE).

  Returns:
    A string with the background-position-x percentage fixed, e.g.
    'background-position-x: 75%' becomes 'background-position-x: 25%'.
  """
  # Mirror the offset around 100%.
  flipped_x = str(100 - int(m.group(2)))
  return 'background-position-x%s%s%%' % (m.group(1), flipped_x)
def ChangeLeftToRightToLeft(lines,
                            swap_ltr_rtl_in_url=None,
                            swap_left_right_in_url=None):
  """Runs the whole flipping pipeline over a list of CSS lines.

  Args:
    lines: A list of CSS lines.
    swap_ltr_rtl_in_url: Overrides the global flag when not None.
    swap_left_right_in_url: Overrides the global flag when not None.

  Returns:
    The same lines, but with left and right fixes.
  """
  global FLAGS
  logging.debug('ChangeLeftToRightToLeft swap_ltr_rtl_in_url=%s, '
                'swap_left_right_in_url=%s' % (swap_ltr_rtl_in_url,
                                               swap_left_right_in_url))
  # Fall back to the command-line flags when no explicit override was given.
  if swap_ltr_rtl_in_url is None:
    swap_ltr_rtl_in_url = FLAGS['swap_ltr_rtl_in_url']
  if swap_left_right_in_url is None:
    swap_left_right_in_url = FLAGS['swap_left_right_in_url']
  logging.debug('LINES COUNT: %s' % len(lines))
  # Work on a single string; line boundaries are preserved as TOKEN_LINES.
  stream = TOKEN_LINES.join(lines)
  # Shield /* @noflip */ annotated single rules and class blocks, then all
  # comments, from the rewrite by tokenizing them away.
  noflip_single_tokenizer = Tokenizer(NOFLIP_SINGLE_RE, 'NOFLIP_SINGLE')
  stream = noflip_single_tokenizer.Tokenize(stream)
  noflip_class_tokenizer = Tokenizer(NOFLIP_CLASS_RE, 'NOFLIP_CLASS')
  stream = noflip_class_tokenizer.Tokenize(stream)
  comment_tokenizer = Tokenizer(COMMENT_RE, 'C')
  stream = comment_tokenizer.Tokenize(stream)
  # Apply the various left/right orientation fixes.
  stream = FixBodyDirectionLtrAndRtl(stream)
  if swap_left_right_in_url:
    stream = FixLeftAndRightInUrl(stream)
  if swap_ltr_rtl_in_url:
    stream = FixLtrAndRtlInUrl(stream)
  stream = FixLeftAndRight(stream)
  stream = FixCursorProperties(stream)
  stream = FixFourPartNotation(stream)
  stream = FixBackgroundPosition(stream)
  # Restore the protected regions.
  stream = noflip_single_tokenizer.DeTokenize(stream)
  stream = noflip_class_tokenizer.DeTokenize(stream)
  stream = comment_tokenizer.DeTokenize(stream)
  # Split the single stream back into a list of lines.
  return stream.split(TOKEN_LINES)
def usage():
  """Prints out usage information."""
  # Parenthesized single-argument print behaves identically as a Python 2
  # print statement and as a Python 3 function call, so this stays portable.
  print('Usage:')
  print(' ./cssjanus.py < file.css > file-rtl.css')
  print('Flags:')
  print(' --swap_left_right_in_url: Fixes "left"/"right" string within urls.')
  print(' Ex: ./cssjanus.py --swap_left_right_in_url < file.css > file_rtl.css')
  print(' --swap_ltr_rtl_in_url: Fixes "ltr"/"rtl" string within urls.')
  print(' Ex: ./cssjanus --swap_ltr_rtl_in_url < file.css > file_rtl.css')
def setflags(opts):
  """Parse the passed in command line arguments and set the FLAGS global.

  Args:
    opts: getopt iterable intercepted from argv.
  """
  global FLAGS
  for option, value in opts:
    logging.debug('opt: %s, arg: %s' % (option, value))
    # The options are mutually exclusive per iteration, so a flat chain of
    # independent tests is equivalent to the if/elif ladder.
    if option in ('-h', '--help'):
      usage()
      sys.exit()
    if option in ('-d', '--debug'):
      logging.getLogger().setLevel(logging.DEBUG)
    if option == '--swap_ltr_rtl_in_url':
      FLAGS['swap_ltr_rtl_in_url'] = True
    if option == '--swap_left_right_in_url':
      FLAGS['swap_left_right_in_url'] = True
def main(argv):
  """Sends stdin lines to ChangeLeftToRightToLeft and writes to stdout."""
  long_options = ['help', 'debug',
                  'swap_left_right_in_url',
                  'swap_ltr_rtl_in_url']
  try:
    opts, _ = getopt.getopt(argv, 'hd', long_options)
  except getopt.GetoptError:
    usage()
    sys.exit(2)
  # Parse and set the flags, then run the full pipeline over stdin.
  setflags(opts)
  flipped = ChangeLeftToRightToLeft(sys.stdin.readlines())
  sys.stdout.write(''.join(flipped))
| gpl-2.0 |
nuuuboo/odoo | addons/calendar/__init__.py | 391 | 1038 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import calendar
import controllers
import contacts
| agpl-3.0 |
stonegithubs/odoo | addons/crm/wizard/crm_partner_binding.py | 257 | 4570 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class crm_partner_binding(osv.osv_memory):
    """
    Handle the partner binding or generation in any CRM wizard that requires
    such feature, like the lead2opportunity wizard, or the
    phonecall2opportunity wizard. Try to find a matching partner from the
    CRM model's information (name, email, phone number, etc) or create a new
    one on the fly.
    Use it like a mixin with the wizard of your choice.
    """
    _name = 'crm.partner.binding'
    _description = 'Handle partner binding or generation in CRM wizards.'
    _columns = {
        'action': fields.selection([
                ('exist', 'Link to an existing customer'),
                ('create', 'Create a new customer'),
                ('nothing', 'Do not link to a customer')
            ], 'Related Customer', required=True),
        'partner_id': fields.many2one('res.partner', 'Customer'),
    }

    def _find_matching_partner(self, cr, uid, context=None):
        """
        Try to find a matching partner regarding the active model data, like
        the customer's name, email, phone number, etc.

        :return int partner_id if any, False otherwise
        """
        if context is None:
            context = {}
        partner_id = False
        partner_obj = self.pool.get('res.partner')

        # The active model has to be a lead or a phonecall.  Initialize
        # active_model first: previously the variable was left unbound when
        # the wizard was launched from any other model, raising NameError
        # at the "if (active_model)" test below.
        active_model = False
        if (context.get('active_model') == 'crm.lead') and context.get('active_id'):
            active_model = self.pool.get('crm.lead').browse(cr, uid, context.get('active_id'), context=context)
        elif (context.get('active_model') == 'crm.phonecall') and context.get('active_id'):
            active_model = self.pool.get('crm.phonecall').browse(cr, uid, context.get('active_id'), context=context)

        # Find the best matching partner for the active model
        if active_model:
            # A partner is set already
            if active_model.partner_id:
                partner_id = active_model.partner_id.id
            # Search through the existing partners based on the lead's email
            elif active_model.email_from:
                partner_ids = partner_obj.search(cr, uid, [('email', '=', active_model.email_from)], context=context)
                if partner_ids:
                    partner_id = partner_ids[0]
            # Search through the existing partners based on the lead's partner or contact name
            elif active_model.partner_name:
                partner_ids = partner_obj.search(cr, uid, [('name', 'ilike', '%'+active_model.partner_name+'%')], context=context)
                if partner_ids:
                    partner_id = partner_ids[0]
            elif active_model.contact_name:
                partner_ids = partner_obj.search(cr, uid, [
                        ('name', 'ilike', '%'+active_model.contact_name+'%')], context=context)
                if partner_ids:
                    partner_id = partner_ids[0]
        return partner_id

    def default_get(self, cr, uid, fields, context=None):
        """Pre-fill the wizard: link to the matching partner when one is
        found, otherwise propose creating a new one."""
        res = super(crm_partner_binding, self).default_get(cr, uid, fields, context=context)
        partner_id = self._find_matching_partner(cr, uid, context=context)

        if 'action' in fields and not res.get('action'):
            res['action'] = partner_id and 'exist' or 'create'
        if 'partner_id' in fields:
            res['partner_id'] = partner_id
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mricon/grokmirror | grokmirror/fsck.py | 2 | 54687 | # -*- coding: utf-8 -*-
# Copyright (C) 2013-2020 by The Linux Foundation and contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import grokmirror
import logging
import time
import json
import random
import datetime
import shutil
import gc
import fnmatch
import io
import smtplib
from pathlib import Path
from email.message import EmailMessage
from fcntl import lockf, LOCK_EX, LOCK_UN, LOCK_NB
# default basic logger. We override it later.
logger = logging.getLogger(__name__)
def log_errors(fullpath, cmdargs, lines):
    """Log fsck error output and persist it to grokmirror.fsck.err in the repo.

    At most 11 lines are written/logged; a summary line is emitted for the rest.
    """
    logger.critical('%s reports errors:', fullpath)
    errfile = os.path.join(fullpath, 'grokmirror.fsck.err')
    with open(errfile, 'w') as fh:
        fh.write('# Date: %s\n' % datetime.datetime.today().strftime('%F'))
        fh.write('# Cmd : git %s\n' % ' '.join(cmdargs))
        for count, line in enumerate(lines, 1):
            fh.write('%s\n' % line)
            logger.critical('\t%s', line)
            if count > 10:
                logger.critical('\t [ %s more lines skipped ]', len(lines) - 10)
                logger.critical('\t [ see %s/grokmirror.fsck.err ]', os.path.basename(fullpath))
                break
def gen_preload_bundle(fullpath, config):
    """Write a full bundle of the repo into the configured preload directory."""
    outdir = config['fsck'].get('preload_bundle_outdir')
    Path(outdir).mkdir(parents=True, exist_ok=True)
    # Strip the '.git' suffix from the repo directory name.
    bname = '%s.bundle' % os.path.basename(fullpath)[:-4]
    logger.info(' bundling: %s', bname)
    grokmirror.run_git_command(
        fullpath, ['bundle', 'create', os.path.join(outdir, bname), '--all'])
def get_blob_set(fullpath):
    """Return the repo's blobs as ({(hash, size), ...}, total_size).

    Results are cached in grokmirror.blobs inside the repo dir; a cache older
    than 30 days (hardcoded) is discarded and regenerated via git cat-file.
    Only meaningful for repos not using alternates.
    """
    blobcache = os.path.join(fullpath, 'grokmirror.blobs')
    # Age out a stale cache so the unlink below forces regeneration.
    if os.path.exists(blobcache):
        expage = time.time() - 86400 * 30
        if os.stat(blobcache).st_mtime < expage:
            os.unlink(blobcache)

    bset = set()
    size = 0
    try:
        with open(blobcache) as fh:
            for line in fh:
                if line.startswith('#'):
                    continue
                chunks = line.split()
                bsize = int(chunks[1])
                size += bsize
                bset.add((chunks[0], bsize))
        return bset, size
    except FileNotFoundError:
        pass

    # No usable cache: enumerate every object and keep only the blobs.
    logger.info(' bloblist: %s', fullpath)
    gitargs = ['cat-file', '--batch-all-objects', '--batch-check', '--unordered']
    retcode, output, error = grokmirror.run_git_command(fullpath, gitargs)
    if retcode == 0:
        with open(blobcache, 'w') as fh:
            fh.write('# Blobs and sizes used for sibling calculation\n')
            for line in output.split('\n'):
                if line.find(' blob ') < 0:
                    continue
                chunks = line.strip().split()
                fh.write(f'{chunks[0]} {chunks[2]}\n')
                bsize = int(chunks[2])
                size += bsize
                bset.add((chunks[0], bsize))
    return bset, size
def check_sibling_repos_by_blobs(bset1, bsize1, bset2, bsize2, ratio):
    """Decide whether two repos share enough blob content to be siblings.

    :param bset1: set of (blob_hash, blob_size) tuples for the first repo
    :param bsize1: total blob size of the first repo
    :param bset2: set of (blob_hash, blob_size) tuples for the second repo
    :param bsize2: total blob size of the second repo
    :param ratio: minimum percentage of shared content required on each side
    :returns: True if both repos share at least ratio % of their content
    """
    iset = bset1.intersection(bset2)
    if not len(iset):
        return False
    # Total size of the blobs present in both repos
    isize = sum(bsize for _, bsize in iset)
    # Both repos should share at least ratio % of blobs in them
    ratio1 = int(isize / bsize1 * 100)
    logger.debug('isize=%s, bsize1=%s, ratio1=%s', isize, bsize1, ratio1)
    ratio2 = int(isize / bsize2 * 100)
    # Bugfix: this debug line previously logged ratio1 again instead of ratio2.
    logger.debug('isize=%s, bsize2=%s ratio2=%s', isize, bsize2, ratio2)
    if ratio1 >= ratio and ratio2 >= ratio:
        return True
    return False
def find_siblings_by_blobs(obstrepo, obstdir, ratio=75):
    """Find objstore repos in *obstdir* sharing at least *ratio* percent of blob data with *obstrepo*."""
    siblings = set()
    oset, osize = get_blob_set(obstrepo)
    for candidate in grokmirror.find_all_gitdirs(obstdir, normalize=True, exclude_objstore=False):
        # Don't compare the repo with itself
        if candidate == obstrepo:
            continue
        logger.debug('Comparing blobs between %s and %s', obstrepo, candidate)
        cset, csize = get_blob_set(candidate)
        if check_sibling_repos_by_blobs(oset, osize, cset, csize, ratio):
            logger.info(' siblings: %s and %s', os.path.basename(obstrepo), os.path.basename(candidate))
            siblings.add(candidate)
    return siblings
def merge_siblings(siblings, amap):
    """Merge a set of sibling objstore repos into the one with the most remotes.

    All child repos tracked by the other siblings are re-added as remotes of
    the chosen destination, fetched into it, and repointed to use it as their
    alternates.  Both *siblings* and *amap* are mutated in place; returns the
    destination repo path (mdest).

    NOTE(review): if every sibling is orphaned (absent from amap), mdest stays
    None and siblings.remove(mdest) would raise KeyError -- presumably callers
    only pass sets with at least one live sibling; verify against call sites.
    """
    mdest = None
    rcount = 0
    # Who has the most remotes?
    for sibling in set(siblings):
        if sibling not in amap or not len(amap[sibling]):
            # Orphaned sibling, ignore it -- it will get cleaned up
            siblings.remove(sibling)
            continue
        s_remotes = grokmirror.list_repo_remotes(sibling)
        if len(s_remotes) > rcount:
            mdest = sibling
            rcount = len(s_remotes)
    # Migrate all siblings into the repo with most remotes
    siblings.remove(mdest)
    for sibling in siblings:
        logger.info('%s: merging into %s', os.path.basename(sibling), os.path.basename(mdest))
        s_remotes = grokmirror.list_repo_remotes(sibling, withurl=True)
        for virtref, childpath in s_remotes:
            if childpath not in amap[sibling]:
                # The child repo isn't even using us
                args = ['remote', 'remove', virtref]
                grokmirror.run_git_command(sibling, args)
                continue
            logger.info('    moving: %s', childpath) if False else logger.info('   moving: %s', childpath) if False else None
            logger.info(' moving: %s', childpath)
            success = grokmirror.add_repo_to_objstore(mdest, childpath)
            if not success:
                logger.critical('Could not add %s to %s', childpath, mdest)
                continue
            logger.info(' : fetching into %s', os.path.basename(mdest))
            success = grokmirror.fetch_objstore_repo(mdest, childpath)
            if not success:
                logger.critical('Failed to fetch %s from %s to %s', childpath, os.path.basename(sibling),
                                os.path.basename(mdest))
                continue
            logger.info(' : repointing alternates')
            grokmirror.set_altrepo(childpath, mdest)
            # Keep the alternates map current so later iterations see the move
            amap[sibling].remove(childpath)
            amap[mdest].add(childpath)
            args = ['remote', 'remove', virtref]
            grokmirror.run_git_command(sibling, args)
            logger.info(' : done')
    return mdest
def check_reclone_error(fullpath, config, errors):
    """Match error lines against fsck.reclone_on_errors and request an auto-reclone.

    Args:
        fullpath: full path to the repository that produced the errors
        config: parsed grokmirror config object
        errors: list of error lines from fsck/repack/prune

    Repos used as alternates by others are never auto-recloned, since
    recloning them would corrupt the borrowers.
    """
    reclone = None
    toplevel = os.path.realpath(config['core'].get('toplevel'))
    errlist = config['fsck'].get('reclone_on_errors', '').split('\n')
    for line in errors:
        for estring in errlist:
            if not estring:
                # Bugfix: splitting an unset option yields [''], and an
                # empty pattern matches every line -- skip empty patterns
                # so arbitrary errors don't trigger recloning.
                continue
            if line.find(estring) != -1:
                # is this repo used for alternates?
                gitdir = '/' + os.path.relpath(fullpath, toplevel).lstrip('/')
                if grokmirror.is_alt_repo(toplevel, gitdir):
                    logger.critical('\tused for alternates, not requesting auto-reclone')
                    return
                else:
                    reclone = line
                    logger.critical('\trequested auto-reclone')
                    break
        if reclone is not None:
            break
    if reclone is None:
        return
    set_repo_reclone(fullpath, reclone)
def get_repo_size(fullpath):
    """Return the approximate on-disk size of the repo in KiB.

    Sums the size, size-pack and size-garbage fields reported by
    git count-objects; fields that are missing or unparseable count as 0.
    """
    obj_info = grokmirror.get_repo_obj_info(fullpath)
    kbsize = 0
    for field in ('size', 'size-pack', 'size-garbage'):
        try:
            kbsize += int(obj_info[field])
        except (KeyError, ValueError):
            # Field absent or not a number -- ignore it
            pass
    logger.debug('%s size: %s kb', fullpath, kbsize)
    return kbsize
def get_human_size(kbsize):
    """Render a size in KiB as a human-readable string (KiB/MiB/GiB/TiB).

    Args:
        kbsize: size in kibibytes (int or float)

    Returns a string like "512.00 KiB" or "2.00 MiB".
    """
    num = kbsize
    for unit in ['Ki', 'Mi', 'Gi']:
        if abs(num) < 1024.0:
            return "%3.2f %sB" % (num, unit)
        num /= 1024.0
    # Bugfix: the original "%.2f%s TiB" % num had two conversion
    # specifiers but only one argument and raised TypeError for any
    # terabyte-scale value.
    return "%3.2f TiB" % num
def set_repo_reclone(fullpath, reason):
    """Drop a grokmirror.reclone marker into the repo so grok-pull re-clones it."""
    marker = os.path.join(fullpath, 'grokmirror.reclone')
    # Have we already requested a reclone?
    if os.path.exists(marker):
        logger.debug('Already requested repo reclone for %s', fullpath)
        return
    with open(marker, 'w') as fh:
        fh.write('Requested by grok-fsck due to error: %s' % reason)
def run_git_prune(fullpath, config):
    """Run `git prune` on the repo; returns False if non-ignored errors occurred.

    WARNING: We assume you've already verified that it's safe to do so
    (see is_safe_to_prune).
    """
    prune_ok = True
    isprecious = grokmirror.is_precious(fullpath)
    if isprecious:
        # Temporarily drop preciousObjects for the duration of the prune
        set_precious_objects(fullpath, False)
    # We set expire to yesterday in order to avoid race conditions
    # in repositories that are actively being accessed at the time of
    # running the prune job.
    gitargs = ['prune', '--expire=yesterday']
    logger.info(' prune: pruning')
    ecode, out, err = grokmirror.run_git_command(fullpath, gitargs)
    if err:
        warn = remove_ignored_errors(err, config)
        if warn:
            prune_ok = False
            log_errors(fullpath, gitargs, warn)
            check_reclone_error(fullpath, config, warn)
    if isprecious:
        # Restore preciousObjects now that we're done
        set_precious_objects(fullpath, True)
    return prune_ok
def is_safe_to_prune(fullpath, config):
    """Decide whether pruning *fullpath* could corrupt repos borrowing objects from it."""
    if config['fsck'].get('prune', 'yes') != 'yes':
        logger.debug('Pruning disabled in config file')
        return False
    toplevel = os.path.realpath(config['core'].get('toplevel'))
    obstdir = os.path.realpath(config['core'].get('objstore'))
    gitdir = '/' + os.path.relpath(fullpath, toplevel).lstrip('/')
    if grokmirror.is_obstrepo(fullpath, obstdir):
        # We only prune if all repos pointing to us are public
        remotes = set(grokmirror.list_repo_remotes(fullpath, withurl=True))
        mine = set(x[1] for x in remotes)
        amap = grokmirror.get_altrepo_map(toplevel)
        if mine != amap[fullpath]:
            logger.debug('Cannot prune %s because it is used by non-public repos', gitdir)
            return False
    elif grokmirror.is_alt_repo(toplevel, gitdir):
        logger.debug('Cannot prune %s because it is used as alternates by other repos', gitdir)
        return False
    logger.debug('%s should be safe to prune', gitdir)
    return True
def remove_ignored_errors(output, config):
    """Filter git stderr output through the fsck.ignore_errors patterns.

    Args:
        output: raw (multi-line) stderr text from a git command
        config: parsed grokmirror config object

    Returns the list of lines NOT matching any ignore pattern; matched
    lines are only reported at debug level.
    """
    ierrors = set([x.strip() for x in config['fsck'].get('ignore_errors', '').split('\n')])
    # Bugfix: when ignore_errors is unset, splitting '' yields [''] and the
    # empty pattern matches every line (''.find('') == 0), silently
    # swallowing all errors -- drop empty patterns.
    ierrors.discard('')
    debug = list()
    warn = list()
    for line in output.split('\n'):
        ignored = False
        for estring in ierrors:
            if line.find(estring) != -1:
                ignored = True
                debug.append(line)
                break
        if not ignored:
            warn.append(line)
    if debug:
        logger.debug('Stderr: %s', '\n'.join(debug))
    return warn
def run_git_repack(fullpath, config, level=1, prune=True):
    """Repack the repository, expiring reflogs and packing refs along the way.

    Args:
        fullpath: full path to the git repository
        config: parsed grokmirror config object
        level: 1 for a quick repack, >1 adds -f/--pack-kept-objects for
               optimal deltas (a "full" repack)
        prune: whether to prune afterwards; downgraded automatically if
               is_safe_to_prune() says it is not safe

    Returns True if no non-ignored errors were encountered.

    The repack flags depend on the repo's role: objstore repos, alternates
    parents, alternates children, and standalone repos each get a different
    flag set, because pruning/deduplicating shared objects in the wrong place
    can corrupt borrower repositories.
    """
    # Returns false if we hit any errors on the way
    repack_ok = True
    obstdir = os.path.realpath(config['core'].get('objstore'))
    toplevel = os.path.realpath(config['core'].get('toplevel'))
    gitdir = '/' + os.path.relpath(fullpath, toplevel).lstrip('/')
    if prune:
        # Make sure it's safe to do so
        prune = is_safe_to_prune(fullpath, config)
    if config['fsck'].get('precious', '') == 'always':
        always_precious = True
        set_precious_objects(fullpath, enabled=True)
    else:
        always_precious = False
        set_precious_objects(fullpath, enabled=False)
    set_precious_after = False
    gen_commitgraph = True
    # Figure out what our repack flags should be.
    repack_flags = list()
    rregular = config['fsck'].get('extra_repack_flags', '').split()
    if len(rregular):
        repack_flags += rregular
    full_repack_flags = ['-f', '--pack-kept-objects']
    rfull = config['fsck'].get('extra_repack_flags_full', '').split()
    if len(rfull):
        full_repack_flags += rfull
    if grokmirror.is_obstrepo(fullpath, obstdir):
        # Objstore repo: pack everything, keep kept packs unless pruning
        set_precious_after = True
        repack_flags.append('-a')
        if not prune and not always_precious:
            repack_flags.append('-k')
    elif grokmirror.is_alt_repo(toplevel, gitdir):
        # We are a parent that others borrow objects from
        set_precious_after = True
        if grokmirror.get_altrepo(fullpath):
            gen_commitgraph = False
            logger.warning(' warning : has alternates and is used by others for alternates')
            logger.warning(' : this can cause grandchild corruption')
            repack_flags.append('-A')
            repack_flags.append('-l')
        else:
            repack_flags.append('-a')
            repack_flags.append('-b')
            if not always_precious:
                repack_flags.append('-k')
    elif grokmirror.get_altrepo(fullpath):
        # we are a "child repo"
        gen_commitgraph = False
        repack_flags.append('-l')
        repack_flags.append('-A')
        if prune:
            repack_flags.append('--unpack-unreachable=yesterday')
    else:
        # we have no relationships with other repos
        repack_flags.append('-a')
        repack_flags.append('-b')
        if prune:
            repack_flags.append('--unpack-unreachable=yesterday')
    if level > 1:
        logger.info(' repack: performing a full repack for optimal deltas')
        repack_flags += full_repack_flags
    if not always_precious:
        repack_flags.append('-d')
    # If we have a logs dir, then run reflog expire
    if os.path.isdir(os.path.join(fullpath, 'logs')):
        args = ['reflog', 'expire', '--all', '--stale-fix']
        logger.info(' reflog: expiring reflogs')
        grokmirror.run_git_command(fullpath, args)
    args = ['repack'] + repack_flags
    logger.info(' repack: repacking with "%s"', ' '.join(repack_flags))
    # We always tack on -q
    args.append('-q')
    retcode, output, error = grokmirror.run_git_command(fullpath, args)
    # With newer versions of git, repack may return warnings that are safe to ignore
    # so use the same strategy to weed out things we aren't interested in seeing
    if error:
        warn = remove_ignored_errors(error, config)
        if warn:
            repack_ok = False
            log_errors(fullpath, args, warn)
            check_reclone_error(fullpath, config, warn)
    if not repack_ok:
        # No need to repack refs if repo is broken
        if set_precious_after:
            set_precious_objects(fullpath, enabled=True)
        return False
    if gen_commitgraph and config['fsck'].get('commitgraph', 'yes') == 'yes':
        grokmirror.set_git_config(fullpath, 'core.commitgraph', 'true')
        run_git_commit_graph(fullpath)
    # repacking refs requires a separate command, so run it now
    args = ['pack-refs']
    if level > 1:
        logger.info(' packrefs: repacking all refs')
        args.append('--all')
    else:
        logger.info(' packrefs: repacking refs')
    retcode, output, error = grokmirror.run_git_command(fullpath, args)
    # pack-refs shouldn't return anything, but use the same ignore_errors block
    # to weed out any future potential benign warnings
    if error:
        warn = remove_ignored_errors(error, config)
        if warn:
            repack_ok = False
            log_errors(fullpath, args, warn)
            check_reclone_error(fullpath, config, warn)
    if prune:
        repack_ok = run_git_prune(fullpath, config)
    if set_precious_after:
        set_precious_objects(fullpath, enabled=True)
    return repack_ok
def run_git_fsck(fullpath, config, conn_only=False):
    """Run git-fsck on the repo and log/report any non-ignored problems."""
    gitargs = ['fsck', '--no-progress', '--no-dangling', '--no-reflogs']
    obstdir = os.path.realpath(config['core'].get('objstore'))
    # If it's got an obstrepo, always run as connectivity-only
    altrepo = grokmirror.get_altrepo(fullpath)
    if altrepo and grokmirror.is_obstrepo(altrepo, obstdir):
        logger.debug('Repo uses objstore, forcing connectivity-only')
        conn_only = True
    if conn_only:
        gitargs.append('--connectivity-only')
        logger.info(' fsck: running with --connectivity-only')
    else:
        logger.info(' fsck: running full checks')
    ecode, out, err = grokmirror.run_git_command(fullpath, gitargs)
    # fsck reports problems on both streams, so inspect them together
    combined = out + '\n' + err
    if combined:
        warn = remove_ignored_errors(combined, config)
        if warn:
            log_errors(fullpath, gitargs, warn)
            check_reclone_error(fullpath, config, warn)
def run_git_commit_graph(fullpath):
    """Generate a commit-graph file for the repo; returns True on success.

    Args:
        fullpath: full path to the git repository
    """
    # Does our version of git support commit-graph?
    if not grokmirror.git_newer_than('2.18.0'):
        logger.debug('Git version too old, not generating commit-graph')
        # Bugfix: previously fell through and ran `commit-graph write`
        # anyway despite logging that it would not.
        return False
    logger.info(' graph: generating commit-graph')
    args = ['commit-graph', 'write']
    retcode, output, error = grokmirror.run_git_command(fullpath, args)
    if retcode == 0:
        return True
    return False
def set_precious_objects(fullpath, enabled=True):
    """Toggle extensions.preciousObjects on the repository.

    It's better to just set it blindly without checking first,
    as this results in one fewer shell-out.
    """
    logger.debug('Setting preciousObjects for %s', fullpath)
    poval = 'true' if enabled else 'false'
    grokmirror.set_git_config(fullpath, 'extensions.preciousObjects', poval)
def check_precious_objects(fullpath):
    # Thin wrapper: reports whether extensions.preciousObjects is set on the repo.
    return grokmirror.is_precious(fullpath)
def fsck_mirror(config, force=False, repack_only=False, conn_only=False,
                repack_all_quick=False, repack_all_full=False):
    """Main fsck/repack driver for a whole mirror tree.

    Walks the manifest and the fsck status file, migrates repos into
    objstore repositories where possible, merges redundant objstore repos,
    then queues and runs repack/fsck jobs on everything that is due.

    Args:
        config: parsed grokmirror config object
        force: check all repos now regardless of schedule
        repack_only: only queue repack work, skip fsck
        conn_only: pass --connectivity-only to git-fsck
        repack_all_quick: force a quick repack of every repo (implies force)
        repack_all_full: force a full repack of every repo (implies force)

    Returns 1 on configuration/status errors, 0 when another instance holds
    the lock, None on normal completion.
    """
    if repack_all_quick or repack_all_full:
        force = True
    statusfile = config['fsck'].get('statusfile')
    if not statusfile:
        logger.critical('Please define fsck.statusfile in the config')
        return 1
    st_dir = os.path.dirname(statusfile)
    if not os.path.isdir(os.path.dirname(statusfile)):
        logger.critical('Directory %s is absent', st_dir)
        return 1
    # Lock the tree to make sure we only run one instance
    lockfile = os.path.join(st_dir, '.%s.lock' % os.path.basename(statusfile))
    logger.debug('Attempting to obtain lock on %s', lockfile)
    flockh = open(lockfile, 'w')
    try:
        lockf(flockh, LOCK_EX | LOCK_NB)
    except IOError:
        logger.info('Could not obtain exclusive lock on %s', lockfile)
        logger.info('Assuming another process is running.')
        return 0
    manifile = config['core'].get('manifest')
    logger.info('Analyzing %s', manifile)
    grokmirror.manifest_lock(manifile)
    manifest = grokmirror.read_manifest(manifile)
    if os.path.exists(statusfile):
        logger.info(' status: reading %s', statusfile)
        stfh = open(statusfile, 'r')
        # noinspection PyBroadException
        try:
            # Format of the status file:
            # {
            # '/full/path/to/repository': {
            # 'lastcheck': 'YYYY-MM-DD' or 'never',
            # 'nextcheck': 'YYYY-MM-DD',
            # 'lastrepack': 'YYYY-MM-DD',
            # 'fingerprint': 'sha-1',
            # 's_elapsed': seconds,
            # 'quick_repack_count': times,
            # },
            # ...
            # }
            status = json.loads(stfh.read())
        except:
            logger.critical('Failed to parse %s', statusfile)
            lockf(flockh, LOCK_UN)
            flockh.close()
            return 1
    else:
        status = dict()
    frequency = config['fsck'].getint('frequency', 30)
    today = datetime.datetime.today()
    todayiso = today.strftime('%F')
    if force:
        # Use randomization for next check, again
        checkdelay = random.randint(1, frequency)
    else:
        checkdelay = frequency
    commitgraph = config['fsck'].getboolean('commitgraph', True)
    # Is our git version new enough to support it?
    if commitgraph and not grokmirror.git_newer_than('2.18.0'):
        logger.info('Git version too old to support commit graphs, disabling')
        config['fsck']['commitgraph'] = 'no'
    # Go through the manifest and compare with status
    toplevel = os.path.realpath(config['core'].get('toplevel'))
    changed = False
    for gitdir in list(manifest):
        fullpath = os.path.join(toplevel, gitdir.lstrip('/'))
        # Does it exist?
        if not os.path.isdir(fullpath):
            # Remove it from manifest and status
            manifest.pop(gitdir)
            # NOTE(review): this raises KeyError if the repo vanished before
            # it was ever recorded in status -- presumably cannot happen in
            # practice; verify.
            status.pop(fullpath)
            changed = True
            continue
        if fullpath not in status.keys():
            # Newly added repository
            if not force:
                # Randomize next check between now and frequency
                delay = random.randint(0, frequency)
                nextdate = today + datetime.timedelta(days=delay)
                nextcheck = nextdate.strftime('%F')
            else:
                nextcheck = todayiso
            status[fullpath] = {
                'lastcheck': 'never',
                'nextcheck': nextcheck,
                'fingerprint': grokmirror.get_repo_fingerprint(toplevel, gitdir),
            }
            logger.info('%s:', fullpath)
            logger.info(' added: next check on %s', nextcheck)
    if 'manifest' in config:
        pretty = config['manifest'].getboolean('pretty', False)
    else:
        pretty = False
    if changed:
        grokmirror.write_manifest(manifile, manifest, pretty=pretty)
    grokmirror.manifest_unlock(manifile)
    # record newly found repos in the status file
    logger.debug('Updating status file in %s', statusfile)
    with open(statusfile, 'w') as stfh:
        stfh.write(json.dumps(status, indent=2))
    # Go through status and find all repos that need work done on them.
    to_process = set()
    total_checked = 0
    total_elapsed = 0
    space_saved = 0
    cfg_repack = config['fsck'].getboolean('repack', True)
    # Can be "always", which is why we don't getboolean
    cfg_precious = config['fsck'].get('precious', 'yes')
    obstdir = os.path.realpath(config['core'].get('objstore'))
    logger.info(' search: getting parent commit info from all repos, may take a while')
    top_roots, obst_roots = grokmirror.get_rootsets(toplevel, obstdir)
    amap = grokmirror.get_altrepo_map(toplevel)
    fetched_obstrepos = set()
    obst_changes = False
    analyzed = 0
    queued = 0
    logger.info('Analyzing %s (%s repos)', toplevel, len(status))
    stattime = time.time()
    baselines = [x.strip() for x in config['fsck'].get('baselines', '').split('\n')]
    # Pass 1: regular (non-objstore) repos -- objstore migration + queueing
    for fullpath in list(status):
        # Give me a status every 5 seconds
        if time.time() - stattime >= 5:
            logger.info(' ---: %s/%s analyzed, %s queued', analyzed, len(status), queued)
            stattime = time.time()
        start_size = get_repo_size(fullpath)
        analyzed += 1
        # We do obstrepos separately below, as logic is different
        if grokmirror.is_obstrepo(fullpath, obstdir):
            # NOTE(review): '%s' in this message has no argument supplied,
            # so the placeholder is never filled in.
            logger.debug('Skipping %s (obstrepo)')
            continue
        # Check to make sure it's still in the manifest
        gitdir = fullpath.replace(toplevel, '', 1)
        gitdir = '/' + gitdir.lstrip('/')
        if gitdir not in manifest:
            status.pop(fullpath)
            logger.debug('%s is gone, no longer in manifest', gitdir)
            continue
        # Make sure FETCH_HEAD is pointing to /dev/null
        fetch_headf = os.path.join(fullpath, 'FETCH_HEAD')
        if not os.path.islink(fetch_headf):
            logger.debug(' replacing FETCH_HEAD with symlink to /dev/null')
            try:
                os.unlink(fetch_headf)
            except FileNotFoundError:
                pass
            os.symlink('/dev/null', fetch_headf)
        # Objstore migration routines
        # Are we using objstore?
        altdir = grokmirror.get_altrepo(fullpath)
        is_private = grokmirror.is_private_repo(config, gitdir)
        if grokmirror.is_alt_repo(toplevel, gitdir):
            # Don't prune any repos that are parents -- until migration is fully complete
            m_prune = False
        else:
            m_prune = True
        if not altdir and not os.path.exists(os.path.join(fullpath, 'grokmirror.do-not-objstore')):
            # Do we match any obstdir repos?
            obstrepo = grokmirror.find_best_obstrepo(fullpath, obst_roots, toplevel, baselines)
            if obstrepo:
                obst_changes = True
                # Yes, set ourselves up to be using that obstdir
                logger.info('%s: can use %s', gitdir, os.path.basename(obstrepo))
                grokmirror.set_altrepo(fullpath, obstrepo)
                if not is_private:
                    grokmirror.add_repo_to_objstore(obstrepo, fullpath)
                    # Fetch into the obstrepo
                    logger.info(' fetch: fetching %s', gitdir)
                    grokmirror.fetch_objstore_repo(obstrepo, fullpath)
                    obst_roots[obstrepo] = grokmirror.get_repo_roots(obstrepo, force=True)
                run_git_repack(fullpath, config, level=1, prune=m_prune)
                space_saved += start_size - get_repo_size(fullpath)
            else:
                # Do we have any toplevel siblings?
                obstrepo = None
                my_roots = grokmirror.get_repo_roots(fullpath)
                top_siblings = grokmirror.find_siblings(fullpath, my_roots, top_roots)
                if len(top_siblings):
                    # Am I a private repo?
                    if is_private:
                        # Are there any non-private siblings?
                        for top_sibling in top_siblings:
                            # Are you a private repo?
                            if grokmirror.is_private_repo(config, top_sibling):
                                continue
                            # Great, make an objstore repo out of this sibling
                            obstrepo = grokmirror.setup_objstore_repo(obstdir)
                            logger.info('%s: can use %s', gitdir, os.path.basename(obstrepo))
                            logger.info(' init: new objstore repo %s', os.path.basename(obstrepo))
                            grokmirror.add_repo_to_objstore(obstrepo, top_sibling)
                            # Fetch into the obstrepo
                            logger.info(' fetch: fetching %s', top_sibling)
                            grokmirror.fetch_objstore_repo(obstrepo, top_sibling)
                            obst_roots[obstrepo] = grokmirror.get_repo_roots(obstrepo, force=True)
                            # It doesn't matter if this fails, because repacking is still safe
                            # Other siblings will match in their own due course
                            break
                    else:
                        # Make an objstore repo out of myself
                        obstrepo = grokmirror.setup_objstore_repo(obstdir)
                        logger.info('%s: can use %s', gitdir, os.path.basename(obstrepo))
                        logger.info(' init: new objstore repo %s', os.path.basename(obstrepo))
                        grokmirror.add_repo_to_objstore(obstrepo, fullpath)
                if obstrepo:
                    obst_changes = True
                    # Set alternates to the obstrepo
                    grokmirror.set_altrepo(fullpath, obstrepo)
                    if not is_private:
                        # Fetch into the obstrepo
                        logger.info(' fetch: fetching %s', gitdir)
                        grokmirror.fetch_objstore_repo(obstrepo, fullpath)
                    run_git_repack(fullpath, config, level=1, prune=m_prune)
                    space_saved += start_size - get_repo_size(fullpath)
                    obst_roots[obstrepo] = grokmirror.get_repo_roots(obstrepo, force=True)
        elif not os.path.isdir(altdir):
            logger.critical(' reclone: %s (alternates repo gone)', gitdir)
            set_repo_reclone(fullpath, 'Alternates repository gone')
            continue
        elif altdir.find(obstdir) != 0:
            # We have an alternates repo, but it's not an objstore repo
            # Probably left over from grokmirror-1.x
            # Do we have any matching obstrepos?
            obstrepo = grokmirror.find_best_obstrepo(fullpath, obst_roots, toplevel, baselines)
            if obstrepo:
                logger.info('%s: migrating to %s', gitdir, os.path.basename(obstrepo))
                if altdir not in fetched_obstrepos:
                    # We're already sharing objects with altdir, so no need to check if it's private
                    grokmirror.add_repo_to_objstore(obstrepo, altdir)
                    logger.info(' fetch: fetching %s (previous parent)', os.path.relpath(altdir, toplevel))
                    success = grokmirror.fetch_objstore_repo(obstrepo, altdir)
                    fetched_obstrepos.add(altdir)
                    if success:
                        set_precious_objects(altdir, enabled=False)
                        pre_size = get_repo_size(altdir)
                        run_git_repack(altdir, config, level=1, prune=False)
                        space_saved += pre_size - get_repo_size(altdir)
                    else:
                        logger.critical('Unsuccessful fetching %s into %s', altdir, os.path.basename(obstrepo))
                        obstrepo = None
            else:
                # Make a new obstrepo out of mommy
                obstrepo = grokmirror.setup_objstore_repo(obstdir)
                logger.info('%s: migrating to %s', gitdir, os.path.basename(obstrepo))
                logger.info(' init: new objstore repo %s', os.path.basename(obstrepo))
                grokmirror.add_repo_to_objstore(obstrepo, altdir)
                logger.info(' fetch: fetching %s (previous parent)', os.path.relpath(altdir, toplevel))
                success = grokmirror.fetch_objstore_repo(obstrepo, altdir)
                fetched_obstrepos.add(altdir)
                if success:
                    grokmirror.set_altrepo(altdir, obstrepo)
                    # mommy is no longer precious
                    set_precious_objects(altdir, enabled=False)
                    # Don't prune, because there may be objects others are still borrowing
                    # It can only be pruned once the full migration is completed
                    pre_size = get_repo_size(altdir)
                    run_git_repack(altdir, config, level=1, prune=False)
                    space_saved += pre_size - get_repo_size(altdir)
                else:
                    logger.critical('Unsuccessful fetching %s into %s', altdir, os.path.basename(obstrepo))
                    obstrepo = None
            if obstrepo:
                obst_changes = True
                if not is_private:
                    # Fetch into the obstrepo
                    grokmirror.add_repo_to_objstore(obstrepo, fullpath)
                    logger.info(' fetch: fetching %s', gitdir)
                    if grokmirror.fetch_objstore_repo(obstrepo, fullpath):
                        grokmirror.set_altrepo(fullpath, obstrepo)
                        set_precious_objects(fullpath, enabled=False)
                        run_git_repack(fullpath, config, level=1, prune=m_prune)
                        space_saved += start_size - get_repo_size(fullpath)
                else:
                    # Grab all the objects from the previous parent, since we can't simply
                    # fetch ourselves into the obstrepo (we're private).
                    args = ['repack', '-a']
                    logger.info(' fetch: restoring private repo %s', gitdir)
                    if grokmirror.run_git_command(fullpath, args):
                        grokmirror.set_altrepo(fullpath, obstrepo)
                        set_precious_objects(fullpath, enabled=False)
                        # Now repack ourselves to get rid of any public objects
                        run_git_repack(fullpath, config, level=1, prune=m_prune)
                obst_roots[obstrepo] = grokmirror.get_repo_roots(obstrepo, force=True)
        elif altdir.find(obstdir) == 0 and not is_private:
            # Make sure this repo is properly set up with obstrepo
            # (e.g. it could have been cloned/copied and obstrepo is not tracking it yet)
            obstrepo = altdir
            s_remotes = grokmirror.list_repo_remotes(obstrepo, withurl=True)
            found = False
            for virtref, childpath in s_remotes:
                if childpath == fullpath:
                    found = True
                    break
            if not found:
                # Set it up properly
                grokmirror.add_repo_to_objstore(obstrepo, fullpath)
                logger.info(' reconfig: %s to fetch into %s', gitdir, os.path.basename(obstrepo))
        obj_info = grokmirror.get_repo_obj_info(fullpath)
        try:
            packs = int(obj_info['packs'])
            count_loose = int(obj_info['count'])
        except KeyError:
            logger.warning('Unable to count objects in %s, skipping' % fullpath)
            continue
        schedcheck = datetime.datetime.strptime(status[fullpath]['nextcheck'], '%Y-%m-%d')
        nextcheck = today + datetime.timedelta(days=checkdelay)
        if not cfg_repack:
            # don't look at me if you turned off repack
            logger.debug('Not repacking because repack=no in config')
            repack_level = None
        elif repack_all_full and (count_loose > 0 or packs > 1):
            logger.debug('repack_level=2 due to repack_all_full')
            repack_level = 2
        elif repack_all_quick and count_loose > 0:
            logger.debug('repack_level=1 due to repack_all_quick')
            repack_level = 1
        elif status[fullpath].get('fingerprint') != grokmirror.get_repo_fingerprint(toplevel, gitdir):
            logger.debug('Checking repack level of %s', fullpath)
            repack_level = grokmirror.get_repack_level(obj_info)
        else:
            repack_level = None
        # trigger a level-1 repack if it's regular check time and the fingerprint has changed
        if (not repack_level and schedcheck <= today
                and status[fullpath].get('fingerprint') != grokmirror.get_repo_fingerprint(toplevel, gitdir)):
            status[fullpath]['nextcheck'] = nextcheck.strftime('%F')
            logger.info(' aged: %s (forcing repack)', fullpath)
            repack_level = 1
        # If we're not already repacking the repo, run a prune if we find garbage in it
        if obj_info['garbage'] != '0' and not repack_level and is_safe_to_prune(fullpath, config):
            logger.info(' garbage: %s (%s files, %s KiB)', gitdir, obj_info['garbage'], obj_info['size-garbage'])
            try:
                grokmirror.lock_repo(fullpath, nonblocking=True)
                run_git_prune(fullpath, config)
                grokmirror.unlock_repo(fullpath)
            except IOError:
                pass
        if repack_level and (cfg_precious == 'always' and check_precious_objects(fullpath)):
            # if we have preciousObjects, then we only repack based on the same
            # schedule as fsck.
            logger.debug('preciousObjects is set')
            # for repos with preciousObjects, we use the fsck schedule for repacking
            if schedcheck <= today:
                logger.debug('Time for a full periodic repack of a preciousObjects repo')
                status[fullpath]['nextcheck'] = nextcheck.strftime('%F')
                repack_level = 2
            else:
                logger.debug('Not repacking preciousObjects repo outside of schedule')
                repack_level = None
        if repack_level:
            queued += 1
            to_process.add((fullpath, 'repack', repack_level))
            if repack_level > 1:
                logger.info(' queued: %s (full repack)', fullpath)
            else:
                logger.info(' queued: %s (repack)', fullpath)
        elif repack_only or repack_all_quick or repack_all_full:
            continue
        elif schedcheck <= today or force:
            queued += 1
            to_process.add((fullpath, 'fsck', None))
            logger.info(' queued: %s (fsck)', fullpath)
    logger.info(' done: %s analyzed, %s queued', analyzed, queued)
    if obst_changes:
        # Refresh the alt repo map cache
        amap = grokmirror.get_altrepo_map(toplevel, refresh=True)
    # Pass 2: objstore repos -- sibling merging, stale virtref trimming, queueing
    obstrepos = grokmirror.find_all_gitdirs(obstdir, normalize=True, exclude_objstore=False)
    analyzed = 0
    queued = 0
    logger.info('Analyzing %s (%s repos)', obstdir, len(obstrepos))
    objstore_uses_plumbing = config['core'].getboolean('objstore_uses_plumbing', False)
    islandcores = [x.strip() for x in config['fsck'].get('islandcores', '').split('\n')]
    stattime = time.time()
    for obstrepo in obstrepos:
        if time.time() - stattime >= 5:
            logger.info(' ---: %s/%s analyzed, %s queued', analyzed, len(obstrepos), queued)
            stattime = time.time()
        analyzed += 1
        logger.debug('Processing objstore repo: %s', os.path.basename(obstrepo))
        my_roots = grokmirror.get_repo_roots(obstrepo)
        if obstrepo in amap and len(amap[obstrepo]):
            # Is it redundant with any other objstore repos?
            strategy = config['fsck'].get('obstrepo_merge_strategy', 'exact')
            if strategy == 'blobs':
                siblings = find_siblings_by_blobs(obstrepo, obstdir, ratio=75)
            else:
                exact_merge = True
                if strategy == 'loose':
                    exact_merge = False
                siblings = grokmirror.find_siblings(obstrepo, my_roots, obst_roots, exact=exact_merge)
            if len(siblings):
                siblings.add(obstrepo)
                mdest = merge_siblings(siblings, amap)
                obst_changes = True
                if mdest in status:
                    # Force full repack of merged obstrepos
                    status[mdest]['nextcheck'] = todayiso
                # Recalculate my roots
                my_roots = grokmirror.get_repo_roots(obstrepo, force=True)
                obst_roots[obstrepo] = my_roots
        # Not an else, because the previous step may have migrated things
        if obstrepo not in amap or not len(amap[obstrepo]):
            obst_changes = True
            # XXX: Is there a possible race condition here if grok-pull cloned a new repo
            # while we were migrating this one?
            logger.info('%s: deleting (no longer used by anything)', os.path.basename(obstrepo))
            if obstrepo in amap:
                amap.pop(obstrepo)
            shutil.rmtree(obstrepo)
            continue
        # Record the latest sibling info in the tracking file
        telltale = os.path.join(obstrepo, 'grokmirror.objstore')
        with open(telltale, 'w') as fh:
            fh.write(grokmirror.OBST_PREAMBULE)
            fh.write('\n'.join(sorted(list(amap[obstrepo]))) + '\n')
        my_remotes = grokmirror.list_repo_remotes(obstrepo, withurl=True)
        # Use the first child repo as our "reference" entry in manifest
        refrepo = None
        # Use for the alternateRefsPrefixes value
        baseline_refs = set()
        set_islandcore = False
        new_islandcore = False
        valid_virtrefs = set()
        for virtref, childpath in my_remotes:
            # Is it still relevant?
            if childpath not in amap[obstrepo]:
                # Remove it and let prune take care of it
                grokmirror.remove_from_objstore(obstrepo, childpath)
                logger.info('%s: removed remote %s (no longer used)', os.path.basename(obstrepo), childpath)
                continue
            valid_virtrefs.add(virtref)
            # Does it need fetching?
            fetch = True
            l_fpf = os.path.join(obstrepo, 'grokmirror.%s.fingerprint' % virtref)
            r_fpf = os.path.join(childpath, 'grokmirror.fingerprint')
            try:
                with open(l_fpf) as fh:
                    l_fp = fh.read().strip()
                with open(r_fpf) as fh:
                    r_fp = fh.read().strip()
                if l_fp == r_fp:
                    fetch = False
            except IOError:
                pass
            gitdir = '/' + os.path.relpath(childpath, toplevel)
            if fetch:
                grokmirror.lock_repo(obstrepo, nonblocking=False)
                logger.info(' fetch: %s -> %s', gitdir, os.path.basename(obstrepo))
                success = grokmirror.fetch_objstore_repo(obstrepo, childpath, use_plumbing=objstore_uses_plumbing)
                if not success and objstore_uses_plumbing:
                    # Try using git porcelain
                    grokmirror.fetch_objstore_repo(obstrepo, childpath)
                grokmirror.unlock_repo(obstrepo)
            if gitdir not in manifest:
                continue
            # Do we need to set any alternateRefsPrefixes?
            for baseline in baselines:
                # Does this repo match a baseline
                if fnmatch.fnmatch(gitdir, baseline):
                    baseline_refs.add('refs/virtual/%s/heads/' % virtref)
                    break
            # Do we need to set islandCore?
            if not set_islandcore:
                is_islandcore = False
                for islandcore in islandcores:
                    # Does this repo match a baseline
                    if fnmatch.fnmatch(gitdir, islandcore):
                        is_islandcore = True
                        break
                if is_islandcore:
                    set_islandcore = True
                    # is it already set to that?
                    entries = grokmirror.get_config_from_git(obstrepo, r'pack\.island*')
                    if entries.get('islandcore') != virtref:
                        new_islandcore = True
                        logger.info(' reconfig: %s (islandCore to %s)', os.path.basename(obstrepo), virtref)
                        grokmirror.set_git_config(obstrepo, 'pack.islandCore', virtref)
            if refrepo is None:
                # Legacy "reference=" setting in manifest
                refrepo = gitdir
                manifest[gitdir]['reference'] = None
            else:
                manifest[gitdir]['reference'] = refrepo
            manifest[gitdir]['forkgroup'] = os.path.basename(obstrepo[:-4])
        if len(baseline_refs):
            # sort the list, so we have deterministic value
            br = list(baseline_refs)
            br.sort()
            refpref = ' '.join(br)
            # Go through all remotes and set their alternateRefsPrefixes
            for s_virtref, s_childpath in my_remotes:
                # is it already set to that?
                entries = grokmirror.get_config_from_git(s_childpath, r'core\.alternate*')
                if entries.get('alternaterefsprefixes') != refpref:
                    s_gitdir = '/' + os.path.relpath(s_childpath, toplevel)
                    logger.info(' reconfig: %s (baseline)', s_gitdir)
                    grokmirror.set_git_config(s_childpath, 'core.alternateRefsPrefixes', refpref)
        repack_requested = False
        if os.path.exists(os.path.join(obstrepo, 'grokmirror.repack')):
            repack_requested = True
        # Go through all our refs and find all stale virtrefs
        args = ['for-each-ref', '--format=%(refname)', 'refs/virtual/']
        trimmed_virtrefs = set()
        ecode, out, err = grokmirror.run_git_command(obstrepo, args)
        if ecode == 0 and out:
            for line in out.split('\n'):
                chunks = line.split('/')
                if len(chunks) < 3:
                    # Where did this come from?
                    logger.debug('Weird ref %s in objstore repo %s', line, obstrepo)
                    continue
                virtref = chunks[2]
                if virtref not in valid_virtrefs and virtref not in trimmed_virtrefs:
                    logger.info(' trim: stale virtref %s', virtref)
                    grokmirror.objstore_trim_virtref(obstrepo, virtref)
                    trimmed_virtrefs.add(virtref)
        if obstrepo not in status or new_islandcore or trimmed_virtrefs or repack_requested:
            # We don't use obstrepo fingerprints, so we set it to None
            status[obstrepo] = {
                'lastcheck': 'never',
                'nextcheck': todayiso,
                'fingerprint': None,
            }
            # Always full-repack brand new obstrepos
            repack_level = 2
        else:
            obj_info = grokmirror.get_repo_obj_info(obstrepo)
            repack_level = grokmirror.get_repack_level(obj_info)
        nextcheck = datetime.datetime.strptime(status[obstrepo]['nextcheck'], '%Y-%m-%d')
        if repack_level > 1 and nextcheck > today:
            # Don't do full repacks outside of schedule
            repack_level = 1
        if repack_level:
            queued += 1
            to_process.add((obstrepo, 'repack', repack_level))
            if repack_level > 1:
                logger.info(' queued: %s (full repack)', os.path.basename(obstrepo))
            else:
                logger.info(' queued: %s (repack)', os.path.basename(obstrepo))
        elif repack_only or repack_all_quick or repack_all_full:
            continue
        elif (nextcheck <= today or force) and not repack_only:
            queued += 1
            status[obstrepo]['nextcheck'] = nextcheck.strftime('%F')
            to_process.add((obstrepo, 'fsck', None))
            logger.info(' queued: %s (fsck)', os.path.basename(obstrepo))
    logger.info(' done: %s analyzed, %s queued', analyzed, queued)
    if obst_changes:
        # We keep the same mtime, because the repos themselves haven't changed
        grokmirror.manifest_lock(manifile)
        # Re-read manifest, so we can update reference and forkgroup data
        disk_manifest = grokmirror.read_manifest(manifile)
        # Go through my manifest and update and changes in forkgroup data
        for gitdir in manifest:
            if gitdir not in disk_manifest:
                # What happened here?
                continue
            if 'reference' in manifest[gitdir]:
                disk_manifest[gitdir]['reference'] = manifest[gitdir]['reference']
            if 'forkgroup' in manifest[gitdir]:
                disk_manifest[gitdir]['forkgroup'] = manifest[gitdir]['forkgroup']
        grokmirror.write_manifest(manifile, disk_manifest, pretty=pretty)
        grokmirror.manifest_unlock(manifile)
    if not len(to_process):
        logger.info('No repos need attention.')
        return
    # Delete some vars that are huge for large repo sets -- we no longer need them and the
    # next step will likely eat lots of ram.
    del obst_roots
    del top_roots
    gc.collect()
    # Pass 3: actually run the queued repack/fsck jobs
    logger.info('Processing %s repositories', len(to_process))
    for fullpath, action, repack_level in to_process:
        logger.info('%s:', fullpath)
        start_size = get_repo_size(fullpath)
        checkdelay = frequency if not force else random.randint(1, frequency)
        nextcheck = today + datetime.timedelta(days=checkdelay)
        # Calculate elapsed seconds
        startt = time.time()
        # Wait till the repo is available and lock it for the duration of checks,
        # otherwise there may be false-positives if a mirrored repo is updated
        # in the middle of fsck or repack.
        grokmirror.lock_repo(fullpath, nonblocking=False)
        if action == 'repack':
            if run_git_repack(fullpath, config, repack_level):
                status[fullpath]['lastrepack'] = todayiso
                if repack_level > 1:
                    try:
                        os.unlink(os.path.join(fullpath, 'grokmirror.repack'))
                    except FileNotFoundError:
                        pass
                    status[fullpath]['lastfullrepack'] = todayiso
                    status[fullpath]['lastcheck'] = todayiso
                    status[fullpath]['nextcheck'] = nextcheck.strftime('%F')
                    # Do we need to generate a preload bundle?
                    if config['fsck'].get('preload_bundle_outdir') and grokmirror.is_obstrepo(fullpath, obstdir):
                        gen_preload_bundle(fullpath, config)
                    logger.info(' next: %s', status[fullpath]['nextcheck'])
            else:
                logger.warning('Repacking %s was unsuccessful', fullpath)
                grokmirror.unlock_repo(fullpath)
                continue
        elif action == 'fsck':
            run_git_fsck(fullpath, config, conn_only)
            status[fullpath]['lastcheck'] = todayiso
            status[fullpath]['nextcheck'] = nextcheck.strftime('%F')
            logger.info(' next: %s', status[fullpath]['nextcheck'])
        gitdir = '/' + os.path.relpath(fullpath, toplevel)
        status[fullpath]['fingerprint'] = grokmirror.get_repo_fingerprint(toplevel, gitdir)
        # noinspection PyTypeChecker
        elapsed = int(time.time()-startt)
        status[fullpath]['s_elapsed'] = elapsed
        # We're done with the repo now
        grokmirror.unlock_repo(fullpath)
        total_checked += 1
        total_elapsed += elapsed
        saved = start_size - get_repo_size(fullpath)
        space_saved += saved
        if saved > 0:
            logger.info(' done: %ss, %s saved', elapsed, get_human_size(saved))
        else:
            logger.info(' done: %ss', elapsed)
        if space_saved > 0:
            logger.info(' ---: %s done, %s queued, %s saved', total_checked,
                        len(to_process)-total_checked, get_human_size(space_saved))
        else:
            logger.info(' ---: %s done, %s queued', total_checked, len(to_process)-total_checked)
        # Write status file after each check, so if the process dies, we won't
        # have to recheck all the repos we've already checked
        logger.debug('Updating status file in %s', statusfile)
        with open(statusfile, 'w') as stfh:
            stfh.write(json.dumps(status, indent=2))
    logger.info('Processed %s repos in %0.2fs', total_checked, total_elapsed)
    with open(statusfile, 'w') as stfh:
        stfh.write(json.dumps(status, indent=2))
    lockf(flockh, LOCK_UN)
    flockh.close()
def parse_args():
    """Build the grok-fsck command-line parser and parse sys.argv.

    :returns: parsed argparse namespace
    """
    import argparse

    # noinspection PyTypeChecker
    parser = argparse.ArgumentParser(prog='grok-fsck',
                                     description='Optimize and check mirrored repositories',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
                        default=False,
                        help='Be verbose and tell us what you are doing')
    parser.add_argument('-f', '--force', dest='force',
                        action='store_true', default=False,
                        help='Force immediate run on all repositories')
    parser.add_argument('-c', '--config', dest='config',
                        required=True,
                        help='Location of the configuration file')
    parser.add_argument('--repack-only', dest='repack_only',
                        action='store_true', default=False,
                        help='Only find and repack repositories that need optimizing')
    parser.add_argument('--connectivity-only', dest='conn_only',
                        action='store_true', default=False,
                        help='Only check connectivity when running fsck checks')
    parser.add_argument('--repack-all-quick', dest='repack_all_quick',
                        action='store_true', default=False,
                        help='(Assumes --force): Do a quick repack of all repos')
    parser.add_argument('--repack-all-full', dest='repack_all_full',
                        action='store_true', default=False,
                        help='(Assumes --force): Do a full repack of all repos')
    parser.add_argument('--version', action='version', version=grokmirror.VERSION)

    cmdargs = parser.parse_args()
    # The two whole-mirror repack modes are mutually exclusive.
    if cmdargs.repack_all_quick and cmdargs.repack_all_full:
        parser.error('Pick either --repack-all-full or --repack-all-quick')
    return cmdargs
def grok_fsck(cfgfile, verbose=False, force=False, repack_only=False, conn_only=False,
              repack_all_quick=False, repack_all_full=False):
    """Load the configuration, run fsck_mirror(), and email a report if
    anything was logged at CRITICAL level during the run.

    :param cfgfile: path to the grokmirror configuration file
    :param verbose: echo log messages to the console
    :param force: force an immediate run on all repositories
    :param repack_only: only find and repack repositories that need optimizing
    :param conn_only: only check connectivity when running fsck checks
    :param repack_all_quick: do a quick repack of all repos
    :param repack_all_full: do a full repack of all repos
    """
    global logger
    config = grokmirror.load_config_file(cfgfile)
    obstdir = config['core'].get('objstore', None)
    if obstdir is None:
        # Default the objstore location to <toplevel>/objstore.
        obstdir = os.path.join(config['core'].get('toplevel'), 'objstore')
        config['core']['objstore'] = obstdir
    logfile = config['core'].get('log', None)
    if config['core'].get('loglevel', 'info') == 'debug':
        loglevel = logging.DEBUG
    else:
        loglevel = logging.INFO
    logger = grokmirror.init_logger('fsck', logfile, loglevel, verbose)
    # Capture CRITICAL-level messages into an in-memory buffer so they can
    # be mailed out as an error report once the run completes.
    rh = io.StringIO()
    ch = logging.StreamHandler(stream=rh)
    formatter = logging.Formatter('%(message)s')
    ch.setFormatter(formatter)
    ch.setLevel(logging.CRITICAL)
    logger.addHandler(ch)
    fsck_mirror(config, force, repack_only, conn_only, repack_all_quick, repack_all_full)
    report = rh.getvalue()
    if len(report):
        # Something was logged at CRITICAL level -- mail the report.
        msg = EmailMessage()
        msg.set_content(report)
        subject = config['fsck'].get('report_subject')
        if not subject:
            import platform
            subject = 'grok-fsck errors on {} ({})'.format(platform.node(), cfgfile)
        msg['Subject'] = subject
        from_addr = config['fsck'].get('report_from', 'root')
        msg['From'] = from_addr
        report_to = config['fsck'].get('report_to', 'root')
        msg['To'] = report_to
        mailhost = config['fsck'].get('report_mailhost', 'localhost')
        s = smtplib.SMTP(mailhost)
        s.send_message(msg)
        s.quit()
def command():
    """Console-script entry point: parse CLI arguments and run grok_fsck()."""
    opts = parse_args()
    return grok_fsck(opts.config, opts.verbose, opts.force, opts.repack_only, opts.conn_only,
                     opts.repack_all_quick, opts.repack_all_full)
if __name__ == '__main__':
command()
| gpl-3.0 |
petebachant/scipy | scipy/optimize/zeros.py | 55 | 19069 | from __future__ import division, print_function, absolute_import
import warnings
from . import _zeros
from numpy import finfo, sign, sqrt
_iter = 100
_xtol = 1e-12
_rtol = finfo(float).eps * 2
__all__ = ['newton', 'bisect', 'ridder', 'brentq', 'brenth']
CONVERGED = 'converged'
SIGNERR = 'sign error'
CONVERR = 'convergence error'
flag_map = {0: CONVERGED, -1: SIGNERR, -2: CONVERR}


class RootResults(object):
    """Container describing the outcome of a root-finding run.

    Attributes
    ----------
    root : float
        Estimated root location.
    iterations : int
        Number of iterations needed to find the root.
    function_calls : int
        Number of times the function was called.
    converged : bool
        True if the routine converged.
    flag : str
        Description of the cause of termination.
    """

    def __init__(self, root, iterations, function_calls, flag):
        self.root = root
        self.iterations = iterations
        self.function_calls = function_calls
        # Flag 0 is the C routines' success code.
        self.converged = (flag == 0)
        description = flag_map.get(flag)
        if description is None:
            description = 'unknown error %d' % (flag,)
        self.flag = description
def results_c(full_output, r):
    """Unpack a raw result from the ``_zeros`` C routines.

    When `full_output` is falsy, `r` is returned untouched. Otherwise `r`
    is expected to be the tuple ``(x, funcalls, iterations, flag)`` and is
    converted into ``(x, RootResults)``.
    """
    if not full_output:
        return r
    x, funcalls, iterations, flag = r
    return x, RootResults(root=x,
                          iterations=iterations,
                          function_calls=funcalls,
                          flag=flag)
# Newton-Raphson method
def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
           fprime2=None):
    """
    Find a zero of `func` using the Newton-Raphson, Halley or secant method.

    Starting from the initial estimate `x0`, iterate towards a nearby zero
    of `func`. When `fprime` is supplied, the Newton-Raphson iteration is
    used; when `fprime2` is supplied as well, the parabolic Halley
    iteration is used instead. Without `fprime`, the secant method is used.

    Parameters
    ----------
    func : function
        The function whose zero is wanted, called as ``func(x, *args)``.
    x0 : float
        An initial estimate located somewhere near the actual zero.
    fprime : function, optional
        Derivative of `func`; if None (default), the secant method is used.
    args : tuple, optional
        Extra arguments passed to `func` and its derivatives.
    tol : float, optional
        The allowable error of the zero value.
    maxiter : int, optional
        Maximum number of iterations.
    fprime2 : function, optional
        Second derivative of `func`; enables parabolic Halley's method.

    Returns
    -------
    zero : float
        Estimated location where function is zero.

    See Also
    --------
    brentq, brenth, ridder, bisect
    fsolve : find zeroes in n dimensions.

    Notes
    -----
    Convergence is quadratic for Newton-Raphson, cubic for Halley and
    sub-quadratic for the secant method, but only the step size is used as
    the stopping criterion, so there is no guarantee that a zero has been
    found and the result should be verified. The bracketing routines
    (`brentq`, `brenth`, `ridder`, `bisect`) are safer but require an
    interval where the function changes sign; `brentq` is recommended for
    general one-dimensional use when such an interval is known.
    """
    if tol <= 0:
        raise ValueError("tol too small (%g <= 0)" % tol)
    if fprime is not None:
        # Newton-Raphson iteration (Halley's when fprime2 is supplied).
        # Multiplying by 1.0 coerces to floating point while keeping a
        # complex starting point intact (float(x0) would not).
        x_cur = 1.0 * x0
        d2f = 0
        for _ in range(maxiter):
            call_args = (x_cur,) + args
            df = fprime(*call_args)
            if df == 0:
                msg = "derivative was zero."
                warnings.warn(msg, RuntimeWarning)
                return x_cur
            fx = func(*call_args)
            if fprime2 is not None:
                d2f = fprime2(*call_args)
            if d2f == 0:
                # Plain Newton step.
                x_next = x_cur - fx / df
            else:
                # Parabolic Halley step.
                disc = df ** 2 - 2 * fx * d2f
                if disc < 0:
                    x_next = x_cur - df / d2f
                else:
                    x_next = x_cur - 2*fx / (df + sign(df) * sqrt(disc))
            if abs(x_next - x_cur) < tol:
                return x_next
            x_cur = x_next
    else:
        # Secant method: synthesize a second starting point close to x0.
        x_old = x0
        if x0 >= 0:
            x_cur = x0*(1 + 1e-4) + 1e-4
        else:
            x_cur = x0*(1 + 1e-4) - 1e-4
        f_old = func(*((x_old,) + args))
        f_cur = func(*((x_cur,) + args))
        for _ in range(maxiter):
            if f_cur == f_old:
                # Degenerate (flat) secant -- settle for the midpoint.
                if x_cur != x_old:
                    msg = "Tolerance of %s reached" % (x_cur - x_old)
                    warnings.warn(msg, RuntimeWarning)
                return (x_cur + x_old)/2.0
            else:
                x_next = x_cur - f_cur*(x_cur - x_old)/(f_cur - f_old)
            if abs(x_next - x_cur) < tol:
                return x_next
            x_old = x_cur
            f_old = f_cur
            x_cur = x_next
            f_cur = func(*((x_cur,) + args))
    msg = "Failed to converge after %d iterations, value is %s" % (maxiter, x_next)
    raise RuntimeError(msg)
def bisect(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find a root of `f` on the sign-changing interval [`a`, `b`] by bisection.

    Basic bisection routine: slow but sure. `f` must be continuous and
    ``f(a)`` and ``f(b)`` cannot have the same signs.

    Parameters
    ----------
    f : function
        Python function returning a number; f(a) and f(b) must have
        opposite signs.
    a : number
        One end of the bracketing interval [a,b].
    b : number
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        Absolute tolerance on the root location; must be > 0.
    rtol : number, optional
        Relative tolerance on the root location; must be >=
        ``np.finfo(float).eps * 2`` (the default).
    maxiter : number, optional
        Maximum number of iterations before an error is raised; must be >= 0.
    args : tuple, optional
        Extra arguments appended to the call ``f(x, *args)``.
    full_output : bool, optional
        When True, return ``(x, r)`` where `r` is a `RootResults` object;
        otherwise return only the root.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : RootResults (present if ``full_output = True``)
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.

    See Also
    --------
    brentq, brenth, ridder, newton
    fixed_point : scalar fixed-point finder
    fsolve : n-dimensional root-finding
    """
    # Normalize a bare extra argument into the tuple the C core expects.
    if not isinstance(args, tuple):
        args = (args,)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    raw = _zeros._bisect(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
    return results_c(full_output, raw)
def ridder(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find a root of `f` on the sign-changing interval [`a`, `b`] using
    Ridders' method.

    Ridders' method [Ridders1979]_ is faster than bisection but generally
    not as fast as the Brent routines. The implementation here diverges
    slightly from standard presentations in order to be a bit more careful
    of tolerance.

    Parameters
    ----------
    f : function
        Python function returning a number; f must be continuous, and f(a)
        and f(b) must have opposite signs.
    a : number
        One end of the bracketing interval [a,b].
    b : number
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        Absolute tolerance on the root location; must be > 0.
    rtol : number, optional
        Relative tolerance on the root location; must be >=
        ``np.finfo(float).eps * 2`` (the default).
    maxiter : number, optional
        Maximum number of iterations before an error is raised; must be >= 0.
    args : tuple, optional
        Extra arguments appended to the call ``f(x, *args)``.
    full_output : bool, optional
        When True, return ``(x, r)`` where `r` is a `RootResults` object;
        otherwise return only the root.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : RootResults (present if ``full_output = True``)
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.

    See Also
    --------
    brentq, brenth, bisect, newton : one-dimensional root-finding
    fixed_point : scalar fixed-point finder

    References
    ----------
    .. [Ridders1979]
       Ridders, C. F. J. "A New Algorithm for Computing a
       Single Root of a Real Continuous Function."
       IEEE Trans. Circuits Systems 26, 979-980, 1979.
    """
    # Normalize a bare extra argument into the tuple the C core expects.
    if not isinstance(args, tuple):
        args = (args,)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    raw = _zeros._ridder(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
    return results_c(full_output, raw)
def brentq(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find a root of `f` on the sign-changing interval [`a`, `b`] using the
    classic Brent (1973) method.

    Brent's method combines root bracketing, interval bisection and inverse
    quadratic interpolation; it is sometimes known as the van
    Wijngaarden-Dekker-Brent method and is generally considered the best of
    the rootfinding routines here. Brent (1973) claims convergence is
    guaranteed for functions computable within [a,b]. [Brent1973]_ provides
    the classic description of the algorithm; our code diverges a bit from
    standard presentations: we choose a different formula for the
    extrapolation step.

    Parameters
    ----------
    f : function
        Python function returning a number; f must be continuous, and f(a)
        and f(b) must have opposite signs.
    a : number
        One end of the bracketing interval [a,b].
    b : number
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        Absolute tolerance on the root location; must be > 0.
    rtol : number, optional
        Relative tolerance on the root location; must be >=
        ``np.finfo(float).eps * 2`` (the default).
    maxiter : number, optional
        Maximum number of iterations before an error is raised; must be >= 0.
    args : tuple, optional
        Extra arguments appended to the call ``f(x, *args)``.
    full_output : bool, optional
        When True, return ``(x, r)`` where `r` is a `RootResults` object;
        otherwise return only the root.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : RootResults (present if ``full_output = True``)
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.

    See Also
    --------
    brenth, ridder, bisect, newton : one-dimensional root-finding
    fsolve : n-dimensional root-finding
    fixed_point : scalar fixed-point finder

    References
    ----------
    .. [Brent1973]
       Brent, R. P.,
       *Algorithms for Minimization Without Derivatives*.
       Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4.
    .. [PressEtal1992]
       Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T.
       *Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd ed.
       Cambridge, England: Cambridge University Press, pp. 352-355, 1992.
       Section 9.3: "Van Wijngaarden-Dekker-Brent Method."
    """
    # Normalize a bare extra argument into the tuple the C core expects.
    if not isinstance(args, tuple):
        args = (args,)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    raw = _zeros._brentq(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
    return results_c(full_output, raw)
def brenth(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find a root of `f` on the sign-changing interval [`a`, `b`] using a
    Brent-style method with hyperbolic extrapolation.

    A variation on the classic Brent routine that uses hyperbolic
    extrapolation instead of inverse quadratic extrapolation -- a safe
    version of the secant method. Generally on a par with the brent
    routine, but not as heavily tested. The version here is by Chuck
    Harris.

    Parameters
    ----------
    f : function
        Python function returning a number; f must be continuous, and f(a)
        and f(b) must have opposite signs.
    a : number
        One end of the bracketing interval [a,b].
    b : number
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        Absolute tolerance on the root location; must be > 0.
    rtol : number, optional
        Relative tolerance on the root location; must be >=
        ``np.finfo(float).eps * 2`` (the default).
    maxiter : number, optional
        Maximum number of iterations before an error is raised; must be >= 0.
    args : tuple, optional
        Extra arguments appended to the call ``f(x, *args)``.
    full_output : bool, optional
        When True, return ``(x, r)`` where `r` is a `RootResults` object;
        otherwise return only the root.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : RootResults (present if ``full_output = True``)
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.

    See Also
    --------
    brentq, ridder, bisect, newton : one-dimensional root-finding
    fsolve : n-dimensional root-finding
    fixed_point : scalar fixed-point finder
    """
    # Normalize a bare extra argument into the tuple the C core expects.
    if not isinstance(args, tuple):
        args = (args,)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    raw = _zeros._brenth(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
    return results_c(full_output, raw)
| bsd-3-clause |
takeflight/django | django/contrib/sites/tests.py | 16 | 4880 | from __future__ import unicode_literals
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.http import HttpRequest
from django.test import TestCase, modify_settings, override_settings
from . import models
from .middleware import CurrentSiteMiddleware
from .models import clear_site_cache, Site
from .requests import RequestSite
from .shortcuts import get_current_site
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
class SitesFrameworkTests(TestCase):
    """Exercise the django.contrib.sites framework: Site manager caching,
    get_current_site() resolution, and domain validation."""

    def setUp(self):
        # Every test starts with a single known Site matching settings.SITE_ID.
        self.site = Site(
            id=settings.SITE_ID,
            domain="example.com",
            name="example.com",
        )
        self.site.save()

    def test_save_another(self):
        # Regression for #17415
        # On some backends the sequence needs reset after save with explicit ID.
        # Test that there is no sequence collisions by saving another site.
        Site(domain="example2.com", name="example2.com").save()

    def test_site_manager(self):
        # Make sure that get_current() does not return a deleted Site object.
        s = Site.objects.get_current()
        self.assertIsInstance(s, Site)
        s.delete()
        self.assertRaises(ObjectDoesNotExist, Site.objects.get_current)

    def test_site_cache(self):
        # After updating a Site object (e.g. via the admin), we shouldn't return a
        # bogus value from the SITE_CACHE.
        site = Site.objects.get_current()
        self.assertEqual("example.com", site.name)
        s2 = Site.objects.get(id=settings.SITE_ID)
        s2.name = "Example site"
        s2.save()
        # Saving must have invalidated the cached entry.
        site = Site.objects.get_current()
        self.assertEqual("Example site", site.name)

    def test_delete_all_sites_clears_cache(self):
        # When all site objects are deleted the cache should also
        # be cleared and get_current() should raise a DoesNotExist.
        self.assertIsInstance(Site.objects.get_current(), Site)
        Site.objects.all().delete()
        self.assertRaises(Site.DoesNotExist, Site.objects.get_current)

    @override_settings(ALLOWED_HOSTS=['example.com'])
    def test_get_current_site(self):
        # Test that the correct Site object is returned
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "example.com",
            "SERVER_PORT": "80",
        }
        site = get_current_site(request)
        self.assertIsInstance(site, Site)
        self.assertEqual(site.id, settings.SITE_ID)

        # Test that an exception is raised if the sites framework is installed
        # but there is no matching Site
        site.delete()
        self.assertRaises(ObjectDoesNotExist, get_current_site, request)

        # A RequestSite is returned if the sites framework is not installed
        with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}):
            site = get_current_site(request)
            self.assertIsInstance(site, RequestSite)
            self.assertEqual(site.name, "example.com")

    @override_settings(SITE_ID='', ALLOWED_HOSTS=['example.com'])
    def test_get_current_site_no_site_id(self):
        # Without a SITE_ID setting, get_current_site() falls back to
        # matching against the request's host.
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "example.com",
            "SERVER_PORT": "80",
        }
        del settings.SITE_ID
        site = get_current_site(request)
        self.assertEqual(site.name, "example.com")

    def test_domain_name_with_whitespaces(self):
        # Regression for #17320
        # Domain names are not allowed contain whitespace characters
        site = Site(name="test name", domain="test test")
        self.assertRaises(ValidationError, site.full_clean)
        site.domain = "test\ttest"
        self.assertRaises(ValidationError, site.full_clean)
        site.domain = "test\ntest"
        self.assertRaises(ValidationError, site.full_clean)

    def test_clear_site_cache(self):
        # clear_site_cache() must empty the module-level SITE_CACHE,
        # including entries keyed by domain as well as by SITE_ID.
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "example.com",
            "SERVER_PORT": "80",
        }
        self.assertEqual(models.SITE_CACHE, {})
        get_current_site(request)
        expected_cache = {self.site.id: self.site}
        self.assertEqual(models.SITE_CACHE, expected_cache)

        with self.settings(SITE_ID=''):
            get_current_site(request)
            expected_cache.update({self.site.domain: self.site})
            self.assertEqual(models.SITE_CACHE, expected_cache)

        clear_site_cache(Site, instance=self.site)
        self.assertEqual(models.SITE_CACHE, {})
class MiddlewareTest(TestCase):
    """Tests for CurrentSiteMiddleware."""

    def test_request(self):
        """The middleware must attach the current `site` to the request."""
        mw = CurrentSiteMiddleware()
        incoming = HttpRequest()
        mw.process_request(incoming)
        self.assertEqual(incoming.site.id, settings.SITE_ID)
| bsd-3-clause |
BernardFW/bernard | tests/issue_0014/test_sentence.py | 1 | 1154 | from bernard.i18n.translator import (
Sentence,
SentenceGroup,
SortingDict,
TransItem,
)
def test_sentence():
    """A Sentence only checks once it holds items, and renders one of them."""
    first = TransItem('FOO', 1, 'foo 1', {})
    second = TransItem('FOO', 1, 'foo 2', {})
    sentence = Sentence()
    assert not sentence.check()
    sentence.append(first)
    sentence.append(second)
    assert sentence.check()
    assert isinstance(sentence.render({}), str)
    assert sentence.render({}) in ['foo 1', 'foo 2']
def test_sentence_group():
    """A SentenceGroup checks only when complete and renders in index order."""
    first = TransItem('FOO', 1, 'foo 1', {})
    second = TransItem('FOO', 2, 'foo 2', {})
    group = SentenceGroup()
    assert not group.check()
    # Appending only the second item leaves the group incomplete.
    group.append(second)
    assert not group.check()
    group.append(first)
    assert group.check()
    assert group.render({}) == ['foo 1', 'foo 2']
def test_sorting_group():
    """A SortingDict groups appended items under their key and exposes them
    through extract().

    Note: the original test also built a second 'FOO' item that was never
    appended; that dead local has been removed.
    """
    item1 = TransItem('FOO', 1, 'foo 1', {})
    item3 = TransItem('BAR', 1, 'bar', {})
    sd = SortingDict()
    # Empty dict extracts to an empty mapping.
    assert sd.extract() == {}
    sd.append(item1)
    assert set(sd.extract().keys()) == {'FOO'}
    sd.append(item3)
    assert set(sd.extract().keys()) == {'FOO', 'BAR'}
    data = sd.extract()
    assert data['BAR'].render({}) == ['bar']
| agpl-3.0 |
factorlibre/OCB | addons/l10n_hr/__init__.py | 432 | 1164 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Module: l10n_hr
# Author: Goran Kliska
# mail: goran.kliska(AT)slobodni-programi.hr
# Copyright (C) 2011- Slobodni programi d.o.o., Zagreb
# Contributions:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/webapps/tool_shed/buildapp.py | 1 | 12585 | """
Provides factory methods to assemble the Galaxy web application
"""
import atexit
import config
import logging
import os
import sys
from inspect import isclass
from paste.request import parse_formvars
from paste.util import import_string
from paste import httpexceptions
from galaxy.util import asbool
import pkg_resources
import galaxy.webapps.tool_shed.model
import galaxy.webapps.tool_shed.model.mapping
import galaxy.web.framework.webapp
from galaxy.webapps.tool_shed.framework.middleware import hg
from galaxy import util
log = logging.getLogger( __name__ )
class CommunityWebApplication( galaxy.web.framework.webapp.WebApplication ):
    """Tool Shed flavour of the Galaxy web application.

    Currently identical to the base WebApplication; subclassed so tool-shed
    specific behavior can be added without touching the shared framework.
    """
    pass
def add_ui_controllers( webapp, app ):
    """
    Search for controllers in the 'galaxy.webapps.tool_shed.controllers'
    package and register each BaseUIController subclass with the webapp.

    Fixes over the previous version: the docstring named the wrong package,
    and an unused import (ControllerUnavailable) has been removed.
    """
    from galaxy.web.base.controller import BaseUIController
    import galaxy.webapps.tool_shed.controllers
    controller_dir = galaxy.webapps.tool_shed.controllers.__path__[0]
    for fname in os.listdir( controller_dir ):
        # Only consider public python modules (skip _private and non-.py files).
        if not fname.startswith( "_" ) and fname.endswith( ".py" ):
            name = fname[:-3]
            module_name = "galaxy.webapps.tool_shed.controllers." + name
            module = __import__( module_name )
            # __import__ returns the top-level package; walk down to the leaf module.
            for comp in module_name.split( "." )[1:]:
                module = getattr( module, comp )
            # Look for a controller inside the module and register it under
            # the module's name.
            for key in dir( module ):
                T = getattr( module, key )
                if isclass( T ) and T is not BaseUIController and issubclass( T, BaseUIController ):
                    webapp.add_ui_controller( name, T( app ) )
def app_factory( global_conf, **kwargs ):
    """Return a wsgi application serving the root object.

    Builds (or reuses) the Tool Shed UniverseApplication, registers UI and
    API controllers and routes on a CommunityWebApplication, then wraps the
    result in middleware and static-serving layers as configured.
    """
    # Create the Galaxy tool shed application unless passed in
    if 'app' in kwargs:
        app = kwargs.pop( 'app' )
    else:
        try:
            from galaxy.webapps.tool_shed.app import UniverseApplication
            app = UniverseApplication( global_conf=global_conf, **kwargs )
        except:
            import traceback, sys
            traceback.print_exc()
            sys.exit( 1 )
    # Ensure the application is shut down with the process.
    atexit.register( app.shutdown )
    # Create the universe WSGI application
    webapp = CommunityWebApplication( app, session_cookie='galaxycommunitysession', name="tool_shed" )
    add_ui_controllers( webapp, app )
    # Sharable repository URLs: /view/<owner>[/<name>[/<changeset_revision>]]
    webapp.add_route( '/view/{owner}', controller='repository', action='sharable_owner' )
    webapp.add_route( '/view/{owner}/{name}', controller='repository', action='sharable_repository' )
    webapp.add_route( '/view/{owner}/{name}/{changeset_revision}', controller='repository', action='sharable_repository_revision' )
    # Handle displaying tool help images and README file images for tools contained in repositories.
    webapp.add_route( '/repository/static/images/:repository_id/:image_file',
                      controller='repository',
                      action='display_image_in_repository',
                      repository_id=None,
                      image_file=None )
    # Generic fallback routes; the bare /:action route defaults to the
    # repository controller.
    webapp.add_route( '/:controller/:action', action='index' )
    webapp.add_route( '/:action', controller='repository', action='index' )
    # Route /repos/* to the hg controller so mercurial (hg) client requests
    # are handled.
    webapp.add_route( '/repos/*path_info', controller='hg', action='handle_request', path_info='/' )
    # Add the web API. # A good resource for RESTful services - http://routes.readthedocs.org/en/latest/restful.html
    webapp.add_api_controllers( 'galaxy.webapps.tool_shed.api', app )
    webapp.mapper.connect( 'api_key_retrieval',
                           '/api/authenticate/baseauth/',
                           controller='authenticate',
                           action='get_tool_shed_api_key',
                           conditions=dict( method=[ "GET" ] ) )
    webapp.mapper.resource( 'category',
                            'categories',
                            controller='categories',
                            name_prefix='category_',
                            path_prefix='/api',
                            parent_resources=dict( member_name='category', collection_name='categories' ) )
    webapp.mapper.resource( 'repository',
                            'repositories',
                            controller='repositories',
                            collection={ 'add_repository_registry_entry' : 'POST',
                                         'get_repository_revision_install_info' : 'GET',
                                         'get_ordered_installable_revisions' : 'GET',
                                         'remove_repository_registry_entry' : 'POST',
                                         'repository_ids_for_setting_metadata' : 'GET',
                                         'reset_metadata_on_repositories' : 'POST',
                                         'reset_metadata_on_repository' : 'POST' },
                            name_prefix='repository_',
                            path_prefix='/api',
                            new={ 'import_capsule' : 'POST' },
                            parent_resources=dict( member_name='repository', collection_name='repositories' ) )
    webapp.mapper.resource( 'repository_revision',
                            'repository_revisions',
                            member={ 'repository_dependencies' : 'GET',
                                     'export' : 'POST' },
                            controller='repository_revisions',
                            name_prefix='repository_revision_',
                            path_prefix='/api',
                            parent_resources=dict( member_name='repository_revision', collection_name='repository_revisions' ) )
    webapp.mapper.resource( 'user',
                            'users',
                            controller='users',
                            name_prefix='user_',
                            path_prefix='/api',
                            parent_resources=dict( member_name='user', collection_name='users' ) )
    webapp.finalize_config()
    # Wrap the webapp in some useful middleware
    if kwargs.get( 'middleware', True ):
        webapp = wrap_in_middleware( webapp, global_conf, **kwargs )
    if kwargs.get( 'static_enabled', True ):
        webapp = wrap_in_static( webapp, global_conf, **kwargs )
    # Close any pooled database connections before forking
    try:
        galaxy.webapps.tool_shed.model.mapping.metadata.engine.connection_provider._pool.dispose()
    except:
        pass
    # Return
    return webapp
def wrap_in_middleware( app, global_conf, **local_conf ):
    """Based on the configuration wrap `app` in a set of common and useful middleware.

    Note on ordering: the first middleware applied here is the innermost
    (closest to the application); the last applied is the outermost and sees
    the request first.
    """
    # Merge the global and local configurations; local values take precedence.
    conf = global_conf.copy()
    conf.update( local_conf )
    debug = asbool( conf.get( 'debug', False ) )
    # First put into place httpexceptions, which must be most closely
    # wrapped around the application (it can interact poorly with
    # other middleware):
    app = httpexceptions.make_middleware( app, conf )
    log.debug( "Enabling 'httpexceptions' middleware" )
    # If we're using remote_user authentication, add middleware that
    # protects Galaxy from improperly configured authentication in the
    # upstream server
    if asbool(conf.get( 'use_remote_user', False )):
        from galaxy.webapps.tool_shed.framework.middleware.remoteuser import RemoteUser
        app = RemoteUser( app, maildomain = conf.get( 'remote_user_maildomain', None ),
                          display_servers = util.listify( conf.get( 'display_servers', '' ) ),
                          admin_users = conf.get( 'admin_users', '' ).split( ',' ) )
        log.debug( "Enabling 'remote user' middleware" )
    # The recursive middleware allows for including requests in other
    # requests or forwarding of requests, all on the server side.
    if asbool(conf.get('use_recursive', True)):
        from paste import recursive
        app = recursive.RecursiveMiddleware( app, conf )
        log.debug( "Enabling 'recursive' middleware" )
    # Various debug middleware that can only be turned on if the debug
    # flag is set, either because they are insecure or greatly hurt
    # performance
    if debug:
        # Middleware to check for WSGI compliance
        if asbool( conf.get( 'use_lint', True ) ):
            from paste import lint
            app = lint.make_middleware( app, conf )
            log.debug( "Enabling 'lint' middleware" )
        # Middleware to run the python profiler on each request
        if asbool( conf.get( 'use_profile', False ) ):
            # Bug fix: ProfileMiddleware lives in paste.debug.profile; the
            # stdlib `profile` module imported previously has no
            # ProfileMiddleware and raised AttributeError when enabled.
            from paste.debug import profile
            app = profile.ProfileMiddleware( app, conf )
            log.debug( "Enabling 'profile' middleware" )
        # Middleware that intercepts print statements and shows them on the
        # returned page
        if asbool( conf.get( 'use_printdebug', True ) ):
            from paste.debug import prints
            app = prints.PrintDebugMiddleware( app, conf )
            log.debug( "Enabling 'print debug' middleware" )
    if debug and asbool( conf.get( 'use_interactive', False ) ):
        # Interactive exception debugging, scary dangerous if publicly
        # accessible, if not enabled we'll use the regular error printing
        # middleware.
        pkg_resources.require( "WebError" )
        from weberror import evalexception
        app = evalexception.EvalException( app, conf,
                                           templating_formatters=build_template_error_formatters() )
        log.debug( "Enabling 'eval exceptions' middleware" )
    else:
        # Not in interactive debug mode, just use the regular error middleware
        import galaxy.web.framework.middleware.error
        app = galaxy.web.framework.middleware.error.ErrorMiddleware( app, conf )
        log.debug( "Enabling 'error' middleware" )
    # Transaction logging (apache access.log style)
    if asbool( conf.get( 'use_translogger', True ) ):
        from paste.translogger import TransLogger
        app = TransLogger( app )
        log.debug( "Enabling 'trans logger' middleware" )
    # X-Forwarded-Host handling
    from galaxy.web.framework.middleware.xforwardedhost import XForwardedHostMiddleware
    app = XForwardedHostMiddleware( app )
    log.debug( "Enabling 'x-forwarded-host' middleware" )
    # Mercurial repository request handling for the tool shed.
    app = hg.Hg( app, conf )
    log.debug( "Enabling hg middleware" )
    return app
def wrap_in_static( app, global_conf, **local_conf ):
    """Wrap `app` in a URLMap that serves the static content directories."""
    from paste.urlmap import URLMap
    from galaxy.web.framework.middleware.static import CacheableStaticURLParser as Static
    # Merge the global and local configurations; local values take precedence.
    settings = global_conf.copy()
    settings.update( local_conf )
    # Optional cache lifetime (seconds) applied to all static responses.
    cache_time = settings.get( "static_cache_time", None )
    if cache_time is not None:
        cache_time = int( cache_time )
    url_map = URLMap()
    # Anything not matched by a static mapping below goes to the dynamic app.
    url_map["/"] = app
    # ( URL prefix, config key, default filesystem location )
    static_mappings = [
        ( "/static", "static_dir", "./static/" ),
        ( "/images", "static_images_dir", "./static/images" ),
        ( "/static/scripts", "static_scripts_dir", "./static/scripts/" ),
        ( "/static/style", "static_style_dir", "./static/style/blue" ),
        ( "/favicon.ico", "static_favicon_dir", "./static/favicon.ico" ),
        ( "/robots.txt", "static_robots_txt", "./static/robots.txt" ),
    ]
    for url, conf_key, default in static_mappings:
        url_map[url] = Static( settings.get( conf_key, default ), cache_time )
    # The URL mapper becomes the root webapp.
    return url_map
def build_template_error_formatters():
    """
    Assemble the list of template error formatters handed to WebError. On an
    error, WebError calls each formatter in turn until one returns a value,
    which becomes the content displayed on the error page.
    """
    import mako.exceptions

    def format_mako_error( exc_value ):
        # Mako compile/syntax problems, and attribute access on mako's
        # Undefined placeholder, are rendered with mako's own HTML template.
        mako_errors = ( mako.exceptions.CompileException, mako.exceptions.SyntaxException )
        undefined_access = ( isinstance( exc_value, AttributeError )
                             and exc_value.args[0].startswith( "'Undefined' object has no attribute" ) )
        if isinstance( exc_value, mako_errors ) or undefined_access:
            return mako.exceptions.html_error_template().render( full=False, css=False )

    return [ format_mako_error ]
| gpl-3.0 |
glennw/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/ihatexml.py | 1727 | 16581 | from __future__ import absolute_import, division, unicode_literals
import re
import warnings
from .constants import DataLossWarning
baseChar = """
[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] |
[#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] |
[#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] |
[#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 |
[#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] |
[#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] |
[#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] |
[#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] |
[#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 |
[#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] |
[#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] |
[#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D |
[#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] |
[#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] |
[#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] |
[#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] |
[#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] |
[#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] |
[#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 |
[#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] |
[#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] |
[#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] |
[#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] |
[#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] |
[#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] |
[#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] |
[#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] |
[#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] |
[#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] |
[#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A |
#x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 |
#x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] |
#x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] |
[#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] |
[#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C |
#x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 |
[#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] |
[#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] |
[#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 |
[#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] |
[#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B |
#x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE |
[#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] |
[#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 |
[#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] |
[#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"""
ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]"""
combiningCharacter = """
[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] |
[#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 |
[#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] |
[#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] |
#x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] |
[#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] |
[#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 |
#x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] |
[#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC |
[#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] |
#x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] |
[#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] |
[#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] |
[#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] |
[#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] |
[#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] |
#x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 |
[#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] |
#x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] |
[#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] |
[#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] |
#x3099 | #x309A"""
digit = """
[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] |
[#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] |
[#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] |
[#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"""
extender = """
#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 |
#[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"""
letter = " | ".join([baseChar, ideographic])
# Without the
name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter,
extender])
nameFirst = " | ".join([letter, "_"])
reChar = re.compile(r"#x([\d|A-F]{4,4})")
reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]")
def charStringToList(chars):
    """Parse a ' | ' separated XML character-class spec into a normalised
    list of inclusive [low, high] code point ranges."""
    ranges = []
    for token in (part.strip() for part in chars.split(" | ")):
        parsed = None
        # Try a single "#xNNNN" code point first, then a "[#xNNNN-#xNNNN]" range.
        for pattern in (reChar, reCharRange):
            match = pattern.match(token)
            if match is not None:
                parsed = [hexToInt(group) for group in match.groups()]
                if len(parsed) == 1:
                    # A lone code point becomes a degenerate [n, n] range.
                    parsed = parsed * 2
                break
        if parsed is None:
            # Anything else must be a literal single character (".", "-", "_").
            assert len(token) == 1
            parsed = [ord(token)] * 2
        ranges.append(parsed)
    return normaliseCharList(ranges)
def normaliseCharList(charList):
    """Sort and merge a list of inclusive [start, end] ranges.

    Overlapping and adjacent ranges (gap of at most one code point) are
    coalesced. Returns a new list; the input and its sublists are left
    unmodified.
    """
    charList = sorted(charList)
    for item in charList:
        assert item[1] >= item[0]
    rv = []
    i = 0
    while i < len(charList):
        j = 1
        # Copy so merging never mutates the caller's sublists.
        rv.append(list(charList[i]))
        while i + j < len(charList) and charList[i + j][0] <= rv[-1][1] + 1:
            # Bug fix: extend, never shrink. The original assigned the next
            # range's end unconditionally, so a range fully contained in the
            # current one truncated it and dropped code points.
            rv[-1][1] = max(rv[-1][1], charList[i + j][1])
            j += 1
        i += j
    return rv
# We don't really support characters above the BMP :(
max_unicode = int("FFFF", 16)
def missingRanges(charList, max_code=0xFFFF):
    """Return the inclusive [low, high] gaps not covered by `charList`.

    `charList` must be a normalised (sorted, non-overlapping, non-adjacent)
    list of [start, end] pairs. `max_code` defaults to the top of the BMP,
    matching the module-level `max_unicode` constant.
    """
    rv = []
    # Bug fix: the original compared the first *pair* to the integer 0
    # (always True for a list), which emitted an invalid [0, -1] range
    # whenever coverage actually starts at code point 0.
    if charList[0][0] != 0:
        rv.append([0, charList[0][0] - 1])
    for i, item in enumerate(charList[:-1]):
        rv.append([item[1] + 1, charList[i + 1][0] - 1])
    if charList[-1][1] != max_code:
        rv.append([charList[-1][1] + 1, max_code])
    return rv
def listToRegexpStr(charList):
    """Render a list of inclusive [low, high] code point ranges as a regexp
    character-class string like "[a-zXY]"."""
    parts = []
    for low, high in charList:
        if low == high:
            # Degenerate range: a single escaped character.
            parts.append(escapeRegexp(chr(low)))
        else:
            parts.append("%s-%s" % (escapeRegexp(chr(low)),
                                    escapeRegexp(chr(high))))
    return "[%s]" % "".join(parts)
def hexToInt(hex_str):
    """Convert a hexadecimal digit string (e.g. "0041") to an integer."""
    return int(hex_str, base=16)
def escapeRegexp(string):
    """Backslash-escape the characters that are special inside a regexp
    character class or pattern."""
    special = ".^$*+?{}[]|()-"
    return "".join("\\" + char if char in special else char
                   for char in string)
# output from the above
nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0
f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
nonXmlNameFirstBMPRegexp = re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3
040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
# Simpler things
nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\-\'()+,./:=?;!*#@$_%]")
class InfosetFilter(object):
    """Coerce names, comments, characters and pubids so they are legal in the
    XML infoset, emitting a DataLossWarning whenever a coercion loses data.

    Illegal characters are replaced with reversible "UXXXXX" escapes (see
    escapeChar/unescapeChar).
    """
    # Matches the "UXXXXX" escape sequences produced by escapeChar().
    replacementRegexp = re.compile(r"U[\dA-F]{5,5}")
    def __init__(self, replaceChars=None,
                 dropXmlnsLocalName=False,
                 dropXmlnsAttrNs=False,
                 preventDoubleDashComments=False,
                 preventDashAtCommentEnd=False,
                 replaceFormFeedCharacters=True,
                 preventSingleQuotePubid=False):
        # NOTE(review): `replaceChars` is accepted but never stored or used,
        # and `preventDashAtCommentEnd` is stored but never consulted by
        # coerceComment below — confirm intent against callers.
        self.dropXmlnsLocalName = dropXmlnsLocalName
        self.dropXmlnsAttrNs = dropXmlnsAttrNs
        self.preventDoubleDashComments = preventDoubleDashComments
        self.preventDashAtCommentEnd = preventDashAtCommentEnd
        self.replaceFormFeedCharacters = replaceFormFeedCharacters
        self.preventSingleQuotePubid = preventSingleQuotePubid
        # Cache of original char -> "UXXXXX" replacement, filled by escapeChar().
        self.replaceCache = {}
    def coerceAttribute(self, name, namespace=None):
        # Attributes that would (re)declare namespaces are dropped entirely
        # (returned as None); everything else is coerced to a legal XML name.
        if self.dropXmlnsLocalName and name.startswith("xmlns:"):
            warnings.warn("Attributes cannot begin with xmlns", DataLossWarning)
            return None
        elif (self.dropXmlnsAttrNs and
              namespace == "http://www.w3.org/2000/xmlns/"):
            warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning)
            return None
        else:
            return self.toXmlName(name)
    def coerceElement(self, name, namespace=None):
        # Element names only need the XML-name coercion.
        return self.toXmlName(name)
    def coerceComment(self, data):
        # XML comments may not contain "--"; break each occurrence apart.
        if self.preventDoubleDashComments:
            while "--" in data:
                warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning)
                data = data.replace("--", "- -")
        return data
    def coerceCharacters(self, data):
        # U+000C (form feed) is not a legal XML character; replace with space,
        # warning once per occurrence.
        if self.replaceFormFeedCharacters:
            for i in range(data.count("\x0C")):
                warnings.warn("Text cannot contain U+000C", DataLossWarning)
                data = data.replace("\x0C", " ")
        # Other non-xml characters
        return data
    def coercePubid(self, data):
        # Replace every character outside the pubid alphabet with its escape.
        dataOutput = data
        for char in nonPubidCharRegexp.findall(data):
            warnings.warn("Coercing non-XML pubid", DataLossWarning)
            replacement = self.getReplacementCharacter(char)
            dataOutput = dataOutput.replace(char, replacement)
        if self.preventSingleQuotePubid and dataOutput.find("'") >= 0:
            warnings.warn("Pubid cannot contain single quote", DataLossWarning)
            dataOutput = dataOutput.replace("'", self.getReplacementCharacter("'"))
        return dataOutput
    def toXmlName(self, name):
        # The first character of an XML name has stricter rules than the rest,
        # so it is checked against a separate regexp.
        nameFirst = name[0]
        nameRest = name[1:]
        m = nonXmlNameFirstBMPRegexp.match(nameFirst)
        if m:
            warnings.warn("Coercing non-XML name", DataLossWarning)
            nameFirstOutput = self.getReplacementCharacter(nameFirst)
        else:
            nameFirstOutput = nameFirst
        nameRestOutput = nameRest
        # Deduplicate so each offending character is replaced (and warned
        # about) once.
        replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest))
        for char in replaceChars:
            warnings.warn("Coercing non-XML name", DataLossWarning)
            replacement = self.getReplacementCharacter(char)
            nameRestOutput = nameRestOutput.replace(char, replacement)
        return nameFirstOutput + nameRestOutput
    def getReplacementCharacter(self, char):
        # escapeChar() populates self.replaceCache, so misses become hits.
        if char in self.replaceCache:
            replacement = self.replaceCache[char]
        else:
            replacement = self.escapeChar(char)
        return replacement
    def fromXmlName(self, name):
        # Invert toXmlName: expand each distinct "UXXXXX" escape back to its
        # original character.
        for item in set(self.replacementRegexp.findall(name)):
            name = name.replace(item, self.unescapeChar(item))
        return name
    def escapeChar(self, char):
        # Five hex digits cover the BMP; cache for getReplacementCharacter().
        replacement = "U%05X" % ord(char)
        self.replaceCache[char] = replacement
        return replacement
    def unescapeChar(self, charcode):
        # Strip the leading "U" and parse the remaining hex digits.
        return chr(int(charcode[1:], 16))
| mpl-2.0 |
atosatto/ansible | test/sanity/validate-modules/schema.py | 11 | 4251 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Matt Martz <matt@sivel.net>
# Copyright (C) 2015 Rackspace US, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from voluptuous import PREVENT_EXTRA, Any, Required, Schema
from ansible.module_utils.six import string_types
list_string_types = list(string_types)
suboption_schema = Schema(
{
Required('description'): Any(list_string_types, *string_types),
'required': bool,
'choices': list,
'aliases': Any(list, *string_types),
'version_added': Any(float, *string_types),
'default': Any(None, float, int, bool, list, dict, *string_types),
# Note: Types are strings, not literal bools, such as True or False
'type': Any(None, "bool")
},
extra=PREVENT_EXTRA
)
# This generates list of dicts with keys from string_types and suboption_schema value
# for example in Python 3: {str: suboption_schema}
list_dict_suboption_schema = [{str_type: suboption_schema} for str_type in string_types]
option_schema = Schema(
{
Required('description'): Any(list_string_types, *string_types),
'required': bool,
'choices': list,
'aliases': Any(list, *string_types),
'version_added': Any(float, *string_types),
'default': Any(None, float, int, bool, list, dict, *string_types),
'suboptions': Any(None, *list_dict_suboption_schema),
# Note: Types are strings, not literal bools, such as True or False
'type': Any(None, "bool")
},
extra=PREVENT_EXTRA
)
# This generates list of dicts with keys from string_types and option_schema value
# for example in Python 3: {str: option_schema}
list_dict_option_schema = [{str_type: option_schema} for str_type in string_types]
def doc_schema(module_name):
    """Return the voluptuous Schema used to validate a module's DOCUMENTATION.

    Deprecated modules are prefixed with an underscore on disk; strip it so
    the documented `module` value still has to match the real name.
    """
    if module_name.startswith('_'):
        module_name = module_name[1:]
    # extra=PREVENT_EXTRA rejects any documentation key not listed here.
    return Schema(
        {
            Required('module'): module_name,
            'deprecated': Any(*string_types),
            Required('short_description'): Any(*string_types),
            Required('description'): Any(list_string_types, *string_types),
            Required('version_added'): Any(float, *string_types),
            Required('author'): Any(None, list_string_types, *string_types),
            'notes': Any(None, list_string_types),
            'requirements': list_string_types,
            'todo': Any(None, list_string_types, *string_types),
            'options': Any(None, *list_dict_option_schema),
            'extends_documentation_fragment': Any(list_string_types, *string_types)
        },
        extra=PREVENT_EXTRA
    )
def metadata_schema(deprecated):
    """Return the Schema used to validate a module's ANSIBLE_METADATA.

    Deprecated modules may only carry the 'deprecated' status; all other
    modules may use any of the defined status values.
    """
    if deprecated:
        valid_status = Any('deprecated')
    else:
        valid_status = Any('stableinterface', 'preview', 'deprecated', 'removed')
    return Schema(
        {
            Required('status'): [valid_status],
            Required('metadata_version'): '1.0',
            Required('supported_by'): Any('core', 'community', 'curated')
        }
    )
# Things to add soon
####################
# 1) Validate RETURN, including `contains` if `type: complex`
# This will improve documentation, though require fair amount of module tidyup
# Possible Future Enhancements
##############################
# 1) Don't allow empty options for choices, aliases, etc
# 2) If type: bool ensure choices isn't set - perhaps use Exclusive
# 3) both version_added should be quoted floats
# 4) Use Recursive Schema: https://github.com/alecthomas/voluptuous/issues/128 though don't allow two layers
# Tool that takes JSON and generates RETURN skeleton (needs to support complex structures)
| gpl-3.0 |
ClearCorp/odoo-clearcorp | TODO-7.0/account_parser_type/account_parser_type.py | 4 | 1762 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import netsvc
from osv import fields, orm
import tools
from tools.translate import _
from account_banking.parsers import models
def parser_types(*args, **kwargs):
    '''Delay evaluation of parser types until start of wizard, to allow
    depending modules to initialize and add their parsers to the list.

    Accepts and ignores any positional/keyword arguments so it can be used
    directly as the selection callable of a ``fields.selection`` column.
    '''
    return models.parser_type.get_parser_types()
class BankAccounts(orm.Model):
    # Extend res.partner.bank with the statement parser selection used when
    # importing bank extracts.
    _inherit = "res.partner.bank"
    _columns = {
        # Selection options are resolved lazily via parser_types() above.
        # The field becomes read-only once the account is 'ready' or in 'error'.
        'parser': fields.selection(
            parser_types, 'Parser type',
            states={
                'ready': [('readonly', True)],
                'error': [('readonly', True)],
            }, help="Parser type for import bank extract",
        ),
    }
| agpl-3.0 |
pschmitt/home-assistant | homeassistant/components/tahoma/__init__.py | 15 | 5024 | """Support for Tahoma devices."""
from collections import defaultdict
import logging
from requests.exceptions import RequestException
from tahoma_api import Action, TahomaApi
import voluptuous as vol
from homeassistant.const import CONF_EXCLUDE, CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DOMAIN = "tahoma"
TAHOMA_ID_FORMAT = "{}_{}"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_EXCLUDE, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
TAHOMA_COMPONENTS = ["binary_sensor", "cover", "lock", "scene", "sensor", "switch"]
TAHOMA_TYPES = {
"io:AwningValanceIOComponent": "cover",
"io:ExteriorVenetianBlindIOComponent": "cover",
"io:DiscreteGarageOpenerIOComponent": "cover",
"io:DiscreteGarageOpenerWithPartialPositionIOComponent": "cover",
"io:HorizontalAwningIOComponent": "cover",
"io:GarageOpenerIOComponent": "cover",
"io:LightIOSystemSensor": "sensor",
"io:OnOffIOComponent": "switch",
"io:OnOffLightIOComponent": "switch",
"io:RollerShutterGenericIOComponent": "cover",
"io:RollerShutterUnoIOComponent": "cover",
"io:RollerShutterVeluxIOComponent": "cover",
"io:RollerShutterWithLowSpeedManagementIOComponent": "cover",
"io:SomfyBasicContactIOSystemSensor": "sensor",
"io:SomfyContactIOSystemSensor": "sensor",
"io:TemperatureIOSystemSensor": "sensor",
"io:VerticalExteriorAwningIOComponent": "cover",
"io:VerticalInteriorBlindVeluxIOComponent": "cover",
"io:WindowOpenerVeluxIOComponent": "cover",
"opendoors:OpenDoorsSmartLockComponent": "lock",
"rtds:RTDSContactSensor": "sensor",
"rtds:RTDSMotionSensor": "sensor",
"rtds:RTDSSmokeSensor": "smoke",
"rts:BlindRTSComponent": "cover",
"rts:CurtainRTSComponent": "cover",
"rts:DualCurtainRTSComponent": "cover",
"rts:ExteriorVenetianBlindRTSComponent": "cover",
"rts:GarageDoor4TRTSComponent": "switch",
"rts:LightRTSComponent": "switch",
"rts:RollerShutterRTSComponent": "cover",
"rts:OnOffRTSComponent": "switch",
"rts:VenetianBlindRTSComponent": "cover",
"somfythermostat:SomfyThermostatTemperatureSensor": "sensor",
"somfythermostat:SomfyThermostatHumiditySensor": "sensor",
"zwave:OnOffLightZWaveComponent": "switch",
}
def setup(hass, config):
    """Activate Tahoma component.

    Logs in to the Tahoma cloud API, collects the configured devices and
    scenes into hass.data[DOMAIN], then triggers platform discovery.
    Returns False (aborting setup) on any API/login failure.
    """
    conf = config[DOMAIN]
    username = conf.get(CONF_USERNAME)
    password = conf.get(CONF_PASSWORD)
    exclude = conf.get(CONF_EXCLUDE)
    try:
        api = TahomaApi(username, password)
    except RequestException:
        _LOGGER.exception("Error when trying to log in to the Tahoma API")
        return False
    try:
        api.get_setup()
        devices = api.get_devices()
        scenes = api.get_action_groups()
    except RequestException:
        _LOGGER.exception("Error when getting devices from the Tahoma API")
        return False
    # Shared state consumed by the platform modules (cover, sensor, ...):
    # devices are grouped by the component that will handle them.
    hass.data[DOMAIN] = {"controller": api, "devices": defaultdict(list), "scenes": []}
    for device in devices:
        _device = api.get_device(device)
        # Skip any device whose type contains a configured exclude substring.
        if all(ext not in _device.type for ext in exclude):
            device_type = map_tahoma_device(_device)
            if device_type is None:
                _LOGGER.warning(
                    "Unsupported type %s for Tahoma device %s",
                    _device.type,
                    _device.label,
                )
                continue
            hass.data[DOMAIN]["devices"][device_type].append(_device)
    for scene in scenes:
        hass.data[DOMAIN]["scenes"].append(scene)
    # Kick off setup of every supported platform; each reads hass.data[DOMAIN].
    for component in TAHOMA_COMPONENTS:
        discovery.load_platform(hass, component, DOMAIN, {}, config)
    return True
def map_tahoma_device(tahoma_device):
    """Map Tahoma device types to Home Assistant components.

    Returns the component name (e.g. "cover") from TAHOma_TYPES' lookup
    table TAHOMA_TYPES, or None when the device type is unsupported.
    """
    return TAHOMA_TYPES.get(tahoma_device.type)
class TahomaDevice(Entity):
    """Representation of a Tahoma device entity.

    Base class for the platform-specific entities; holds the raw tahoma_api
    device object and the shared controller used to send commands.
    """
    def __init__(self, tahoma_device, controller):
        """Initialize the device.

        tahoma_device: device object obtained from the Tahoma API setup.
        controller: the shared TahomaApi instance used to apply actions.
        """
        self.tahoma_device = tahoma_device
        self.controller = controller
        # Entity name defaults to the label configured in the Tahoma app.
        self._name = self.tahoma_device.label
    @property
    def name(self):
        """Return the name of the device."""
        return self._name
    @property
    def device_state_attributes(self):
        """Return the state attributes of the device."""
        # Expose the device URL so users can map entities back to Tahoma.
        return {"tahoma_device_id": self.tahoma_device.url}
    def apply_action(self, cmd_name, *args):
        """Apply Action to Device."""
        # Wrap the single command in an Action and send it through the API.
        action = Action(self.tahoma_device.url)
        action.add_command(cmd_name, *args)
        self.controller.apply_actions("HomeAssistant", [action])
| apache-2.0 |
funkaoshi/randomcharacter | fifth/backgrounds.py | 3 | 15697 | import random
from fifth.languages import LANGUAGES
from fifth.processor import CharacterProcessor
class AbstractBackground(object):
    """Base class for 5e character backgrounds.

    Subclasses supply the roll tables (PERSONALITY_TRAIT, IDEAL, BOND, FLAW,
    and optionally SPECIALTY / DEFINING_EVENT) plus PROFICIENCIES and
    EQUIPMENT; the methods here pick random entries from them.
    """

    def name(self):
        """The background's display name: NAME override or the class name."""
        return getattr(self, 'NAME', self.__class__.__name__)

    def personality_trait(self):
        """A random personality trait from the subclass's table."""
        return random.choice(self.PERSONALITY_TRAIT)

    def ideal(self):
        """A random ideal from the subclass's table."""
        return random.choice(self.IDEAL)

    def flaw(self):
        """A random flaw from the subclass's table."""
        return random.choice(self.FLAW)

    def bond(self):
        """A random bond from the subclass's table."""
        return random.choice(self.BOND)

    def specialty(self):
        """A random specialty, or "" for backgrounds without one."""
        if not hasattr(self, 'SPECIALTY'):
            return ""
        return random.choice(self.SPECIALTY)

    def defining_event(self):
        """A random defining event, or "" for backgrounds without one."""
        if not hasattr(self, 'DEFINING_EVENT'):
            return ""
        return random.choice(self.DEFINING_EVENT)

    def proficiencies(self):
        """The background's fixed skill proficiencies."""
        return self.PROFICIENCIES

    def languages(self):
        """Extra languages granted; none by default."""
        return []

    def equipment(self):
        """The background's starting equipment list."""
        return self.EQUIPMENT
class Acolyte(AbstractBackground):
    """The Acolyte background: roll tables for personality traits, ideals,
    bonds and flaws, plus fixed proficiencies, two bonus languages and
    starting equipment."""
    PERSONALITY_TRAIT = [
        "I idolize a particular hero of my faith, and constantly refer to that person's deeds and example.",
        "I can find common ground between the fiercest enemies, empathizing with them and always working toward peace.",
        "I see omens in every event and action. The gods try to speak to us, we just need to listen",
        "Nothing can shake my optimistic attitude.",
        "I quote sacred texts and proverbs in almost every situation.",
        # Bug fix: a missing trailing comma on the next entry used to merge it
        # with the following one via implicit string concatenation, leaving
        # this table with 9 entries (one garbled) instead of 10.
        "I misquote sacred texts and proverbs in almost every situation.",
        "I am tolerant of other faiths and respect the worship of other gods.",
        "I am intolerant of other faiths and condemn the worship of other gods.",
        "I've spent so long in the temple that I have little practical experience dealing with people in the outside world.",
        "I've enjoyed fine food, drink, and high society among my temple's elite. Rough living grates on me."
    ]
    IDEAL = [
        "Tradition. The ancient traditions of worship and sacrifice must be preserved and upheld.",
        "Charity. I always try to help those in need, no matter what the personal cost.",
        "Power. I hope to one day rise to the top of my faith's religious hierarchy.",
        "Aspiration. I seek to prove myself worthy of my god's favour by matching my actions against his or her teachings.",
        "Change. We must help bring about the changes the gods are constantly working in the world.",
        "Faith. I trust that my deity will guide my actions. I have faith that if I work hard, things will go well.",
    ]
    BOND = [
        "I would die to recover an ancient relic of my faith that was lost long ago.",
        "I will someday get revenge on the corrupt temple hierarchy who branded me a heretic.",
        "I owe my life to the priest who took me in when my parents died.",
        "Everything I do is for the common people.",
        "I will do anything to protect the temple where I served.",
        "I seek to preserve a sacred text that my enemies consider heretical and seek to destroy.",
    ]
    FLAW = [
        "I judge others harshly, and myself even more severely.",
        "I put too much trust in those who wield power within my temple's hierarchy.",
        "My piety sometimes leads me to blindly trust those that profess faith in my god.",
        "I am suspicious of strangers and expect the worst of them.",
        "Once I pick a goal, I become obsessed with it to the detriment of everything else in my life.",
        "I am inflexible in my thinking.",
    ]
    PROFICIENCIES = ['Insight', 'Religion']
    def languages(self):
        # Acolytes learn two additional languages of the player's choice.
        return random.sample(LANGUAGES, 2)
    def equipment(self):
        return [
            'Holy symbol',
            random.choice(['Prayer book', 'Prayer wheel']),
            '5 sticks of incense',
            'Set of common clothes',
            '15gp'
        ]
class Criminal(AbstractBackground):
    """Criminal background: d8 specialty/trait tables, d6 ideal/bond/flaw tables."""

    SPECIALTY = [
        "Blackmailer",
        "Burglar",
        "Enforcer",
        "Fence",
        "Highway robber",
        "Hired killer",
        "Pickpocket",
        "Smuggler"
    ]

    PERSONALITY_TRAIT = [
        "I always have a plan for what to do when things go wrong.",
        "I am always calm, no matter what the situation. I never raise my voice or let my emotions control me.",
        "The first thing I do in a new place is note the locations of everything valuable—or where such things could be hidden.",
        "I would rather make a new friend than a new enemy.",
        "I am incredibly slow to trust. Those who seem the fairest often have the most to hide.",
        "I don't pay attention to the risks in a situation. Never tell me the odds.",
        "The best way to get me to do something is to tell me I can't do it.",
        "I blow up at the slightest insult."
    ]

    IDEAL = [
        "Honor. I don't steal from others in the trade.",
        "Freedom. Chains are meant to be broken, as are those who would forge them.",
        "Charity. I steal from the wealthy so that I can help people in need.",
        "Greed. I will do whatever it takes to become wealthy.",
        "People. I'm loyal to my friends, not to any ideals, and everyone else can take a trip down the Styx for all I care.",
        "Redemption. There's a spark of good in everyone."
    ]

    BOND = [
        "I'm trying to pay off an old debt I owe to a generous benefactor.",
        "My ill-gotten gains go to support my family.",
        "Something important was taken from me, and I aim to steal it back.",
        "I will become the greatest thief that ever lived.",
        "I'm guilty of a terrible crime. I hope I can redeem myself for it.",
        # TYPO FIX: "because of I mistake" -> "because of a mistake".
        "Someone I loved died because of a mistake I made. That will never happen again."
    ]

    FLAW = [
        "When I see something valuable, I can't think about anything but how to steal it.",
        "When faced with a choice between money and my friends, I usually choose the money.",
        "If there's a plan, I'll forget it. If I don't forget it, I'll ignore it.",
        "I have a 'tell' that reveals when I'm lying.",
        "I turn tail and run when things look bad.",
        "An innocent person is in prison for a crime that I committed. I'm okay with that."
    ]

    PROFICIENCIES = ['Deception', 'Stealth']
    # BUG FIX: a missing comma between 'crowbar' and 'dark clothes with hood'
    # concatenated them into the single item 'crowbardark clothes with hood'.
    EQUIPMENT = ['crowbar', 'dark clothes with hood', '15gp']
class FolkHero(AbstractBackground):
    """Folk Hero background: pure roll-table data plus fixed gear.

    NAME overrides the default display name (the class name lacks the
    space); DEFINING_EVENT is a d10 table, PERSONALITY_TRAIT a d8 table,
    the rest are d6 tables.
    """
    NAME = "Folk Hero"
    DEFINING_EVENT = [
        "I stood up to a tyrant's agents.",
        "I saved people during a natural disaster.",
        "I stood alone against a terrible monster.",
        "I stole from a corrupt merchant to help the poor.",
        "I led a militia to fight off an invading army.",
        "I broke into a tyrant's castle and stole weapons to arm the people.",
        "I trained the peasantry to use farm implements as weapons against a tyrant's soldiers.",
        "A lord rescinded an unpopular decree after I led a symbolic act of protest against it.",
        "A celestial, fey, or similar creature gave me a blessing or revealed my secret origin.",
        "Recruited into a lord's army, I rose to leadership and was commended for my heroism."
    ]
    PERSONALITY_TRAIT = [
        "I judge people by their actions, not their words.",
        "If someone is in trouble, I'm always ready to lend help.",
        "When I set my mind to something, I follow through no matter what gets in my way.",
        "I have a strong sense of fair play and always try to find the most equitable solution to arguments.",
        "I'm confident in my own abilities and do what I can to instill confidence in others.",
        "Thinking is for other people. I prefer action.",
        "I misuse long words in an attempt to sound smarter.",
        "I get bored easily. When am I going to get on with my destiny?"
    ]
    IDEAL = [
        "Respect. People deserve to be treated with dignity and respect.",
        "Fairness. No one should get preferential treatment before the law, and no one is above the law.",
        "Freedom. Tyrants must not be allowed to oppress the people.",
        "Might. If I become strong, I can take what I want—what I deserve.",
        "Sincerity. There's no good in pretending to be something I'm not.",
        "Destiny. Nothing and no one can steer me away from my higher calling."
    ]
    BOND = [
        "I have a family, but I have no idea where they are. One day, I hope to see them again.",
        "I worked the land, I love the land, and I will protect the land.",
        "A proud noble once gave me a horrible beating, and I will take my revenge on any bully I encounter.",
        "My tools are symbols of my past life, and I carry them so that I will never forget my roots.",
        "I protect those who cannot protect themselves.",
        "I wish my childhood sweetheart had come with me to pursue my destiny."
    ]
    FLAW = [
        "The tyrant who rules my land will stop at nothing to see me killed.",
        "I'm convinced of the significance of my destiny, and blind to my shortcomings and the risk of failure.",
        "The people who knew me when I was young know my shameful secret, so I can never go home again.",
        "I have a weakness for the vices of the city, especially hard drink.",
        "Secretly, I believe that things would be better if I were a tyrant lording over the land.",
        "I have trouble trusting in my allies."
    ]
    PROFICIENCIES = ["Animal Handling", "Survival"]
    EQUIPMENT = ["shovel", "iron pot", "common clothes", "10gp"]
class Sage(AbstractBackground):
    """Sage background: d8 specialty/trait tables, d6 ideal/bond/flaw tables.

    Unlike most backgrounds, a Sage also knows two random extra languages.
    """
    SPECIALTY = [
        "Alchemist",
        "Astronomer",
        "Discredited academic",
        "Librarian",
        "Professor",
        "Researcher",
        "Wizard's apprentice",
        "Scribe"
    ]
    PERSONALITY_TRAIT = [
        "I use polysyllabic words that convey the impression of great erudition.",
        "I've read every book in the world's greatest libraries— or I like to boast that I have.",
        "I'm used to helping out those who aren't as smart as I am, and I patiently explain anything and everything to others.",
        "There's nothing I like more than a good mystery. ",
        "I'm willing to listen to every side of an argument before I make my own judgment.",
        "I . . . speak . . . slowly . . . when talking . . . to idiots, . . . which . . . almost . . . everyone . . . is . . . compared . . . to me.",
        "I am horribly, horribly awkward in social situations.",
        "I'm convinced that people are always trying to steal my secrets."
    ]
    IDEAL = [
        "Knowledge. The path to power and self-improvement is through knowledge.",
        "Beauty. What is beautiful points us beyond itself toward what is true.",
        "Logic. Emotions must not cloud our logical thinking.",
        "No Limits. Nothing should fetter the infinite possibility inherent in all existence.",
        "Power. Knowledge is the path to power and domination.",
        "Self-Improvement. The goal of a life of study is the betterment of oneself."
    ]
    BOND = [
        "It is my duty to protect my students.",
        "I have an ancient text that holds terrible secrets that must not fall into the wrong hands.",
        "I work to preserve a library, university, scriptorium, or monastery.",
        "My life's work is a series of tomes related to a specific field of lore.",
        "I've been searching my whole life for the answer to a certain question.",
        "I sold my soul for knowledge. I hope to do great deeds and win it back."
    ]
    FLAW = [
        "I am easily distracted by the promise of information.",
        "Most people scream and run when they see a demon. I stop and take notes on its anatomy.",
        "Unlocking an ancient mystery is worth the price of a civilization.",
        "I overlook obvious solutions in favor of complicated ones.",
        "I speak without really thinking through my words, invariably insulting others.",
        "I can't keep a secret to save my life, or anyone else's."
    ]
    PROFICIENCIES = ['Arcana', 'History']
    EQUIPMENT = [
        "bottle of black ink",
        "quill",
        "small knife",
        "a set of common clothes",
        "letter from dead colleague posing a question you have not been able to answer"
    ]

    def languages(self):
        # Two distinct extra languages, chosen at random.
        return random.sample(LANGUAGES, 2)
class Soldier(AbstractBackground):
    """Soldier background: d8 specialty/trait tables, d6 ideal/bond/flaw tables."""

    SPECIALTY = [
        "Officer",
        "Scout",
        "Infantry",
        "Cavalry",
        "Healer",
        "Quartermaster",
        "Standard bearer"
    ]

    PERSONALITY_TRAIT = [
        "I'm always polite and respectful.",
        "I'm haunted by memories of war. I can't get the images of violence out of my mind.",
        "I've lost too many friends, and I'm slow to make new ones.",
        "I'm full of inspiring and cautionary tales from my military experience relevant to almost every combat situation.",
        "I can stare down a hell hound without flinching.",
        "I enjoy being strong and like breaking things.",
        "I have a crude sense of humour.",
        "I face problems head-on. A simple, direct solution is the best path to success."
    ]

    IDEAL = [
        "Greater Good. Our lot is to lay down our lives in defence of others.",
        "Responsibility. I do what I must and obey just authority.",
        "Independence. When people follow orders blindly, they embrace a kind of tyranny.",
        "Might. In life as in war, the stronger force wins.",
        # BUG FIX: "IDEALs" was a leftover from a find-and-replace rename of
        # the IDEAL table; the flavour text should read the plain word.
        "Live and Let Live. Ideals aren't worth killing over or going to war for.",
        "Nation. My city, nation, or people are all that matter."
    ]

    BOND = [
        "I would still lay down my life for the people I served with.",
        "Someone saved my life on the battlefield. To this day, I will never leave a friend behind.",
        "My honour is my life.",
        "I'll never forget the crushing defeat my company suffered or the enemies who dealt it.",
        "Those who fight beside me are those worth dying for.",
        "I fight for those who cannot fight for themselves."
    ]

    FLAW = [
        "The monstrous enemy we faced in battle still leaves me quivering with fear.",
        "I have little respect for anyone who is not a proven warrior.",
        "I made a terrible mistake in battle that cost many lives—and I would do anything to keep that mistake secret.",
        "My hatred of my enemies is blind and unreasoning.",
        "I obey the law, even if the law causes misery.",
        "I'd rather eat my armour than admit when I'm wrong."
    ]

    PROFICIENCIES = ['Athletics', 'Intimidation']

    def equipment(self):
        """Starting gear; trophy and pastime items are random picks."""
        return [
            "insignia of rank",
            "%s from fallen enemy" % random.choice(['dagger', 'broken blade', 'banner']),
            random.choice(["set of bone dice", "deck of cards"]),
            "common clothes",
            "10gp"
        ]
class Background(CharacterProcessor):
    """Pipeline step that rolls a random background and stamps it on the character."""

    def process(self):
        # Soldier is deliberately left out of the candidate pool (the
        # original carried a "# , Soldier" note) -- confirm before adding it.
        # Renamed the local so it no longer shadows this class's own name.
        background_cls = random.choice([Acolyte, Criminal, FolkHero, Sage])
        bg = background_cls()
        self.character.background = bg.specialty() or bg.name()
        self.character.defining_event = bg.defining_event()
        self.character.personality_trait = bg.personality_trait()
        self.character.ideal = bg.ideal()
        self.character.bond = bg.bond()
        self.character.flaw = bg.flaw()
        # BUG FIX: set.union() returns a *new* set and leaves the receiver
        # untouched, so the rolled proficiencies/languages were silently
        # discarded; update() mutates in place as intended.
        # (Assumes proficiencies/languages are sets -- equipment is a list.)
        self.character.proficiencies.update(bg.proficiencies())
        self.character.equipment.extend(bg.equipment())
        self.character.languages.update(bg.languages())
| mit |
odoomrp/server-tools | mass_editing/models/mass_object.py | 52 | 5755 | # -*- coding: utf-8 -*-
##############################################################################
#
# This module uses OpenERP, Open Source Management Solution Framework.
# Copyright (C):
# 2012-Today Serpent Consulting Services (<http://www.serpentcs.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import orm, fields
from openerp.tools.translate import _
class MassObject(orm.Model):
    """Configuration record describing a mass-editing action.

    Each record names a target model plus the fields that may be mass
    edited, and owns the sidebar action/button that launch the
    ``mass.editing.wizard`` on records of that model.
    """
    _name = "mass.object"

    _columns = {
        'name': fields.char("Name", size=64, required=True, select=1),
        'model_id': fields.many2one(
            'ir.model', 'Model', required=True, select=1),
        'field_ids': fields.many2many(
            'ir.model.fields', 'mass_field_rel', 'mass_id', 'field_id',
            'Fields'),
        'ref_ir_act_window': fields.many2one(
            'ir.actions.act_window', 'Sidebar Action', readonly=True,
            help="Sidebar action to make this template available on records \
of the related document model"),
        'ref_ir_value': fields.many2one(
            'ir.values', 'Sidebar Button', readonly=True,
            help="Sidebar button to open the sidebar action"),
        'model_ids': fields.many2many('ir.model', string='Model List')
    }
    _sql_constraints = [
        ('name_uniq', 'unique (name)', _('Name must be unique!')),
    ]

    def onchange_model_id(self, cr, uid, ids, model_id, context=None):
        """Refresh ``model_ids`` with the chosen model plus its _inherits parents."""
        if context is None:
            context = {}
        if not model_id:
            return {'value': {'model_ids': [(6, 0, [])]}}
        model_ids = [model_id]
        model_obj = self.pool['ir.model']
        active_model_obj = self.pool.get(model_obj.browse(
            cr, uid, model_id).model)
        if active_model_obj._inherits:
            for key, val in active_model_obj._inherits.items():
                found_model_ids = model_obj.search(
                    cr, uid, [('model', '=', key)], context=context)
                model_ids += found_model_ids
        return {'value': {'model_ids': [(6, 0, model_ids)]}}

    def create_action(self, cr, uid, ids, context=None):
        """Create the sidebar action and button that launch the wizard.

        NOTE(review): when called with several ids, every record ends up
        pointing at the action/button created for the *last* record
        (``vals`` is reused and ``self.write`` targets all ids on each
        pass) -- confirm that callers only ever pass a single id.
        """
        vals = {}
        action_obj = self.pool['ir.actions.act_window']
        ir_values_obj = self.pool['ir.values']
        for data in self.browse(cr, uid, ids, context=context):
            src_obj = data.model_id.model
            button_name = _('Mass Editing (%s)') % data.name
            # Window action opening the wizard on the target model.
            vals['ref_ir_act_window'] = action_obj.create(
                cr, SUPERUSER_ID,
                {
                    'name': button_name,
                    'type': 'ir.actions.act_window',
                    'res_model': 'mass.editing.wizard',
                    'src_model': src_obj,
                    'view_type': 'form',
                    'context': "{'mass_editing_object' : %d}" % (data.id),
                    'view_mode': 'form,tree',
                    'target': 'new',
                    'auto_refresh': 1,
                },
                context)
            # ir.values entry exposing the action in the sidebar.
            vals['ref_ir_value'] = ir_values_obj.create(
                cr, SUPERUSER_ID,
                {
                    'name': button_name,
                    'model': src_obj,
                    'key2': 'client_action_multi',
                    'value': (
                        "ir.actions.act_window," +
                        str(vals['ref_ir_act_window'])),
                    'object': True,
                },
                context)
            self.write(
                cr, uid, ids,
                {
                    'ref_ir_act_window': vals.get('ref_ir_act_window', False),
                    'ref_ir_value': vals.get('ref_ir_value', False),
                },
                context)
        return True

    def unlink_action(self, cr, uid, ids, context=None):
        """Drop the sidebar action/button linked to each record."""
        for template in self.browse(cr, uid, ids, context=context):
            try:
                if template.ref_ir_act_window:
                    act_window_obj = self.pool['ir.actions.act_window']
                    act_window_obj.unlink(
                        cr, SUPERUSER_ID, [template.ref_ir_act_window.id],
                        context=context)
                if template.ref_ir_value:
                    ir_values_obj = self.pool['ir.values']
                    ir_values_obj.unlink(
                        cr, SUPERUSER_ID, template.ref_ir_value.id,
                        context=context)
            # BUG FIX: the bare "except:" also swallowed SystemExit and
            # KeyboardInterrupt; catch ordinary errors only.
            except Exception:
                raise orm.except_orm(
                    _("Warning"),
                    _("Deletion of the action record failed."))
        return True

    def unlink(self, cr, uid, ids, context=None):
        """Remove the sidebar artifacts before deleting the records themselves."""
        self.unlink_action(cr, uid, ids, context=context)
        return super(MassObject, self).unlink(cr, uid, ids, context=context)

    def copy(self, cr, uid, record_id, default=None, context=None):
        """Duplicate a record with a blank name and no selected fields.

        NOTE(review): 'name' is required + unique, so the copy must be
        renamed before it can be saved -- presumably intentional.
        """
        if default is None:
            default = {}
        default.update({'name': '', 'field_ids': []})
        return super(MassObject, self).copy(
            cr, uid, record_id, default, context=context)
| agpl-3.0 |
geekboxzone/mmallow_external_deqp | scripts/testset.py | 2 | 7773 | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import sys
import random
import string
import subprocess
from optparse import OptionParser
def all(results, predicate):
    """Return True when predicate(item) holds for every item (short-circuits)."""
    for item in results:
        if predicate(item):
            continue
        return False
    return True
def any(results, predicate):
    """Return True when predicate(item) holds for at least one item."""
    matched = False
    for item in results:
        if predicate(item):
            matched = True
            break
    return matched
class FilterRule:
    """A named, human-described bundle of result-row predicates."""

    def __init__(self, name, description, filters):
        self.name, self.description, self.filters = name, description, filters
class TestCaseResult:
    """A test case name paired with its list of per-run status strings."""

    def __init__(self, name, results):
        self.name, self.results = name, results
class Group:
    """A named, initially empty collection of test cases."""

    def __init__(self, name):
        self.cases = []
        self.name = name
def readCaseList (filename):
    """Parse a dEQP case list file; returns the names on 'TEST: <name>' lines.

    BUG FIX: the file handle was opened and never closed, and in binary
    mode despite being parsed as text; use a context manager and text mode
    (identical results for this line-oriented format).
    """
    cases = []
    with open(filename, 'r') as f:
        for line in f:
            if line[:6] == "TEST: ":
                case = line[6:].strip()
                if case:
                    cases.append(case)
    return cases
def toResultList (caselist):
    """Wrap each case name in a TestCaseResult carrying no results yet."""
    return [TestCaseResult(name, []) for name in caselist]
def addResultsToCaseList (caselist, results):
    """Merge result rows into case-list order; rows for unknown cases are dropped."""
    caseListRes = toResultList(caselist)
    resultMap = dict((res.name, res) for res in caseListRes)

    for result in results:
        target = resultMap.get(result.name)
        if target is not None:
            target.results += result.results

    return caseListRes
def readTestResults (filename):
    """Read a CSV of test results (header row skipped; name, result, ...).

    Raises on an empty result list, duplicate case rows, or rows whose
    result count differs from the first row's.
    BUG FIX: use a context manager (and text mode) instead of a manually
    closed binary handle; splitlines() handles all newline conventions.
    """
    with open(filename, 'r') as f:
        csvLines = f.read().splitlines()
    results = []

    for line in csvLines[1:]:
        args = line.split(',')
        if len(args) == 1:
            continue # Ignore
        results.append(TestCaseResult(args[0], args[1:]))
    if len(results) == 0:
        raise Exception("Empty result list")

    # Sanity check: consistent column count, no duplicate case rows.
    numResultItems = len(results[0].results)
    seenResults = set()
    for result in results:
        if result.name in seenResults:
            raise Exception("Duplicate result row for test case '%s'" % result.name)
        if len(result.results) != numResultItems:
            raise Exception("Found %d results for test case '%s', expected %d" % (len(result.results), result.name, numResultItems))
        seenResults.add(result.name)

    return results
def readGroupList (filename):
    """Read one group name per line, skipping blank lines.

    BUG FIX: the file handle was never closed (and was opened in binary
    mode for text parsing); use a context manager and text mode.
    """
    groups = []
    with open(filename, 'r') as f:
        for line in f:
            group = line.strip()
            if group != "":
                groups.append(group)
    return groups
def createGroups (results, groupNames):
    """Bucket results under the group whose dotted-prefix matches their name.

    Aborts via die() if one case is claimed by more than one group.
    """
    groups = []
    claimed = set()

    for groupName in groupNames:
        bucket = Group(groupName)
        groups.append(bucket)

        prefix = groupName + "."
        for case in results:
            if not case.name.startswith(prefix):
                continue
            if case in claimed:
                die("Case '%s' matched by multiple groups (when processing '%s')" % (case.name, bucket.name))
            bucket.cases.append(case)
            claimed.add(case)

    return groups
def createLeafGroups (results):
    """Group results by their parent group (case name minus its last component).

    Groups are created lazily, in first-seen order.
    FIX: replaced the long-deprecated string.join() (removed in Python 3)
    with the equivalent str.join() idiom.
    """
    groups = []
    groupMap = {}

    for case in results:
        parts = case.name.split('.')
        groupName = ".".join(parts[:-1])

        if groupName not in groupMap:
            group = Group(groupName)
            groups.append(group)
            groupMap[groupName] = group
        else:
            group = groupMap[groupName]

        group.cases.append(case)

    return groups
def filterList (results, condition):
    """Keep the cases whose result vector satisfies the condition predicate."""
    return [case for case in results if condition(case.results)]
def getFilter (list, name):
    """Return the first entry in *list* whose .name equals name, else None."""
    for entry in list:
        if entry.name != name:
            continue
        return entry
    return None
def getNumCasesInGroups (groups):
    """Total number of cases across all groups."""
    return sum(len(group.cases) for group in groups)
def getCasesInSet (results, caseSet):
    """Filter results down to members of caseSet, preserving results order."""
    return [case for case in results if case in caseSet]
def selectCasesInGroups (results, groups):
    """Select the results appearing in any given group, in results order."""
    members = set()
    for group in groups:
        members.update(group.cases)
    return getCasesInSet(results, members)
def selectRandomSubset (results, groups, limit, seed):
    # Round-robin over the (seeded-shuffled) groups, drawing one random
    # case from each group in turn until `limit` cases are selected.
    # Do not restructure: the exact sequence of random.shuffle()/
    # random.choice() calls defines the reproducible, seed-determined
    # selection.  NOTE: `groups` (and its .cases lists) are consumed.
    selectedCases = set()
    numSelect = min(limit, getNumCasesInGroups(groups))

    random.seed(seed)
    random.shuffle(groups)

    groupNdx = 0
    while len(selectedCases) < numSelect:
        group = groups[groupNdx]
        if len(group.cases) == 0:
            # Exhausted group: drop it and retry the same index (which now
            # holds the next group); step back if we fell off the end.
            del groups[groupNdx]
            if groupNdx == len(groups):
                groupNdx -= 1
            continue # Try next

        selected = random.choice(group.cases)
        selectedCases.add(selected)
        group.cases.remove(selected)

        groupNdx = (groupNdx + 1) % len(groups)

    # Return in original results order, not selection order.
    return getCasesInSet(results, selectedCases)
def die (msg):
    """Print msg and abort the process with exit status -1.

    Uses print(msg), which behaves identically under Python 2 (a
    parenthesised single expression) while remaining Python 3 safe.
    """
    print(msg)
    sys.exit(-1)
# Named filter lists
# Maps each CLI --filter name to predicates applied over a case's full
# result history (list of per-run status strings, oldest first).  Note
# that all()/any() here are this module's predicate-taking versions, not
# the builtins.
FILTER_RULES = [
    FilterRule("all", "No filtering", []),
    FilterRule("all-pass", "All results must be 'Pass'", [lambda l: all(l, lambda r: r == 'Pass')]),
    FilterRule("any-pass", "Any of results is 'Pass'", [lambda l: any(l, lambda r: r == 'Pass')]),
    FilterRule("any-fail", "Any of results is not 'Pass' or 'NotSupported'", [lambda l: not all(l, lambda r: r == 'Pass' or r == 'NotSupported')]),
    FilterRule("prev-failing", "Any except last result is failure", [lambda l: l[-1] == 'Pass' and not all(l[:-1], lambda r: r == 'Pass')]),
    FilterRule("prev-passing", "Any except last result is 'Pass'", [lambda l: l[-1] != 'Pass' and any(l[:-1], lambda r: r == 'Pass')])
]
if __name__ == "__main__":
    # CLI: testset.py [options] [caselist] [result csv file]
    # Filters the case list by a named rule, then optionally narrows it to
    # a random subset and/or a set of groups, printing one case per line.
    parser = OptionParser(usage = "usage: %prog [options] [caselist] [result csv file]")
    parser.add_option("-f", "--filter", dest="filter", default="all", help="filter rule name")
    parser.add_option("-l", "--list", action="store_true", dest="list", default=False, help="list available rules")
    parser.add_option("-n", "--num", dest="limit", default=0, help="limit number of cases")
    parser.add_option("-s", "--seed", dest="seed", default=0, help="use selected seed for random selection")
    parser.add_option("-g", "--groups", dest="groups_file", default=None, help="select cases based on group list file")

    (options, args) = parser.parse_args()

    # --list only prints the available rules and exits.
    if options.list:
        print "Available filter rules:"
        for filter in FILTER_RULES:
            print "  %s: %s" % (filter.name, filter.description)
        sys.exit(0)

    if len(args) == 0:
        die("No input files specified")
    elif len(args) > 2:
        die("Too many arguments")

    # Fetch filter
    filter = getFilter(FILTER_RULES, options.filter)
    if filter == None:
        die("Unknown filter '%s'" % options.filter)

    # Read case list
    caselist = readCaseList(args[0])

    # With a results CSV, attach result histories; otherwise empty results.
    if len(args) > 1:
        results = readTestResults(args[1])
        results = addResultsToCaseList(caselist, results)
    else:
        results = toResultList(caselist)

    # Execute filters for results
    for rule in filter.filters:
        results = filterList(results, rule)

    # -n draws a random subset (balanced across groups); -g alone keeps
    # only the cases inside the listed groups.
    if options.limit != 0:
        if options.groups_file != None:
            groups = createGroups(results, readGroupList(options.groups_file))
        else:
            groups = createLeafGroups(results)
        results = selectRandomSubset(results, groups, int(options.limit), int(options.seed))
    elif options.groups_file != None:
        groups = createGroups(results, readGroupList(options.groups_file))
        results = selectCasesInGroups(results, groups)

    # Print test set
    for result in results:
        print result.name
| apache-2.0 |
trivoldus28/pulsarch-verilog | tools/local/bas-release/bas,3.9/lib/python/lib/python2.3/test/test_univnewlines.py | 9 | 3434 | # Tests universal newline support for both reading and parsing files.
import unittest
import os
import sys
from test import test_support
# Universal-newline support is optional at build time; skip the whole
# module when this Python lacks it.
if not hasattr(sys.stdin, 'newlines'):
    raise test_support.TestSkipped, \
        "This Python does not have universal newline support"

# Large (16 KB) filler so the data spans internal read buffers.
FATX = 'x' * (2**14)

DATA_TEMPLATE = [
    "line1=1",
    "line2='this is a very long line designed to go past the magic " +
    "hundred character limit that is inside fileobject.c and which " +
    "is meant to speed up the common case, but we also want to test " +
    "the uncommon case, naturally.'",
    "def line3():pass",
    "line4 = '%s'" % FATX,
]

# The same template rendered with each newline convention.
DATA_LF = "\n".join(DATA_TEMPLATE) + "\n"
DATA_CR = "\r".join(DATA_TEMPLATE) + "\r"
DATA_CRLF = "\r\n".join(DATA_TEMPLATE) + "\r\n"

# Note that DATA_MIXED also tests the ability to recognize a lone \r
# before end-of-file.
DATA_MIXED = "\n".join(DATA_TEMPLATE) + "\r"
DATA_SPLIT = [x + "\n" for x in DATA_TEMPLATE]
# Python 2 list comprehensions leak their loop variable into the
# enclosing scope; tidy it away.
del x
class TestGenericUnivNewlines(unittest.TestCase):
    """Shared machinery for the newline-variant tests.

    Subclasses set DATA (the raw bytes written to the fixture file) and
    NEWLINE (the value file.newlines should report after reading).
    """
    # use a class variable DATA to define the data to write to the file
    # and a class variable NEWLINE to set the expected newlines value
    READMODE = 'U'
    WRITEMODE = 'wb'

    def setUp(self):
        # Write the raw data without any newline translation.
        fp = open(test_support.TESTFN, self.WRITEMODE)
        fp.write(self.DATA)
        fp.close()

    def tearDown(self):
        # BUG FIX: narrowed the bare "except:" (which also swallowed
        # KeyboardInterrupt/SystemExit) to the expected OSError.
        try:
            os.unlink(test_support.TESTFN)
        except OSError:
            pass

    # FIX throughout: the deprecated backquote-repr syntax (`x`) is
    # replaced with the equivalent repr() calls, and file handles are
    # closed explicitly once assertions pass.
    def test_read(self):
        fp = open(test_support.TESTFN, self.READMODE)
        data = fp.read()
        self.assertEqual(data, DATA_LF)
        self.assertEqual(repr(fp.newlines), repr(self.NEWLINE))
        fp.close()

    def test_readlines(self):
        fp = open(test_support.TESTFN, self.READMODE)
        data = fp.readlines()
        self.assertEqual(data, DATA_SPLIT)
        self.assertEqual(repr(fp.newlines), repr(self.NEWLINE))
        fp.close()

    def test_readline(self):
        fp = open(test_support.TESTFN, self.READMODE)
        data = []
        d = fp.readline()
        while d:
            data.append(d)
            d = fp.readline()
        self.assertEqual(data, DATA_SPLIT)
        self.assertEqual(repr(fp.newlines), repr(self.NEWLINE))
        fp.close()

    def test_seek(self):
        # Seeking back must replay the same (translated) data.
        fp = open(test_support.TESTFN, self.READMODE)
        fp.readline()
        pos = fp.tell()
        data = fp.readlines()
        self.assertEqual(data, DATA_SPLIT[1:])
        fp.seek(pos)
        data = fp.readlines()
        self.assertEqual(data, DATA_SPLIT[1:])
        fp.close()

    def test_execfile(self):
        # execfile() must honour universal newlines when parsing source.
        namespace = {}
        execfile(test_support.TESTFN, namespace)
        func = namespace['line3']
        self.assertEqual(func.func_code.co_firstlineno, 3)
        self.assertEqual(namespace['line4'], FATX)
# Concrete variants: each fixes DATA to one newline convention and NEWLINE
# to the value file.newlines should report after reading it back.
class TestNativeNewlines(TestGenericUnivNewlines):
    # Plain text mode: no translation recorded.
    NEWLINE = None
    DATA = DATA_LF
    READMODE = 'r'
    WRITEMODE = 'w'

class TestCRNewlines(TestGenericUnivNewlines):
    NEWLINE = '\r'
    DATA = DATA_CR

class TestLFNewlines(TestGenericUnivNewlines):
    NEWLINE = '\n'
    DATA = DATA_LF

class TestCRLFNewlines(TestGenericUnivNewlines):
    NEWLINE = '\r\n'
    DATA = DATA_CRLF

class TestMixedNewlines(TestGenericUnivNewlines):
    # A tuple: both conventions were observed in the stream.
    NEWLINE = ('\r', '\n')
    DATA = DATA_MIXED
def test_main():
    """Run every newline-variant test class through the shared runner."""
    variants = (
        TestNativeNewlines,
        TestCRNewlines,
        TestLFNewlines,
        TestCRLFNewlines,
        TestMixedNewlines,
    )
    test_support.run_unittest(*variants)

if __name__ == '__main__':
    test_main()
| gpl-2.0 |
AccelAI/accel.ai | flask-aws/lib/python2.7/site-packages/sqlalchemy/testing/suite/test_reflection.py | 54 | 19895 |
import sqlalchemy as sa
from sqlalchemy import exc as sa_exc
from sqlalchemy import types as sql_types
from sqlalchemy import inspect
from sqlalchemy import MetaData, Integer, String
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.testing import engines, fixtures
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.testing import eq_, assert_raises_message
from sqlalchemy import testing
from .. import config
import operator
from sqlalchemy.schema import DDL, Index
from sqlalchemy import event
metadata, users = None, None
class HasTableTest(fixtures.TablesTest):
    """Exercise Dialect.has_table() against an existing and a missing table."""
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table('test_table', metadata,
              Column('id', Integer, primary_key=True),
              Column('data', String(50))
              )

    def test_has_table(self):
        with config.db.begin() as conn:
            assert config.db.dialect.has_table(conn, "test_table")
            assert not config.db.dialect.has_table(conn, "nonexistent_table")
class ComponentReflectionTest(fixtures.TablesTest):
    """Inspector/reflection behaviour against a small reference schema."""
    run_inserts = run_deletes = None

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        # Build the reference schema twice: once in the default schema and,
        # when the backend supports schemas, once more in "test_schema".
        cls.define_reflected_tables(metadata, None)
        if testing.requires.schemas.enabled:
            cls.define_reflected_tables(metadata, "test_schema")

    @classmethod
    def define_reflected_tables(cls, metadata, schema):
        if schema:
            schema_prefix = schema + "."
        else:
            schema_prefix = ""

        # "users" optionally carries a self-referential FK, exercised only
        # on backends that support it.
        if testing.requires.self_referential_foreign_keys.enabled:
            users = Table('users', metadata,
                          Column('user_id', sa.INT, primary_key=True),
                          Column('test1', sa.CHAR(5), nullable=False),
                          Column('test2', sa.Float(5), nullable=False),
                          Column('parent_user_id', sa.Integer,
                                 sa.ForeignKey('%susers.user_id' %
                                               schema_prefix)),
                          schema=schema,
                          test_needs_fk=True,
                          )
        else:
            users = Table('users', metadata,
                          Column('user_id', sa.INT, primary_key=True),
                          Column('test1', sa.CHAR(5), nullable=False),
                          Column('test2', sa.Float(5), nullable=False),
                          schema=schema,
                          test_needs_fk=True,
                          )

        # FK chain: dingalings -> email_addresses -> users.
        Table("dingalings", metadata,
              Column('dingaling_id', sa.Integer, primary_key=True),
              Column('address_id', sa.Integer,
                     sa.ForeignKey('%semail_addresses.address_id' %
                                   schema_prefix)),
              Column('data', sa.String(30)),
              schema=schema,
              test_needs_fk=True,
              )
        Table('email_addresses', metadata,
              Column('address_id', sa.Integer),
              Column('remote_user_id', sa.Integer,
                     sa.ForeignKey(users.c.user_id)),
              Column('email_address', sa.String(20)),
              sa.PrimaryKeyConstraint('address_id', name='email_ad_pk'),
              schema=schema,
              test_needs_fk=True,
              )

        if testing.requires.index_reflection.enabled:
            cls.define_index(metadata, users)
        if testing.requires.view_column_reflection.enabled:
            cls.define_views(metadata, schema)

    @classmethod
    def define_index(cls, metadata, users):
        # Two multi-column indexes for the index-reflection tests.
        Index("users_t_idx", users.c.test1, users.c.test2)
        Index("users_all_idx", users.c.user_id, users.c.test2, users.c.test1)

    @classmethod
    def define_views(cls, metadata, schema):
        # A simple pass-through view per table, created/dropped via DDL
        # events hooked onto the metadata lifecycle.
        for table_name in ('users', 'email_addresses'):
            fullname = table_name
            if schema:
                fullname = "%s.%s" % (schema, table_name)
            view_name = fullname + '_v'
            query = "CREATE VIEW %s AS SELECT * FROM %s" % (
                view_name, fullname)

            event.listen(
                metadata,
                "after_create",
                DDL(query)
            )
            event.listen(
                metadata,
                "before_drop",
                DDL("DROP VIEW %s" % view_name)
            )
    @testing.requires.schema_reflection
    def test_get_schema_names(self):
        insp = inspect(testing.db)

        self.assert_('test_schema' in insp.get_schema_names())

    @testing.requires.schema_reflection
    def test_dialect_initialize(self):
        # inspect() must trigger dialect initialization on first use.
        engine = engines.testing_engine()
        assert not hasattr(engine.dialect, 'default_schema_name')
        inspect(engine)
        assert hasattr(engine.dialect, 'default_schema_name')

    @testing.requires.schema_reflection
    def test_get_default_schema_name(self):
        insp = inspect(testing.db)
        eq_(insp.default_schema_name, testing.db.dialect.default_schema_name)

    @testing.provide_metadata
    def _test_get_table_names(self, schema=None, table_type='table',
                              order_by=None):
        # Shared driver: verifies table (or view) name listing, optionally
        # ordered by foreign-key dependency.
        meta = self.metadata
        users, addresses, dingalings = self.tables.users, \
            self.tables.email_addresses, self.tables.dingalings
        insp = inspect(meta.bind)

        if table_type == 'view':
            table_names = insp.get_view_names(schema)
            table_names.sort()
            answer = ['email_addresses_v', 'users_v']
            eq_(sorted(table_names), answer)
        else:
            table_names = insp.get_table_names(schema,
                                               order_by=order_by)
            if order_by == 'foreign_key':
                # Dependency order: referenced tables first.
                answer = ['users', 'email_addresses', 'dingalings']
                eq_(table_names, answer)
            else:
                answer = ['dingalings', 'email_addresses', 'users']
                eq_(sorted(table_names), answer)

    @testing.requires.table_reflection
    def test_get_table_names(self):
        self._test_get_table_names()

    @testing.requires.table_reflection
    @testing.requires.foreign_key_constraint_reflection
    def test_get_table_names_fks(self):
        self._test_get_table_names(order_by='foreign_key')

    @testing.requires.table_reflection
    @testing.requires.schemas
    def test_get_table_names_with_schema(self):
        self._test_get_table_names('test_schema')

    @testing.requires.view_column_reflection
    def test_get_view_names(self):
        self._test_get_table_names(table_type='view')

    @testing.requires.view_column_reflection
    @testing.requires.schemas
    def test_get_view_names_with_schema(self):
        self._test_get_table_names('test_schema', table_type='view')

    @testing.requires.table_reflection
    @testing.requires.view_column_reflection
    def test_get_tables_and_views(self):
        self._test_get_table_names()
        self._test_get_table_names(table_type='view')
def _test_get_columns(self, schema=None, table_type='table'):
meta = MetaData(testing.db)
users, addresses, dingalings = self.tables.users, \
self.tables.email_addresses, self.tables.dingalings
table_names = ['users', 'email_addresses']
if table_type == 'view':
table_names = ['users_v', 'email_addresses_v']
insp = inspect(meta.bind)
for table_name, table in zip(table_names, (users,
addresses)):
schema_name = schema
cols = insp.get_columns(table_name, schema=schema_name)
self.assert_(len(cols) > 0, len(cols))
# should be in order
for i, col in enumerate(table.columns):
eq_(col.name, cols[i]['name'])
ctype = cols[i]['type'].__class__
ctype_def = col.type
if isinstance(ctype_def, sa.types.TypeEngine):
ctype_def = ctype_def.__class__
# Oracle returns Date for DateTime.
if testing.against('oracle') and ctype_def \
in (sql_types.Date, sql_types.DateTime):
ctype_def = sql_types.Date
# assert that the desired type and return type share
# a base within one of the generic types.
self.assert_(len(set(ctype.__mro__).
intersection(ctype_def.__mro__).
intersection([
sql_types.Integer,
sql_types.Numeric,
sql_types.DateTime,
sql_types.Date,
sql_types.Time,
sql_types.String,
sql_types._Binary,
])) > 0, '%s(%s), %s(%s)' %
(col.name, col.type, cols[i]['name'], ctype))
if not col.primary_key:
assert cols[i]['default'] is None
@testing.requires.table_reflection
def test_get_columns(self):
    """Column reflection works for plain tables in the default schema."""
    self._test_get_columns()
@testing.provide_metadata
def _type_round_trip(self, *types):
    """Create a throwaway table with one column per given type and
    return the reflected column types, in column order.

    :param types: SQLAlchemy type objects/classes to round-trip.
    :return: list of reflected type instances.
    """
    t = Table('t', self.metadata,
              *[
                  Column('t%d' % i, type_)
                  for i, type_ in enumerate(types)
              ]
              )
    t.create()

    return [
        c['type'] for c in
        inspect(self.metadata.bind).get_columns('t')
    ]
@testing.requires.table_reflection
def test_numeric_reflection(self):
    """NUMERIC precision and scale survive a reflection round trip."""
    for typ in self._type_round_trip(
            sql_types.Numeric(18, 5),
    ):
        assert isinstance(typ, sql_types.Numeric)
        eq_(typ.precision, 18)
        eq_(typ.scale, 5)
@testing.requires.table_reflection
def test_varchar_reflection(self):
    """VARCHAR length survives a reflection round trip."""
    typ = self._type_round_trip(sql_types.String(52))[0]
    assert isinstance(typ, sql_types.String)
    eq_(typ.length, 52)
@testing.requires.table_reflection
@testing.provide_metadata
def test_nullable_reflection(self):
    """The ``nullable`` flag is reflected correctly for each column."""
    t = Table('t', self.metadata,
              Column('a', Integer, nullable=True),
              Column('b', Integer, nullable=False))
    t.create()
    eq_(
        dict(
            (col['name'], col['nullable'])
            for col in inspect(self.metadata.bind).get_columns('t')
        ),
        {"a": True, "b": False}
    )
@testing.requires.table_reflection
@testing.requires.schemas
def test_get_columns_with_schema(self):
    """Column reflection works against an explicit schema."""
    self._test_get_columns(schema='test_schema')
@testing.requires.view_column_reflection
def test_get_view_columns(self):
    """Column reflection works against views in the default schema."""
    self._test_get_columns(table_type='view')
@testing.requires.view_column_reflection
@testing.requires.schemas
def test_get_view_columns_with_schema(self):
    """Column reflection works against views in an explicit schema."""
    self._test_get_columns(schema='test_schema', table_type='view')
@testing.provide_metadata
def _test_get_pk_constraint(self, schema=None):
    """Reflect primary-key constraints of the fixture tables and check
    the constrained column lists (and, where supported, the PK name).
    """
    meta = self.metadata
    users, addresses = self.tables.users, self.tables.email_addresses
    insp = inspect(meta.bind)

    users_cons = insp.get_pk_constraint(users.name, schema=schema)
    users_pkeys = users_cons['constrained_columns']
    eq_(users_pkeys, ['user_id'])

    addr_cons = insp.get_pk_constraint(addresses.name, schema=schema)
    addr_pkeys = addr_cons['constrained_columns']
    eq_(addr_pkeys, ['address_id'])

    # PK name checking is optional per-backend
    with testing.requires.reflects_pk_names.fail_if():
        eq_(addr_cons['name'], 'email_ad_pk')
@testing.requires.primary_key_constraint_reflection
def test_get_pk_constraint(self):
    """PK constraint reflection works in the default schema."""
    self._test_get_pk_constraint()
@testing.requires.table_reflection
@testing.requires.primary_key_constraint_reflection
@testing.requires.schemas
def test_get_pk_constraint_with_schema(self):
    """PK constraint reflection works against an explicit schema."""
    self._test_get_pk_constraint(schema='test_schema')
@testing.requires.table_reflection
@testing.provide_metadata
def test_deprecated_get_primary_keys(self):
    """Calling the legacy ``get_primary_keys`` raises the deprecation
    warning that points callers at ``get_pk_constraint``."""
    meta = self.metadata
    users = self.tables.users
    insp = Inspector(meta.bind)
    assert_raises_message(
        sa_exc.SADeprecationWarning,
        "Call to deprecated method get_primary_keys."
        " Use get_pk_constraint instead.",
        insp.get_primary_keys, users.name
    )
@testing.provide_metadata
def _test_get_foreign_keys(self, schema=None):
    """Reflect foreign keys of the fixture tables and verify the
    referred schema/table/columns and constrained columns.
    """
    meta = self.metadata
    users, addresses, dingalings = self.tables.users, \
        self.tables.email_addresses, self.tables.dingalings
    insp = inspect(meta.bind)
    expected_schema = schema

    # users: the self-referential FK is only present when the backend
    # supports self-referential foreign keys
    if testing.requires.self_referential_foreign_keys.enabled:
        users_fkeys = insp.get_foreign_keys(users.name,
                                            schema=schema)
        fkey1 = users_fkeys[0]

        with testing.requires.named_constraints.fail_if():
            self.assert_(fkey1['name'] is not None)

        eq_(fkey1['referred_schema'], expected_schema)
        eq_(fkey1['referred_table'], users.name)
        eq_(fkey1['referred_columns'], ['user_id', ])
        if testing.requires.self_referential_foreign_keys.enabled:
            eq_(fkey1['constrained_columns'], ['parent_user_id'])

    # addresses
    addr_fkeys = insp.get_foreign_keys(addresses.name,
                                       schema=schema)
    fkey1 = addr_fkeys[0]

    with testing.requires.named_constraints.fail_if():
        self.assert_(fkey1['name'] is not None)

    eq_(fkey1['referred_schema'], expected_schema)
    eq_(fkey1['referred_table'], users.name)
    eq_(fkey1['referred_columns'], ['user_id', ])
    eq_(fkey1['constrained_columns'], ['remote_user_id'])
@testing.requires.foreign_key_constraint_reflection
def test_get_foreign_keys(self):
    """FK reflection works in the default schema."""
    self._test_get_foreign_keys()
@testing.requires.foreign_key_constraint_reflection
@testing.requires.schemas
def test_get_foreign_keys_with_schema(self):
    """FK reflection works against an explicit schema."""
    self._test_get_foreign_keys(schema='test_schema')
@testing.provide_metadata
def _test_get_indexes(self, schema=None):
    """Reflect indexes on the 'users' fixture table and verify that at
    least the two expected indexes are present with matching attributes.
    """
    meta = self.metadata
    users, addresses, dingalings = self.tables.users, \
        self.tables.email_addresses, self.tables.dingalings
    # The database may decide to create indexes for foreign keys, etc.
    # so there may be more indexes than expected.
    insp = inspect(meta.bind)
    indexes = insp.get_indexes('users', schema=schema)
    expected_indexes = [
        {'unique': False,
         'column_names': ['test1', 'test2'],
         'name': 'users_t_idx'},
        {'unique': False,
         'column_names': ['user_id', 'test2', 'test1'],
         'name': 'users_all_idx'}
    ]
    index_names = [d['name'] for d in indexes]
    for e_index in expected_indexes:
        assert e_index['name'] in index_names
        index = indexes[index_names.index(e_index['name'])]
        # compare only the keys the expectation specifies; the
        # reflected dict may carry backend-specific extras
        for key in e_index:
            eq_(e_index[key], index[key])
@testing.requires.index_reflection
def test_get_indexes(self):
    """Index reflection works in the default schema."""
    self._test_get_indexes()
@testing.requires.index_reflection
@testing.requires.schemas
def test_get_indexes_with_schema(self):
    """Index reflection works against an explicit schema."""
    self._test_get_indexes(schema='test_schema')
@testing.requires.unique_constraint_reflection
def test_get_unique_constraints(self):
    """Unique-constraint reflection works in the default schema."""
    self._test_get_unique_constraints()
@testing.requires.unique_constraint_reflection
@testing.requires.schemas
def test_get_unique_constraints_with_schema(self):
    """Unique-constraint reflection works against an explicit schema."""
    self._test_get_unique_constraints(schema='test_schema')
@testing.provide_metadata
def _test_get_unique_constraints(self, schema=None):
    """Create a table carrying several unique constraints (including
    ones over reserved-word columns) and verify they reflect back
    exactly, compared in name order.
    """
    uniques = sorted(
        [
            {'name': 'unique_a', 'column_names': ['a']},
            {'name': 'unique_a_b_c', 'column_names': ['a', 'b', 'c']},
            {'name': 'unique_c_a_b', 'column_names': ['c', 'a', 'b']},
            {'name': 'unique_asc_key', 'column_names': ['asc', 'key']},
        ],
        key=operator.itemgetter('name')
    )
    orig_meta = self.metadata
    table = Table(
        'testtbl', orig_meta,
        Column('a', sa.String(20)),
        Column('b', sa.String(30)),
        Column('c', sa.Integer),
        # reserved identifiers
        Column('asc', sa.String(30)),
        Column('key', sa.String(30)),
        schema=schema
    )
    for uc in uniques:
        table.append_constraint(
            sa.UniqueConstraint(*uc['column_names'], name=uc['name'])
        )
    orig_meta.create_all()

    inspector = inspect(orig_meta.bind)
    reflected = sorted(
        inspector.get_unique_constraints('testtbl', schema=schema),
        key=operator.itemgetter('name')
    )

    for orig, refl in zip(uniques, reflected):
        eq_(orig, refl)
@testing.provide_metadata
def _test_get_view_definition(self, schema=None):
    """View definitions for the fixture views are reflectable and
    non-empty (content is backend-specific, so only truthiness is
    asserted)."""
    meta = self.metadata
    users, addresses, dingalings = self.tables.users, \
        self.tables.email_addresses, self.tables.dingalings
    view_name1 = 'users_v'
    view_name2 = 'email_addresses_v'
    insp = inspect(meta.bind)
    v1 = insp.get_view_definition(view_name1, schema=schema)
    self.assert_(v1)
    v2 = insp.get_view_definition(view_name2, schema=schema)
    self.assert_(v2)
@testing.requires.view_reflection
def test_get_view_definition(self):
    """View-definition reflection works in the default schema."""
    self._test_get_view_definition()
@testing.requires.view_reflection
@testing.requires.schemas
def test_get_view_definition_with_schema(self):
    """View-definition reflection works against an explicit schema."""
    self._test_get_view_definition(schema='test_schema')
@testing.only_on("postgresql", "PG specific feature")
@testing.provide_metadata
def _test_get_table_oid(self, table_name, schema=None):
    """PostgreSQL-only: ``get_table_oid`` returns an integer OID for
    the given table."""
    meta = self.metadata
    users, addresses, dingalings = self.tables.users, \
        self.tables.email_addresses, self.tables.dingalings
    insp = inspect(meta.bind)
    oid = insp.get_table_oid(table_name, schema)
    self.assert_(isinstance(oid, int))
def test_get_table_oid(self):
    """Table OID lookup works in the default schema (PG only)."""
    self._test_get_table_oid('users')
@testing.requires.schemas
def test_get_table_oid_with_schema(self):
    """Table OID lookup works against an explicit schema (PG only)."""
    self._test_get_table_oid('users', schema='test_schema')
@testing.requires.table_reflection
@testing.provide_metadata
def test_autoincrement_col(self):
    """test that 'autoincrement' is reflected according to sqla's policy.

    Don't mark this test as unsupported for any backend !

    (technically it fails with MySQL InnoDB since "id" comes before "id2")

    A backend is better off not returning "autoincrement" at all,
    instead of potentially returning "False" for an auto-incrementing
    primary key column.
    """
    meta = self.metadata
    insp = inspect(meta.bind)

    for tname, cname in [
            ('users', 'user_id'),
            ('email_addresses', 'address_id'),
            ('dingalings', 'dingaling_id'),
    ]:
        cols = insp.get_columns(tname)
        # 'autoincrement' may be absent; absence is acceptable,
        # an explicit False on a PK column would not be
        id_ = dict((c['name'], c) for c in cols)[cname]
        assert id_.get('autoincrement', True)
__all__ = ('ComponentReflectionTest', 'HasTableTest')
| mit |
ujjwalwahi/odoo | openerp/addons/base/module/report/__init__.py | 463 | 1089 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ir_module_reference_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tisba/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/exitfuncs.py | 61 | 2402 | """SCons.exitfuncs
Register functions which are executed when SCons exits for any reason.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/exitfuncs.py 5134 2010/08/16 23:02:40 bdeegan"
_exithandlers = []
def _run_exitfuncs():
"""run any registered exit functions
_exithandlers is traversed in reverse order so functions are executed
last in, first out.
"""
while _exithandlers:
func, targs, kargs = _exithandlers.pop()
func(*targs, **kargs)
def register(func, *targs, **kargs):
    """register a function to be executed upon normal program termination

    func - function to be called at exit
    targs - optional arguments to pass to func
    kargs - optional keyword arguments to pass to func
    """
    handler_entry = (func, targs, kargs)
    _exithandlers.append(handler_entry)
import sys

# NOTE: sys.exitfunc is a Python 2-only hook; it was removed in Python 3
# in favour of the atexit module.
try:
    x = sys.exitfunc

    # if x isn't our own exit func executive, assume it's another
    # registered exit function - append it to our list...
    if x != _run_exitfuncs:
        register(x)

except AttributeError:
    # no exit function was previously installed
    pass

# make our exit function get run by python when it exits:
sys.exitfunc = _run_exitfuncs

del sys
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
shanemcd/ansible | lib/ansible/modules/network/netvisor/pn_ospf.py | 72 | 8856 | #!/usr/bin/python
""" PN-CLI vrouter-ospf-add/remove """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_ospf
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
short_description: CLI command to add/remove ospf protocol to a vRouter.
description:
- Execute vrouter-ospf-add, vrouter-ospf-remove command.
- This command adds/removes Open Shortest Path First(OSPF) routing
protocol to a virtual router(vRouter) service.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: False
state:
description:
- Assert the state of the ospf. Use 'present' to add ospf
and 'absent' to remove ospf.
required: True
default: present
choices: ['present', 'absent']
pn_vrouter_name:
description:
- Specify the name of the vRouter.
required: True
pn_network_ip:
description:
- Specify the network IP (IPv4 or IPv6) address.
required: True
pn_ospf_area:
description:
- Stub area number for the configuration. Required for vrouter-ospf-add.
"""
EXAMPLES = """
- name: "Add OSPF to vrouter"
pn_ospf:
state: present
pn_vrouter_name: name-string
pn_network_ip: 192.168.11.2/24
pn_ospf_area: 1.0.0.0
- name: "Remove OSPF from vrouter"
pn_ospf:
state: absent
pn_vrouter_name: name-string
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
returned: always
type: str
stdout:
description: The set of responses from the ospf command.
returned: always
type: list
stderr:
description: The set of error responses from the ospf command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
import shlex
VROUTER_EXISTS = None
NETWORK_EXISTS = None
def pn_cli(module):
    """
    This method is to generate the cli portion to launch the Netvisor cli.
    It parses the username, password, switch parameters from module.
    :param module: The Ansible module to fetch username, password and switch
    :return: returns the cli string for further processing
    """
    params = module.params
    username = params['pn_cliusername']
    password = params['pn_clipassword']
    cliswitch = params['pn_cliswitch']

    # authenticated prefix only when both credentials are supplied
    if username and password:
        base = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
    else:
        base = '/usr/bin/cli --quiet '

    # direct the command at the local switch or a named remote switch
    suffix = ' switch-local ' if cliswitch == 'local' \
        else ' switch ' + cliswitch

    return base + suffix
def check_cli(module, cli):
    """
    This method checks if vRouter exists on the target node.
    This method also checks for idempotency using the vrouter-ospf-show command.
    If the given vRouter exists, return VROUTER_EXISTS as True else False.
    If an OSPF network with the given ip exists on the given vRouter,
    return NETWORK_EXISTS as True else False.
    :param module: The Ansible module to fetch input parameters
    :param cli: The CLI string
    :return Global Booleans: VROUTER_EXISTS, NETWORK_EXISTS
    """
    vrouter_name = module.params['pn_vrouter_name']
    network_ip = module.params['pn_network_ip']
    # Global flags: results are communicated to main() through module
    # globals rather than return values
    global VROUTER_EXISTS, NETWORK_EXISTS

    # Check for vRouter: a whitespace-separated list of names is returned
    check_vrouter = cli + ' vrouter-show format name no-show-headers '
    check_vrouter = shlex.split(check_vrouter)
    out = module.run_command(check_vrouter)[1]
    out = out.split()

    if vrouter_name in out:
        VROUTER_EXISTS = True
    else:
        VROUTER_EXISTS = False

    # Check for OSPF networks already configured on that vRouter
    show = cli + ' vrouter-ospf-show vrouter-name %s ' % vrouter_name
    show += 'format network no-show-headers'
    show = shlex.split(show)
    out = module.run_command(show)[1]
    out = out.split()

    if network_ip in out:
        NETWORK_EXISTS = True
    else:
        NETWORK_EXISTS = False
def run_cli(module, cli):
    """
    This method executes the cli command on the target node(s) and returns the
    output. The module then exits based on the output.
    :param cli: the complete cli string to be executed on the target node(s).
    :param module: The Ansible module to fetch command
    """
    cliswitch = module.params['pn_cliswitch']
    state = module.params['state']
    command = get_command_from_state(state)

    cmd = shlex.split(cli)
    result, out, err = module.run_command(cmd)

    # strip the credential-bearing prefix before reporting the command
    print_cli = cli.split(cliswitch)[1]

    # Response in JSON format; a non-zero return code is reported as a
    # failure but does not raise (module.exit_json terminates the module)
    if result != 0:
        module.exit_json(
            command=print_cli,
            stderr=err.strip(),
            msg="%s operation failed" % command,
            changed=False
        )

    if out:
        module.exit_json(
            command=print_cli,
            stdout=out.strip(),
            msg="%s operation completed" % command,
            changed=True
        )

    else:
        module.exit_json(
            command=print_cli,
            msg="%s operation completed" % command,
            changed=True
        )
def get_command_from_state(state):
    """
    This method gets appropriate command name for the state specified. It
    returns the command name for the specified state.
    :param state: The state for which the respective command name is required.
    :return: 'vrouter-ospf-add' for 'present', 'vrouter-ospf-remove' for
        'absent', otherwise None.
    """
    state_to_command = {
        'present': 'vrouter-ospf-add',
        'absent': 'vrouter-ospf-remove',
    }
    return state_to_command.get(state)
def main():
    """Entry point: parse arguments, verify idempotency, and run the
    vrouter-ospf-add/remove command on the target switch.

    Exits via module.exit_json in all paths (skipped, failed, changed).
    """
    module = AnsibleModule(
        argument_spec=dict(
            pn_cliusername=dict(required=False, type='str'),
            pn_clipassword=dict(required=False, type='str', no_log=True),
            pn_cliswitch=dict(required=False, type='str', default='local'),
            state=dict(type='str', default='present', choices=['present',
                                                               'absent']),
            pn_vrouter_name=dict(required=True, type='str'),
            pn_network_ip=dict(required=True, type='str'),
            pn_ospf_area=dict(type='str')
        ),
        required_if=(
            ['state', 'present',
             ['pn_network_ip', 'pn_ospf_area']],
            ['state', 'absent', ['pn_network_ip']]
        )
    )

    # Accessing the arguments
    state = module.params['state']
    vrouter_name = module.params['pn_vrouter_name']
    network_ip = module.params['pn_network_ip']
    ospf_area = module.params['pn_ospf_area']

    command = get_command_from_state(state)

    # Building the CLI command string; check_cli() populates the
    # VROUTER_EXISTS / NETWORK_EXISTS module globals
    cli = pn_cli(module)
    check_cli(module, cli)

    if state == 'present':
        if VROUTER_EXISTS is False:
            module.exit_json(
                skipped=True,
                msg='vRouter %s does not exist' % vrouter_name
            )
        if NETWORK_EXISTS is True:
            module.exit_json(
                skipped=True,
                msg=('OSPF with network ip %s already exists on %s'
                     % (network_ip, vrouter_name))
            )
        cli += (' %s vrouter-name %s network %s ospf-area %s'
                % (command, vrouter_name, network_ip, ospf_area))

    if state == 'absent':
        if VROUTER_EXISTS is False:
            module.exit_json(
                skipped=True,
                msg='vRouter %s does not exist' % vrouter_name
            )
        if NETWORK_EXISTS is False:
            # BUG FIX: this skip message previously said "already exists"
            # (copy-pasted from the 'present' branch); when removing, the
            # skip reason is that the OSPF network does NOT exist.
            module.exit_json(
                skipped=True,
                msg=('OSPF with network ip %s does not exist on %s'
                     % (network_ip, vrouter_name))
            )
        cli += (' %s vrouter-name %s network %s'
                % (command, vrouter_name, network_ip))

    run_cli(module, cli)
# AnsibleModule boilerplate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| gpl-3.0 |
BrainPad/FindYourCandy | webapp/candysorter/ext/google/cloud/ml/training.py | 1 | 2357 | # Copyright 2017 BrainPad Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function, unicode_literals
class ScaleTier(object):
    """Cloud ML Engine scale-tier constants for training jobs."""
    BASIC = 'BASIC'
    STANDARD_1 = 'STANDARD_1'
    PREMIUM_1 = 'PREMIUM_1'
    BASIC_GPU = 'BASIC_GPU'
    CUSTOM = 'CUSTOM'


class TrainingInput(object):
    """Input parameters for a Cloud ML training job.

    Mirrors the ``trainingInput`` resource of the Cloud ML API; optional
    fields (currently only ``args``) are stored in ``_properties``.

    :param package_uris: GCS URIs of the trainer packages.
    :param python_module: dotted name of the module to run.
    :param scale_tier: one of the :class:`ScaleTier` constants.
    :param region: Compute Engine region for the job.
    """

    def __init__(self, package_uris, python_module, scale_tier=ScaleTier.BASIC,
                 region='us-central1'):
        self.package_uris = package_uris
        self.python_module = python_module
        self.scale_tier = scale_tier
        self.region = region
        self._properties = {}

    @classmethod
    def from_api_repr(cls, resource):
        """Build a :class:`TrainingInput` from its JSON API representation.

        :param resource: dict as returned by the Cloud ML API.
        :return: a new :class:`TrainingInput`.
        """
        training_input = cls(package_uris=resource.get('packageUris'),
                             python_module=resource.get('pythonModule'),
                             scale_tier=resource.get('scaleTier'),
                             region=resource.get('region'))
        if 'args' in resource:
            training_input._properties['args'] = resource['args']
        return training_input

    def to_api_repr(self):
        """Return the JSON API representation of this resource as a dict."""
        resource = {
            'scaleTier': self.scale_tier,
            'packageUris': self.package_uris,
            'pythonModule': self.python_module,
            'region': self.region,
        }
        _args = self._properties.get('args')
        if _args is not None:
            resource['args'] = _args
        return resource

    @property
    def args(self):
        # BUG FIX: the getter was declared ``def args(self, value)``;
        # the stray ``value`` parameter made every read of ``obj.args``
        # raise TypeError. A property getter takes only ``self``.
        return self._properties.get('args')

    @args.setter
    def args(self, value):
        self._properties['args'] = value

    def with_args(self, *args):
        """Append *args* to the job argument list.

        NOTE(review): despite the fluent-sounding name this returns None,
        matching the original behavior; callers must not chain it.
        """
        _args = self._properties.setdefault('args', [])
        _args.extend(args)
| apache-2.0 |
dharmabumstead/ansible | test/units/module_utils/basic/test_deprecate_warn.py | 127 | 1596 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import json
import pytest
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_warn(am, capfd):
    """warn() calls and exit_json(warnings=...) are merged into the
    'warnings' key of the module's JSON output, in order."""
    am.warn('warning1')

    with pytest.raises(SystemExit):
        am.exit_json(warnings=['warning2'])
    out, err = capfd.readouterr()
    assert json.loads(out)['warnings'] == ['warning1', 'warning2']
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_deprecate(am, capfd):
    """deprecate() calls and exit_json(deprecations=...) are normalized
    to {'msg': ..., 'version': ...} dicts in the JSON output; entries may
    be plain strings (version None) or (msg, version) tuples."""
    am.deprecate('deprecation1')
    am.deprecate('deprecation2', '2.3')

    with pytest.raises(SystemExit):
        am.exit_json(deprecations=['deprecation3', ('deprecation4', '2.4')])

    out, err = capfd.readouterr()
    output = json.loads(out)
    assert ('warnings' not in output or output['warnings'] == [])
    assert output['deprecations'] == [
        {u'msg': u'deprecation1', u'version': None},
        {u'msg': u'deprecation2', u'version': '2.3'},
        {u'msg': u'deprecation3', u'version': None},
        {u'msg': u'deprecation4', u'version': '2.4'},
    ]
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_deprecate_without_list(am, capfd):
    """A bare string passed as deprecations= is accepted and wrapped
    into the normalized single-entry list."""
    with pytest.raises(SystemExit):
        am.exit_json(deprecations='Simple deprecation warning')

    out, err = capfd.readouterr()
    output = json.loads(out)
    assert ('warnings' not in output or output['warnings'] == [])
    assert output['deprecations'] == [
        {u'msg': u'Simple deprecation warning', u'version': None},
    ]
| gpl-3.0 |
LockScreen/Backend | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/chardetect.py | 1786 | 2504 | #!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from io import open
from chardet import __version__
from chardet.universaldetector import UniversalDetector
def description_of(lines, name='stdin'):
    """
    Return a string describing the probable encoding of a file or
    list of strings.

    :param lines: The lines to get the encoding of.
    :type lines: Iterable of bytes
    :param name: Name of file or collection of lines
    :type name: str
    """
    detector = UniversalDetector()
    for line in lines:
        detector.feed(line)
    detector.close()
    result = detector.result
    encoding = result['encoding']
    # a None/empty encoding means the detector could not decide
    if not encoding:
        return '{0}: no result'.format(name)
    return '{0}: {1} with confidence {2}'.format(name, encoding,
                                                 result['confidence'])
def main(argv=None):
    '''
    Handles command line arguments and gets things started.

    :param argv: List of arguments, as if specified on the command-line.
                 If None, ``sys.argv[1:]`` is used instead.
    :type argv: list of str
    '''
    # Get command line arguments
    parser = argparse.ArgumentParser(
        description="Takes one or more file paths and reports their detected \
encodings",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        conflict_handler='resolve')
    # files are opened in binary mode: the detector consumes raw bytes
    parser.add_argument('input',
                        help='File whose encoding we would like to determine.',
                        type=argparse.FileType('rb'), nargs='*',
                        default=[sys.stdin])
    parser.add_argument('--version', action='version',
                        version='%(prog)s {0}'.format(__version__))
    args = parser.parse_args(argv)

    for f in args.input:
        if f.isatty():
            # interactive stdin: tell the user how to terminate input
            print("You are running chardetect interactively. Press " +
                  "CTRL-D twice at the start of a blank line to signal the " +
                  "end of your input. If you want help, run chardetect " +
                  "--help\n", file=sys.stderr)
        print(description_of(f, f.name))
if __name__ == '__main__':
main()
| mit |
leilihh/cinder | cinder/tests/unit/test_v7000_fcp.py | 15 | 20809 | # Copyright 2015 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Violin Memory 7000 Series All-Flash Array Fibrechannel Driver
"""
import mock
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_vmem_client as vmemclient
from cinder.volume import configuration as conf
from cinder.volume.drivers.violin import v7000_common
from cinder.volume.drivers.violin import v7000_fcp
VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba"
VOLUME = {
"name": "volume-" + VOLUME_ID,
"id": VOLUME_ID,
"display_name": "fake_volume",
"size": 2,
"host": "myhost",
"volume_type": None,
"volume_type_id": None,
}
SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb"
SNAPSHOT = {
"name": "snapshot-" + SNAPSHOT_ID,
"id": SNAPSHOT_ID,
"volume_id": VOLUME_ID,
"volume_name": "volume-" + VOLUME_ID,
"volume_size": 2,
"display_name": "fake_snapshot",
"volume": VOLUME,
}
SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc"
SRC_VOL = {
"name": "volume-" + SRC_VOL_ID,
"id": SRC_VOL_ID,
"display_name": "fake_src_vol",
"size": 2,
"host": "myhost",
"volume_type": None,
"volume_type_id": None,
}
INITIATOR_IQN = "iqn.1111-22.org.debian:11:222"
CONNECTOR = {
"initiator": INITIATOR_IQN,
"host": "irrelevant",
'wwpns': ['50014380186b3f65', '50014380186b3f67'],
}
FC_TARGET_WWPNS = [
'31000024ff45fb22', '21000024ff45fb23',
'51000024ff45f1be', '41000024ff45f1bf'
]
FC_INITIATOR_WWPNS = [
'50014380186b3f65', '50014380186b3f67'
]
FC_FABRIC_MAP = {
'fabricA':
{'target_port_wwn_list': [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]],
'initiator_port_wwn_list': [FC_INITIATOR_WWPNS[0]]},
'fabricB':
{'target_port_wwn_list': [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]],
'initiator_port_wwn_list': [FC_INITIATOR_WWPNS[1]]}
}
FC_INITIATOR_TARGET_MAP = {
FC_INITIATOR_WWPNS[0]: [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]],
FC_INITIATOR_WWPNS[1]: [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]]
}
PHY_DEVICES_RESPONSE = {
'data':
{'physical_devices':
[{'availsize': 1099504287744,
'availsize_mb': 524284,
'category': 'Virtual Device',
'connection_type': 'block',
'firmware': 'v1.0',
'guid': '3cc4d6dd-166d-77d2-4967-00005463f597',
'inquiry_string': '000002122b000032BKSC OTHDISK-MFCN01 v1.0',
'is_foreign': True,
'name': 'BKSC:OTHDISK-MFCN01.000',
'object_id': '84b834fb-1f4d-5d3b-b7ae-5796f9868151',
'owner': 'example.com',
'pool': None,
'product': 'OTHDISK-MFCN01',
'scsi_address':
{'adapter': '98',
'channel': '0',
'id': '0',
'lun': '0',
'object_id': '6e0106fc-9c1c-52a2-95c9-396b7a653ac1'},
'size': 1099504287744,
'size_mb': 1048569,
'type': 'Direct-Access',
'usedsize': 0,
'usedsize_mb': 0,
'vendor': 'BKSC',
'wwid': 'BKSC OTHDISK-MFCN01 v1.0-0-0-00'},
{'availsize': 1099504287744,
'availsize_mb': 524284,
'category': 'Virtual Device',
'connection_type': 'block',
'firmware': 'v1.0',
'guid': '283b2694-192b-4745-6768-00005463f673',
'inquiry_string': '000002122b000032BKSC OTHDISK-MFCN08 v1.0',
'is_foreign': False,
'name': 'BKSC:OTHDISK-MFCN08.000',
'object_id': '8555b888-bf43-5083-a433-f0c7b0282370',
'owner': 'example.com',
'pool':
{'name': 'mga-pool',
'object_id': '0818d3de-4437-535f-9cac-cc100a2c9313'},
'product': 'OTHDISK-MFCN08',
'scsi_address':
{'adapter': '98',
'channel': '0',
'id': '11',
'lun': '0',
'object_id': '6e0106fc-9c1c-52a2-95c9-396b7a653ac1'},
'size': 1099504287744,
'size_mb': 1048569,
'type': 'Direct-Access',
'usedsize': 0,
'usedsize_mb': 0,
'vendor': 'BKSC',
'wwid': 'BKSC OTHDISK-MFCN08 v1.0-0-0-00'},
{'availsize': 1099504287744,
'availsize_mb': 1048569,
'category': 'Virtual Device',
'connection_type': 'block',
'firmware': 'v1.0',
'guid': '7f47db19-019c-707d-0df1-00005463f949',
'inquiry_string': '000002122b000032BKSC OTHDISK-MFCN09 v1.0',
'is_foreign': False,
'name': 'BKSC:OTHDISK-MFCN09.000',
'object_id': '62a98898-f8b8-5837-af2b-764f5a72e291',
'owner': 'a.b.c.d',
'pool':
{'name': 'mga-pool',
'object_id': '0818d3de-4437-535f-9cac-cc100a2c9313'},
'product': 'OTHDISK-MFCN09',
'scsi_address':
{'adapter': '98',
'channel': '0',
'id': '12',
'lun': '0',
'object_id': '6e0106fc-9c1c-52a2-95c9-396b7a653ac1'},
'size': 1099504287744,
'size_mb': 524284,
'type': 'Direct-Access',
'usedsize': 0,
'usedsize_mb': 0,
'vendor': 'BKSC',
'wwid': 'BKSC OTHDISK-MFCN09 v1.0-0-0-00'}],
'total_physical_devices': 3},
'msg': 'Successful',
'success': True
}
# The FC_INFO dict returned by the backend is keyed on
# object_id of the FC adapter and the values are the
# wwmns
FC_INFO = {
'1a3cdb6a-383d-5ba6-a50b-4ba598074510': ['2100001b9745e25e'],
'4a6bc10a-5547-5cc0-94f2-76222a8f8dff': ['2100001b9745e230'],
'b21bfff5-d89e-51ff-9920-d990a061d722': ['2100001b9745e25f'],
'b508cc6b-f78a-51f9-81cf-47c1aaf53dd1': ['2100001b9745e231']
}
CLIENT_INFO = {
'FCPolicy':
{'AS400enabled': False,
'VSAenabled': False,
'initiatorWWPNList': ['50-01-43-80-18-6b-3f-66',
'50-01-43-80-18-6b-3f-64']},
'FibreChannelDevices':
[{'access': 'ReadWrite',
'id': 'v0000004',
'initiatorWWPN': '*',
'lun': '8',
'name': 'abcdabcd-1234-abcd-1234-abcdeffedcba',
'sizeMB': 10240,
'targetWWPN': '*',
'type': 'SAN'}]
}
CLIENT_INFO1 = {
'FCPolicy':
{'AS400enabled': False,
'VSAenabled': False,
'initiatorWWPNList': ['50-01-43-80-18-6b-3f-66',
'50-01-43-80-18-6b-3f-64']},
'FibreChannelDevices': []
}
class V7000FCPDriverTestCase(test.TestCase):
"""Test cases for VMEM FCP driver."""
def setUp(self):
    """Build a V7000 FC driver wired to a mocked config and pre-seed
    the backend attributes normally discovered at do_setup time."""
    super(V7000FCPDriverTestCase, self).setUp()
    self.conf = self.setup_configuration()
    self.driver = v7000_fcp.V7000FCPDriver(configuration=self.conf)
    self.driver.common.container = 'myContainer'
    self.driver.device_id = 'ata-VIOLIN_MEMORY_ARRAY_23109R00000022'
    self.driver.gateway_fc_wwns = FC_TARGET_WWPNS
    self.stats = {}
    self.driver.set_initialized()
def tearDown(self):
    """No extra teardown beyond the base test class."""
    super(V7000FCPDriverTestCase, self).tearDown()
def setup_configuration(self):
    """Return a mock cinder Configuration with the SAN options the
    driver reads."""
    config = mock.Mock(spec=conf.Configuration)
    config.volume_backend_name = 'v7000_fcp'
    config.san_ip = '8.8.8.8'
    config.san_login = 'admin'
    config.san_password = ''
    config.san_thin_provision = False
    config.san_is_local = False
    config.request_timeout = 300
    config.container = 'myContainer'
    return config
def setup_mock_concerto(self, m_conf=None):
"""Create a fake Concerto communication object."""
_m_concerto = mock.Mock(name='Concerto',
version='1.1.1',
spec=vmemclient.mock_client_conf)
if m_conf:
_m_concerto.configure_mock(**m_conf)
return _m_concerto
@mock.patch.object(v7000_common.V7000Common, 'check_for_setup_error')
def test_check_for_setup_error(self, m_setup_func):
"""No setup errors are found."""
result = self.driver.check_for_setup_error()
m_setup_func.assert_called_with()
self.assertIsNone(result)
@mock.patch.object(v7000_common.V7000Common, 'check_for_setup_error')
def test_check_for_setup_error_no_wwn_config(self, m_setup_func):
"""No wwns were found during setup."""
self.driver.gateway_fc_wwns = []
failure = exception.ViolinInvalidBackendConfig
self.assertRaises(failure, self.driver.check_for_setup_error)
def test_create_volume(self):
"""Volume created successfully."""
self.driver.common._create_lun = mock.Mock()
result = self.driver.create_volume(VOLUME)
self.driver.common._create_lun.assert_called_with(VOLUME)
self.assertIsNone(result)
def test_create_volume_from_snapshot(self):
self.driver.common._create_volume_from_snapshot = mock.Mock()
result = self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT)
self.driver.common._create_volume_from_snapshot.assert_called_with(
SNAPSHOT, VOLUME)
self.assertIsNone(result)
def test_create_cloned_volume(self):
self.driver.common._create_lun_from_lun = mock.Mock()
result = self.driver.create_cloned_volume(VOLUME, SRC_VOL)
self.driver.common._create_lun_from_lun.assert_called_with(
SRC_VOL, VOLUME)
self.assertIsNone(result)
def test_delete_volume(self):
"""Volume deleted successfully."""
self.driver.common._delete_lun = mock.Mock()
result = self.driver.delete_volume(VOLUME)
self.driver.common._delete_lun.assert_called_with(VOLUME)
self.assertIsNone(result)
def test_extend_volume(self):
"""Volume extended successfully."""
new_size = 10
self.driver.common._extend_lun = mock.Mock()
result = self.driver.extend_volume(VOLUME, new_size)
self.driver.common._extend_lun.assert_called_with(VOLUME, new_size)
self.assertIsNone(result)
def test_create_snapshot(self):
self.driver.common._create_lun_snapshot = mock.Mock()
result = self.driver.create_snapshot(SNAPSHOT)
self.driver.common._create_lun_snapshot.assert_called_with(SNAPSHOT)
self.assertIsNone(result)
def test_delete_snapshot(self):
self.driver.common._delete_lun_snapshot = mock.Mock()
result = self.driver.delete_snapshot(SNAPSHOT)
self.driver.common._delete_lun_snapshot.assert_called_with(SNAPSHOT)
self.assertIsNone(result)
def test_get_volume_stats(self):
self.driver._update_volume_stats = mock.Mock()
self.driver._update_volume_stats()
result = self.driver.get_volume_stats(True)
self.driver._update_volume_stats.assert_called_with()
self.assertEqual(self.driver.stats, result)
@mock.patch('socket.gethostbyaddr')
def test_update_volume_stats(self, mock_gethost):
"""Test Update Volume Stats.
Makes a mock query to the backend to collect stats on all physical
devices.
"""
def gethostbyaddr(addr):
if addr == '8.8.8.8' or addr == 'example.com':
return ('example.com', [], ['8.8.8.8'])
else:
return ('a.b.c.d', [], addr)
mock_gethost.side_effect = gethostbyaddr
backend_name = self.conf.volume_backend_name
vendor_name = "Violin Memory, Inc."
tot_gb = 2046
free_gb = 1022
phy_devices = "/batch/physicalresource/physicaldevice"
conf = {
'basic.get.side_effect': [PHY_DEVICES_RESPONSE, ],
}
self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
result = self.driver._update_volume_stats()
calls = [mock.call(phy_devices)]
self.driver.common.vmem_mg.basic.get.assert_has_calls(calls)
self.assertEqual(tot_gb, self.driver.stats['total_capacity_gb'])
self.assertEqual(free_gb, self.driver.stats['free_capacity_gb'])
self.assertEqual(backend_name,
self.driver.stats['volume_backend_name'])
self.assertEqual(vendor_name, self.driver.stats['vendor_name'])
self.assertIsNone(result)
def test_get_active_fc_targets(self):
"""Test Get Active FC Targets.
Makes a mock query to the backend to collect all the physical
adapters and extract the WWNs.
"""
conf = {
'adapter.get_fc_info.return_value': FC_INFO,
}
self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
result = self.driver._get_active_fc_targets()
self.assertEqual(['2100001b9745e230', '2100001b9745e25f',
'2100001b9745e231', '2100001b9745e25e'],
result)
def test_initialize_connection(self):
lun_id = 1
target_wwns = self.driver.gateway_fc_wwns
init_targ_map = {}
conf = {
'client.create_client.return_value': None,
}
self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
self.driver._export_lun = mock.Mock(return_value=lun_id)
self.driver._build_initiator_target_map = mock.Mock(
return_value=(target_wwns, init_targ_map))
props = self.driver.initialize_connection(VOLUME, CONNECTOR)
self.driver.common.vmem_mg.client.create_client.assert_called_with(
name=CONNECTOR['host'], proto='FC', fc_wwns=CONNECTOR['wwpns'])
self.driver._export_lun.assert_called_with(VOLUME, CONNECTOR)
self.driver._build_initiator_target_map.assert_called_with(
CONNECTOR)
self.assertEqual("fibre_channel", props['driver_volume_type'])
self.assertEqual(True, props['data']['target_discovered'])
self.assertEqual(self.driver.gateway_fc_wwns,
props['data']['target_wwn'])
self.assertEqual(lun_id, props['data']['target_lun'])
def test_terminate_connection(self):
target_wwns = self.driver.gateway_fc_wwns
init_targ_map = {}
self.driver.common.vmem_mg = self.setup_mock_concerto()
self.driver._unexport_lun = mock.Mock()
self.driver._is_initiator_connected_to_array = mock.Mock(
return_value=False)
self.driver._build_initiator_target_map = mock.Mock(
return_value=(target_wwns, init_targ_map))
props = self.driver.terminate_connection(VOLUME, CONNECTOR)
self.driver._unexport_lun.assert_called_with(VOLUME, CONNECTOR)
self.driver._is_initiator_connected_to_array.assert_called_with(
CONNECTOR)
self.driver._build_initiator_target_map.assert_called_with(
CONNECTOR)
self.assertEqual("fibre_channel", props['driver_volume_type'])
self.assertEqual(target_wwns, props['data']['target_wwn'])
self.assertEqual(init_targ_map, props['data']['initiator_target_map'])
def test_export_lun(self):
lun_id = '1'
response = {'success': True, 'msg': 'Assign SAN client successfully'}
conf = {
'client.get_client_info.return_value': CLIENT_INFO,
}
self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
self.driver.common._send_cmd_and_verify = mock.Mock(
return_value=response)
self.driver._get_lun_id = mock.Mock(return_value=lun_id)
result = self.driver._export_lun(VOLUME, CONNECTOR)
self.driver.common._send_cmd_and_verify.assert_called_with(
self.driver.common.vmem_mg.lun.assign_lun_to_client,
self.driver._is_lun_id_ready,
'Assign SAN client successfully',
[VOLUME['id'], CONNECTOR['host'], "ReadWrite"],
[VOLUME['id'], CONNECTOR['host']])
self.driver._get_lun_id.assert_called_with(
VOLUME['id'], CONNECTOR['host'])
self.assertEqual(lun_id, result)
def test_export_lun_fails_with_exception(self):
lun_id = '1'
response = {'status': False, 'msg': 'Generic error'}
failure = exception.ViolinBackendErr
self.driver.common.vmem_mg = self.setup_mock_concerto()
self.driver.common._send_cmd_and_verify = mock.Mock(
side_effect=exception.ViolinBackendErr(response['msg']))
self.driver._get_lun_id = mock.Mock(return_value=lun_id)
self.assertRaises(failure, self.driver._export_lun, VOLUME, CONNECTOR)
def test_unexport_lun(self):
response = {'success': True, 'msg': 'Unassign SAN client successfully'}
self.driver.common.vmem_mg = self.setup_mock_concerto()
self.driver.common._send_cmd = mock.Mock(
return_value=response)
result = self.driver._unexport_lun(VOLUME, CONNECTOR)
self.driver.common._send_cmd.assert_called_with(
self.driver.common.vmem_mg.lun.unassign_client_lun,
"Unassign SAN client successfully",
VOLUME['id'], CONNECTOR['host'], True)
self.assertIsNone(result)
def test_get_lun_id(self):
conf = {
'client.get_client_info.return_value': CLIENT_INFO,
}
self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
result = self.driver._get_lun_id(VOLUME['id'], CONNECTOR['host'])
self.assertEqual(8, result)
def test_is_lun_id_ready(self):
lun_id = '1'
self.driver.common.vmem_mg = self.setup_mock_concerto()
self.driver._get_lun_id = mock.Mock(return_value=lun_id)
result = self.driver._is_lun_id_ready(
VOLUME['id'], CONNECTOR['host'])
self.assertTrue(result)
def test_build_initiator_target_map(self):
"""Successfully build a map when zoning is enabled."""
expected_targ_wwns = FC_TARGET_WWPNS
self.driver.lookup_service = mock.Mock()
(self.driver.lookup_service.get_device_mapping_from_network.
return_value) = FC_FABRIC_MAP
result = self.driver._build_initiator_target_map(CONNECTOR)
(targ_wwns, init_targ_map) = result
(self.driver.lookup_service.get_device_mapping_from_network.
assert_called_with(CONNECTOR['wwpns'], self.driver.gateway_fc_wwns))
self.assertEqual(set(expected_targ_wwns), set(targ_wwns))
i = FC_INITIATOR_WWPNS[0]
self.assertIn(FC_TARGET_WWPNS[0], init_targ_map[i])
self.assertIn(FC_TARGET_WWPNS[1], init_targ_map[i])
self.assertEqual(2, len(init_targ_map[i]))
i = FC_INITIATOR_WWPNS[1]
self.assertIn(FC_TARGET_WWPNS[2], init_targ_map[i])
self.assertIn(FC_TARGET_WWPNS[3], init_targ_map[i])
self.assertEqual(2, len(init_targ_map[i]))
self.assertEqual(2, len(init_targ_map))
def test_build_initiator_target_map_no_lookup_service(self):
"""Successfully build a map when zoning is disabled."""
expected_targ_wwns = FC_TARGET_WWPNS
expected_init_targ_map = {
CONNECTOR['wwpns'][0]: FC_TARGET_WWPNS,
CONNECTOR['wwpns'][1]: FC_TARGET_WWPNS
}
self.driver.lookup_service = None
targ_wwns, init_targ_map = self.driver._build_initiator_target_map(
CONNECTOR)
self.assertEqual(expected_targ_wwns, targ_wwns)
self.assertEqual(expected_init_targ_map, init_targ_map)
def test_is_initiator_connected_to_array(self):
"""Successfully finds an initiator with remaining active session."""
conf = {
'client.get_client_info.return_value': CLIENT_INFO,
}
self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
self.assertTrue(self.driver._is_initiator_connected_to_array(
CONNECTOR))
self.driver.common.vmem_mg.client.get_client_info.assert_called_with(
CONNECTOR['host'])
def test_is_initiator_connected_to_array_empty_response(self):
"""Successfully finds no initiators with remaining active sessions."""
conf = {
'client.get_client_info.return_value': CLIENT_INFO1
}
self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
self.assertFalse(self.driver._is_initiator_connected_to_array(
CONNECTOR))
| apache-2.0 |
kaxel/tdsftp | vendor/ruby/1.9.1/gems/nokogiri-1.6.1/ext/nokogiri/tmp/x86_64-apple-darwin13.0.0/ports/libxml2/2.8.0/libxml2-2.8.0/python/tests/ctxterror.py | 87 | 1318 | #!/usr/bin/python -u
#
# This test exercise the redirection of error messages with a
# functions defined in Python.
#
import sys
import libxml2

# Memory debug specific
libxml2.debugMemory(1)

# Expected content of `err` after parsing the malformed document below:
# each message is prefixed by the callback argument ("-->") and the
# numeric severity passed to the callback.
expect="""--> (3) xmlns: URI foo is not absolute
--> (4) Opening and ending tag mismatch: x line 0 and y
"""
err=""
# Per-context error handler: accumulates every message into the global
# `err` buffer so it can be compared against `expect`.
# NOTE: this file is Python 2 (print statements); do not run under py3.
def callback(arg,msg,severity,reserved):
    global err
    err = err + "%s (%d) %s" % (arg,severity,msg)

# Deliberately broken document: non-absolute namespace URI and
# mismatched closing tag, producing exactly the two errors above.
s = """<x xmlns="foo"></y>"""
parserCtxt = libxml2.createPushParser(None,"",0,"test.xml")
parserCtxt.setErrorHandler(callback, "-->")
# The handler must be retrievable exactly as registered.
if parserCtxt.getErrorHandler() != (callback,"-->"):
    print "getErrorHandler failed"
    sys.exit(1)
parserCtxt.parseChunk(s,len(s),1)
doc = parserCtxt.doc()
doc.freeDoc()
parserCtxt = None
if err != expect:
    print "error"
    print "received %s" %(err)
    print "expected %s" %(expect)
    sys.exit(1)

# Repeat the parse many times to exercise the handler registration
# path for leaks; the leak check at the bottom would catch any growth.
i = 10000
while i > 0:
    parserCtxt = libxml2.createPushParser(None,"",0,"test.xml")
    parserCtxt.setErrorHandler(callback, "-->")
    parserCtxt.parseChunk(s,len(s),1)
    doc = parserCtxt.doc()
    doc.freeDoc()
    parserCtxt = None
    err = ""
    i = i - 1

# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
    print "OK"
else:
    print "Memory leak %d bytes" % (libxml2.debugMemory(1))
    libxml2.dumpMemory()
| apache-2.0 |
MungoRae/home-assistant | homeassistant/components/media_player/mpd.py | 1 | 8894 | """
Support to interact with a Music Player Daemon.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.mpd/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.media_player import (
MEDIA_TYPE_MUSIC, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, PLATFORM_SCHEMA,
SUPPORT_PREVIOUS_TRACK, SUPPORT_STOP,
SUPPORT_VOLUME_SET, SUPPORT_PLAY_MEDIA, SUPPORT_PLAY, MEDIA_TYPE_PLAYLIST,
SUPPORT_SELECT_SOURCE, SUPPORT_CLEAR_PLAYLIST, SUPPORT_SHUFFLE_SET,
SUPPORT_SEEK, MediaPlayerDevice)
from homeassistant.const import (
STATE_OFF, STATE_ON, STATE_PAUSED, STATE_PLAYING,
CONF_PORT, CONF_PASSWORD, CONF_HOST, CONF_NAME)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
# Third-party client library installed on demand by Home Assistant.
REQUIREMENTS = ['python-mpd2==0.5.5']

_LOGGER = logging.getLogger(__name__)

DEFAULT_NAME = 'MPD'
DEFAULT_PORT = 6600

# Playlist names are refreshed from the server at most this often
# (enforced by @Throttle on MpdDevice._update_playlists).
PLAYLIST_UPDATE_INTERVAL = timedelta(seconds=120)

# Feature bitmask advertised to the media_player component.
SUPPORT_MPD = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | \
    SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
    SUPPORT_PLAY_MEDIA | SUPPORT_PLAY | SUPPORT_SELECT_SOURCE | \
    SUPPORT_CLEAR_PLAYLIST | SUPPORT_SHUFFLE_SET | SUPPORT_SEEK | \
    SUPPORT_STOP

# Platform configuration schema: only the host is mandatory.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_PASSWORD): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the MPD platform from the validated configuration."""
    mpd_device = MpdDevice(
        config.get(CONF_HOST),
        config.get(CONF_PORT),
        config.get(CONF_PASSWORD),
        config.get(CONF_NAME),
    )
    # Second argument requests an immediate state update after adding.
    add_devices([mpd_device], True)
class MpdDevice(MediaPlayerDevice):
    """Representation of a MPD server."""

    # pylint: disable=no-member
    def __init__(self, server, port, password, name):
        """Initialize the MPD device.

        Does not connect; the first call to update() establishes the
        connection lazily.
        """
        import mpd

        self.server = server
        self.port = port
        self._name = name
        self.password = password

        self._status = None          # last `status` reply (dict of strings)
        self._currentsong = None     # last `currentsong` reply
        self._playlists = []
        self._currentplaylist = None
        self._is_connected = False

        # set up MPD client
        self._client = mpd.MPDClient()
        self._client.timeout = 5
        self._client.idletimeout = None
        if password is not None:
            self._client.password(password)

    def _connect(self):
        """Connect to MPD; silently remain disconnected on failure."""
        import mpd
        try:
            self._client.connect(self.server, self.port)
        except mpd.ConnectionError:
            return
        self._is_connected = True

    def _disconnect(self):
        """Disconnect from MPD and reset cached state."""
        import mpd
        try:
            self._client.disconnect()
        except mpd.ConnectionError:
            # Connection already gone; nothing to tear down.
            pass
        self._is_connected = False
        self._status = None

    def _fetch_status(self):
        """Fetch status and current song from MPD."""
        self._status = self._client.status()
        self._currentsong = self._client.currentsong()
        self._update_playlists()

    @property
    def available(self):
        """True if MPD is available and connected."""
        return self._is_connected

    def update(self):
        """Get the latest data and update the state."""
        import mpd
        try:
            if not self._is_connected:
                self._connect()
            self._fetch_status()
        except (mpd.ConnectionError, OSError, BrokenPipeError, ValueError):
            # Cleanly disconnect in case connection is not in valid state
            self._disconnect()

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def state(self):
        """Return the media state."""
        if self._status is None:
            return STATE_OFF
        elif self._status['state'] == 'play':
            return STATE_PLAYING
        elif self._status['state'] == 'pause':
            return STATE_PAUSED
        return STATE_ON

    @property
    def media_content_id(self):
        """Return the content ID of current playing media."""
        return self._currentsong.get('file')

    @property
    def media_content_type(self):
        """Return the content type of current playing media."""
        return MEDIA_TYPE_MUSIC

    @property
    def media_duration(self):
        """Return the duration of current playing media in seconds."""
        # Time does not exist for streams
        return self._currentsong.get('time')

    @property
    def media_title(self):
        """Return the title of current playing media."""
        name = self._currentsong.get('name', None)
        title = self._currentsong.get('title', None)
        if name is None and title is None:
            return "None"
        elif name is None:
            return title
        elif title is None:
            return name
        return '{}: {}'.format(name, title)

    @property
    def media_artist(self):
        """Return the artist of current playing media (Music track only)."""
        return self._currentsong.get('artist')

    @property
    def media_album_name(self):
        """Return the album of current playing media (Music track only)."""
        return self._currentsong.get('album')

    @property
    def volume_level(self):
        """Return the volume level as a float in [0, 1]."""
        return int(self._status['volume'])/100

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_MPD

    @property
    def source(self):
        """Name of the current input source."""
        return self._currentplaylist

    @property
    def source_list(self):
        """Return the list of available input sources."""
        return self._playlists

    def select_source(self, source):
        """Choose a different available playlist and play it."""
        self.play_media(MEDIA_TYPE_PLAYLIST, source)

    @Throttle(PLAYLIST_UPDATE_INTERVAL)
    def _update_playlists(self, **kwargs):
        """Update available MPD playlists."""
        self._playlists = []
        for playlist_data in self._client.listplaylists():
            self._playlists.append(playlist_data['playlist'])

    def set_volume_level(self, volume):
        """Set volume of media player (0..1 mapped to MPD's 0..100)."""
        self._client.setvol(int(volume * 100))

    def volume_up(self):
        """Service to send the MPD the command for volume up."""
        current_volume = int(self._status['volume'])
        if current_volume < 100:
            # BUG fix: clamp at 100 -- MPD rejects setvol values above
            # 100, so e.g. 98 + 5 must not be sent as 103.
            self._client.setvol(min(current_volume + 5, 100))

    def volume_down(self):
        """Service to send the MPD the command for volume down."""
        current_volume = int(self._status['volume'])
        if current_volume > 0:
            # BUG fix: clamp at 0 to avoid sending a negative volume.
            self._client.setvol(max(current_volume - 5, 0))

    def media_play(self):
        """Service to send the MPD the command for play/pause."""
        # NOTE(review): pause(0) only resumes from pause; it does not
        # start playback from the stopped state -- confirm desired.
        self._client.pause(0)

    def media_pause(self):
        """Service to send the MPD the command for play/pause."""
        self._client.pause(1)

    def media_stop(self):
        """Service to send the MPD the command for stop."""
        self._client.stop()

    def media_next_track(self):
        """Service to send the MPD the command for next track."""
        self._client.next()

    def media_previous_track(self):
        """Service to send the MPD the command for previous track."""
        self._client.previous()

    def play_media(self, media_type, media_id, **kwargs):
        """Send the media player the command for playing a playlist."""
        # Use lazy %-style logger arguments instead of str.format().
        _LOGGER.debug("Playing playlist: %s", media_id)
        if media_type == MEDIA_TYPE_PLAYLIST:
            if media_id in self._playlists:
                self._currentplaylist = media_id
            else:
                self._currentplaylist = None
                # BUG fix: the original passed a %-style placeholder to
                # str.format(), so the playlist name was never substituted.
                _LOGGER.warning("Unknown playlist name %s.", media_id)
            self._client.clear()
            self._client.load(media_id)
            self._client.play()
        else:
            self._client.clear()
            self._client.add(media_id)
            self._client.play()

    @property
    def shuffle(self):
        """Boolean if shuffle is enabled."""
        # BUG fix: python-mpd2 returns status values as strings and
        # bool('0') is True; convert through int first.
        return bool(int(self._status['random']))

    def set_shuffle(self, shuffle):
        """Enable/disable shuffle mode."""
        self._client.random(int(shuffle))

    def clear_playlist(self):
        """Clear players playlist."""
        self._client.clear()

    def media_seek(self, position):
        """Send seek command."""
        self._client.seekcur(position)
| apache-2.0 |
40223103/w16b_test | static/Brython3.1.3-20150514-095342/Lib/unittest/result.py | 727 | 6397 | """Test result object"""
import io
import sys
import traceback
from . import util
from functools import wraps
__unittest = True
def failfast(method):
    """Decorate a TestResult method so that, when the result's
    ``failfast`` flag is set, ``stop()`` is requested before the
    wrapped method runs."""
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        if getattr(self, 'failfast', False):
            self.stop()
        return method(self, *args, **kwargs)
    return wrapper
# Templates used to append captured stdout/stderr to a failure report
# when output buffering is enabled.
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(object):
    """Holder for test result information.

    Test results are automatically managed by the TestCase and TestSuite
    classes, and do not need to be explicitly manipulated by writers of tests.

    Each instance holds the total number of tests run, and collections of
    failures and errors that occurred among those test runs. The collections
    contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
    formatted traceback of the error that occurred.
    """
    _previousTestClass = None
    _testRunEntered = False
    _moduleSetUpFailed = False

    def __init__(self, stream=None, descriptions=None, verbosity=None):
        self.failfast = False
        self.failures = []
        self.errors = []
        self.testsRun = 0
        self.skipped = []
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.shouldStop = False
        self.buffer = False
        self._stdout_buffer = None
        self._stderr_buffer = None
        self._original_stdout = sys.stdout
        self._original_stderr = sys.stderr
        # True once a test errored/failed, so buffered output is echoed.
        self._mirrorOutput = False

    def printErrors(self):
        "Called by TestRunner after test run"
        #fixme brython -- intentionally a no-op in this port
        pass

    def startTest(self, test):
        "Called when the given test is about to be run"
        self.testsRun += 1
        self._mirrorOutput = False
        self._setupStdout()

    def _setupStdout(self):
        # Redirect stdout/stderr into StringIO buffers while buffering
        # is enabled; the buffers are created once and reused.
        if self.buffer:
            if self._stderr_buffer is None:
                self._stderr_buffer = io.StringIO()
                self._stdout_buffer = io.StringIO()
            sys.stdout = self._stdout_buffer
            sys.stderr = self._stderr_buffer

    def startTestRun(self):
        """Called once before any tests are executed.

        See startTest for a method called before each test.
        """

    def stopTest(self, test):
        """Called when the given test has been run"""
        self._restoreStdout()
        self._mirrorOutput = False

    def _restoreStdout(self):
        # Restore the real streams; if the test failed/errored, first
        # replay the captured output so it is not silently lost.
        if self.buffer:
            if self._mirrorOutput:
                output = sys.stdout.getvalue()
                error = sys.stderr.getvalue()
                if output:
                    if not output.endswith('\n'):
                        output += '\n'
                    self._original_stdout.write(STDOUT_LINE % output)
                if error:
                    if not error.endswith('\n'):
                        error += '\n'
                    self._original_stderr.write(STDERR_LINE % error)
            sys.stdout = self._original_stdout
            sys.stderr = self._original_stderr
            # Reset (truncate) the buffers for the next test.
            self._stdout_buffer.seek(0)
            self._stdout_buffer.truncate()
            self._stderr_buffer.seek(0)
            self._stderr_buffer.truncate()

    def stopTestRun(self):
        """Called once after all tests are executed.

        See stopTest for a method called after each test.
        """

    @failfast
    def addError(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().
        """
        self.errors.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True

    @failfast
    def addFailure(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info()."""
        self.failures.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True

    def addSuccess(self, test):
        "Called when a test has completed successfully"
        pass

    def addSkip(self, test, reason):
        """Called when a test is skipped."""
        self.skipped.append((test, reason))

    def addExpectedFailure(self, test, err):
        """Called when an expected failure/error occured."""
        self.expectedFailures.append(
            (test, self._exc_info_to_string(err, test)))

    @failfast
    def addUnexpectedSuccess(self, test):
        """Called when a test was expected to fail, but succeed."""
        self.unexpectedSuccesses.append(test)

    def wasSuccessful(self):
        "Tells whether or not this result was a success"
        return len(self.failures) == len(self.errors) == 0

    def stop(self):
        "Indicates that the tests should be aborted"
        self.shouldStop = True

    def _exc_info_to_string(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string."""
        exctype, value, tb = err
        # Skip test runner traceback levels
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next
        if exctype is test.failureException:
            # Skip assert*() traceback levels
            length = self._count_relevant_tb_levels(tb)
            msgLines = traceback.format_exception(exctype, value, tb, length)
        else:
            msgLines = traceback.format_exception(exctype, value, tb)
        if self.buffer:
            output = sys.stdout.getvalue()
            error = sys.stderr.getvalue()
            if output:
                if not output.endswith('\n'):
                    output += '\n'
                msgLines.append(STDOUT_LINE % output)
            if error:
                if not error.endswith('\n'):
                    error += '\n'
                msgLines.append(STDERR_LINE % error)
        return ''.join(msgLines)

    def _is_relevant_tb_level(self, tb):
        #fix me brython
        #return '__unittest' in tb.tb_frame.f_globals
        # NOTE(review): with this always-True stub, the while-loop in
        # _exc_info_to_string consumes every frame, so formatted
        # tracebacks carry no frame detail under brython -- confirm
        # this is the intended trade-off.
        return True #brython workaround: treat every frame as relevant

    def _count_relevant_tb_levels(self, tb):
        length = 0
        while tb and not self._is_relevant_tb_level(tb):
            length += 1
            tb = tb.tb_next
        return length

    def __repr__(self):
        return ("<%s run=%i errors=%i failures=%i>" %
               (util.strclass(self.__class__), self.testsRun, len(self.errors),
                len(self.failures)))
| agpl-3.0 |
kvaps/vdsm | vdsm_hooks/ipv6/ipv6.py | 4 | 1669 | #!/usr/bin/env python
# Copyright 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import hooking
import traceback
def main():
    """Forward IPv6 configuration from the network 'custom' properties
    to VDSM API.

    Reads the setupNetworks request JSON from the hook environment,
    rewrites each network's attributes in place, and writes it back.
    """
    setup_nets_config = hooking.read_json()
    for network, attrs in setup_nets_config['request']['networks'].items():
        if 'remove' in attrs:
            # Networks being removed need no IPv6 configuration.
            continue
        elif 'custom' in attrs:
            _process_network(attrs)
    hooking.write_json(setup_nets_config)
def _process_network(attrs):
for property_name in ('ipv6addr', 'ipv6gateway', 'ipv6autoconf', 'dhcpv6'):
value = attrs['custom'].get(property_name)
if value is not None:
attrs[property_name] = value
if __name__ == '__main__':
    try:
        main()
    except:
        # Bare except is deliberate: a hook must report any failure
        # (including SystemExit-like conditions) back to VDSM instead
        # of dying with an unhandled traceback.
        hooking.exit_hook('ipv6 hook: [unexpected error]: %s\n' %
                          traceback.format_exc())
| gpl-2.0 |
vrsource/mapproxy | mapproxy/service/wms.py | 2 | 32285 | # This file is part of the MapProxy project.
# Copyright (C) 2010-2014 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
WMS service handler
"""
from mapproxy.compat import iteritems
from mapproxy.compat.itertools import chain
from functools import partial
from mapproxy.cache.tile import CacheInfo
from mapproxy.request.wms import (wms_request, WMS111LegendGraphicRequest,
mimetype_from_infotype, infotype_from_mimetype, switch_bbox_epsg_axis_order)
from mapproxy.srs import SRS, TransformationError
from mapproxy.service.base import Server
from mapproxy.response import Response
from mapproxy.source import SourceError
from mapproxy.exception import RequestError
from mapproxy.image import bbox_position_in_image, SubImageSource, BlankImageSource
from mapproxy.image.merge import concat_legends, LayerMerger
from mapproxy.image.opts import ImageOptions
from mapproxy.image.message import attribution_image, message_image
from mapproxy.layer import BlankImage, MapQuery, InfoQuery, LegendQuery, MapError, LimitedLayer
from mapproxy.layer import MapBBOXError, merge_layer_extents, merge_layer_res_ranges
from mapproxy.util import async
from mapproxy.util.py import cached_property, reraise
from mapproxy.util.coverage import load_limited_to
from mapproxy.util.ext.odict import odict
from mapproxy.template import template_loader, bunch, recursive_bunch
from mapproxy.service import template_helper
from mapproxy.layer import DefaultMapExtent, MapExtent
get_template = template_loader(__name__, 'templates', namespace=template_helper.__dict__)
class PERMIT_ALL_LAYERS(object):
    """Sentinel marker class.

    NOTE(review): appears to be used as an identity-compared flag value
    meaning "all layers are authorized" -- confirm against the
    authorization call sites elsewhere in this module.
    """
    pass
class WMSServer(Server):
service = 'wms'
fi_transformers = None
def __init__(self, root_layer, md, srs, image_formats,
request_parser=None, tile_layers=None, attribution=None,
info_types=None, strict=False, on_error='raise',
concurrent_layer_renderer=1, max_output_pixels=None,
srs_extents=None, max_tile_age=None,
versions=None,
inspire_md=None,
):
Server.__init__(self)
self.request_parser = request_parser or partial(wms_request, strict=strict, versions=versions)
self.root_layer = root_layer
self.layers = root_layer.child_layers()
self.tile_layers = tile_layers or {}
self.strict = strict
self.attribution = attribution
self.md = md
self.on_error = on_error
self.concurrent_layer_renderer = concurrent_layer_renderer
self.image_formats = image_formats
self.info_types = info_types
self.srs = srs
self.srs_extents = srs_extents
self.max_output_pixels = max_output_pixels
self.max_tile_age = max_tile_age
self.inspire_md = inspire_md
def map(self, map_request):
self.check_map_request(map_request)
params = map_request.params
query = MapQuery(params.bbox, params.size, SRS(params.srs), params.format)
if map_request.params.get('tiled', 'false').lower() == 'true':
query.tiled_only = True
orig_query = query
if self.srs_extents and params.srs in self.srs_extents:
# limit query to srs_extent if query is larger
query_extent = MapExtent(params.bbox, SRS(params.srs))
if not self.srs_extents[params.srs].contains(query_extent):
limited_extent = self.srs_extents[params.srs].intersection(query_extent)
if not limited_extent:
img_opts = self.image_formats[params.format_mime_type].copy()
img_opts.bgcolor = params.bgcolor
img_opts.transparent = params.transparent
img = BlankImageSource(size=params.size, image_opts=img_opts, cacheable=True)
return Response(img.as_buffer(), content_type=img_opts.format.mime_type)
sub_size, offset, sub_bbox = bbox_position_in_image(params.bbox, params.size, limited_extent.bbox)
query = MapQuery(sub_bbox, sub_size, SRS(params.srs), params.format)
actual_layers = odict()
for layer_name in map_request.params.layers:
layer = self.layers[layer_name]
# only add if layer renders the query
if layer.renders_query(query):
# if layer is not transparent and will be rendered,
# remove already added (then hidden) layers
if layer.is_opaque(query):
actual_layers = odict()
for layer_name, map_layers in layer.map_layers_for_query(query):
actual_layers[layer_name] = map_layers
authorized_layers, coverage = self.authorized_layers('map', actual_layers.keys(),
map_request.http.environ, query_extent=(query.srs.srs_code, query.bbox))
self.filter_actual_layers(actual_layers, map_request.params.layers, authorized_layers)
render_layers = []
for layers in actual_layers.values():
render_layers.extend(layers)
self.update_query_with_fwd_params(query, params=params,
layers=render_layers)
raise_source_errors = True if self.on_error == 'raise' else False
renderer = LayerRenderer(render_layers, query, map_request,
raise_source_errors=raise_source_errors,
concurrent_rendering=self.concurrent_layer_renderer)
merger = LayerMerger()
renderer.render(merger)
if self.attribution and self.attribution.get('text') and not query.tiled_only:
merger.add(attribution_image(self.attribution['text'], query.size))
img_opts = self.image_formats[params.format_mime_type].copy()
img_opts.bgcolor = params.bgcolor
img_opts.transparent = params.transparent
result = merger.merge(size=query.size, image_opts=img_opts,
bbox=query.bbox, bbox_srs=params.srs, coverage=coverage)
if query != orig_query:
result = SubImageSource(result, size=orig_query.size, offset=offset, image_opts=img_opts)
# Provide the wrapping WSGI app or filter the opportunity to process the
# image before it's wrapped up in a response
result = self.decorate_img(result, 'wms.map', actual_layers.keys(),
map_request.http.environ, (query.srs.srs_code, query.bbox))
try:
result_buf = result.as_buffer(img_opts)
except IOError as ex:
raise RequestError('error while processing image file: %s' % ex,
request=map_request)
resp = Response(result_buf, content_type=img_opts.format.mime_type)
if query.tiled_only and isinstance(result.cacheable, CacheInfo):
cache_info = result.cacheable
resp.cache_headers(cache_info.timestamp, etag_data=(cache_info.timestamp, cache_info.size),
max_age=self.max_tile_age)
resp.make_conditional(map_request.http)
if not result.cacheable:
resp.cache_headers(no_cache=True)
return resp
def capabilities(self, map_request):
    """Handle a WMS GetCapabilities request and return the rendered XML."""
    # TODO: debug layer
    # if '__debug__' in map_request.params:
    #     layers = self.layers.values()
    # else:
    #     layers = [layer for name, layer in iteritems(self.layers)
    #               if name != '__debug__']

    # Tile layers are only advertised when the client asks for the
    # (non-standard) tiled capabilities.
    tile_layers = []
    if map_request.params.get('tiled', 'false').lower() == 'true':
        tile_layers = self.tile_layers.values()

    service = self._service_md(map_request)
    root_layer = self.authorized_capability_layers(map_request.http.environ)

    # Determine the feature-info formats to advertise: explicit
    # configuration wins, then the configured transformers, then defaults.
    if self.info_types:
        info_types = self.info_types
    elif self.fi_transformers:
        info_types = self.fi_transformers.keys()
    else:
        info_types = ['text', 'html', 'xml']  # defaults
    info_formats = [mimetype_from_infotype(map_request.version, t)
                    for t in info_types]

    result = Capabilities(
        service, root_layer, tile_layers, self.image_formats, info_formats,
        srs=self.srs, srs_extents=self.srs_extents,
        inspire_md=self.inspire_md,
    ).render(map_request)
    return Response(result, mimetype=map_request.mime_type)
def featureinfo(self, request):
    """Handle a WMS GetFeatureInfo request.

    Resolves the requested query layers to their queryable info sources,
    checks authorization, queries each source and returns the combined
    (and optionally transformed) result as a Response.
    """
    infos = []
    self.check_featureinfo_request(request)

    p = request.params
    query = InfoQuery(p.bbox, p.size, SRS(p.srs), p.pos,
        p['info_format'], format=request.params.format or None,
        feature_count=p.get('feature_count'))

    # map requested (possibly grouped) layer names to their actual
    # queryable info layers; odict preserves the request order
    actual_layers = odict()

    for layer_name in request.params.query_layers:
        layer = self.layers[layer_name]
        if not layer.queryable:
            raise RequestError('layer %s is not queryable' % layer_name, request=request)
        for layer_name, info_layers in layer.info_layers_for_query(query):
            actual_layers[layer_name] = info_layers

    authorized_layers, coverage = self.authorized_layers('featureinfo', actual_layers.keys(),
        request.http.environ, query_extent=(query.srs.srs_code, query.bbox))
    self.filter_actual_layers(actual_layers, request.params.layers, authorized_layers)

    # outside of auth-coverage
    if coverage and not coverage.contains(query.coord, query.srs):
        infos = []
    else:
        info_layers = []
        for layers in actual_layers.values():
            info_layers.extend(layers)

        for layer in info_layers:
            info = layer.get_info(query)
            if info is None:
                continue
            infos.append(info)

    mimetype = None
    if 'info_format' in request.params:
        mimetype = request.params.info_format

    if not infos:
        # nothing found: empty body, but keep the requested mimetype
        return Response('', mimetype=mimetype)

    if self.fi_transformers:
        doc = infos[0].combine(infos)
        if doc.info_type == 'text':
            # plain text needs no transformation
            resp = doc.as_string()
            mimetype = 'text/plain'
        else:
            if not mimetype:
                # no explicit format requested: pick the best available one
                if 'xml' in self.fi_transformers:
                    info_type = 'xml'
                elif 'html' in self.fi_transformers:
                    info_type = 'html'
                else:
                    info_type = 'text'
                mimetype = mimetype_from_infotype(request.version, info_type)
            else:
                info_type = infotype_from_mimetype(request.version, mimetype)
            resp = self.fi_transformers[info_type](doc).as_string()
    else:
        # no transformers configured: return the (combined) raw documents
        mimetype = mimetype_from_infotype(request.version, infos[0].info_type)
        if len(infos) > 1:
            resp = infos[0].combine(infos).as_string()
        else:
            resp = infos[0].as_string()

    return Response(resp, mimetype=mimetype)
def check_map_request(self, request):
    """Validate a GetMap request: output size limit, layers, format, SRS."""
    if self.max_output_pixels:
        width, height = request.params.size
        if width * height > self.max_output_pixels:
            # image is too large to answer with an in-image exception,
            # force an XML exception response instead
            request.prevent_image_exception = True
            raise RequestError("image size too large", request=request)

    self.validate_layers(request)
    request.validate_format(self.image_formats)
    request.validate_srs(self.srs)
def update_query_with_fwd_params(self, query, params, layers):
    """Copy request parameters whitelisted by any layer's ``fwd_req_params``
    into ``query.dimensions`` so they get forwarded to the sources."""
    for layer in layers:
        fwd_params = getattr(layer, 'fwd_req_params', None)
        if not fwd_params:
            continue
        for name in fwd_params:
            if name in params:
                query.dimensions[name] = params[name]
def check_featureinfo_request(self, request):
    """Validate a GetFeatureInfo request (known layers and supported SRS)."""
    self.validate_layers(request)
    request.validate_srs(self.srs)
def validate_layers(self, request):
    """Raise a ``LayerNotDefined`` RequestError for any unknown layer.

    NOTE(review): the ``hasattr`` check targets ``request`` while the
    attribute is read from ``request.params`` — as written, query_layers
    are likely never validated here; confirm the intended attribute owner.
    """
    query_layers = request.params.query_layers if hasattr(request, 'query_layers') else []
    for layer in chain(request.params.layers, query_layers):
        if layer not in self.layers:
            raise RequestError('unknown layer: ' + str(layer), code='LayerNotDefined',
                request=request)
def check_legend_request(self, request):
    """Reject GetLegendGraphic requests that name an unknown layer."""
    layer_name = request.params.layer
    if layer_name not in self.layers:
        raise RequestError('unknown layer: ' + layer_name,
                           code='LayerNotDefined', request=request)
def legendgraphic(self, request):
    """Handle a GetLegendGraphic request and return the legend image.

    :raises RequestError: for unknown layers or layers without a legend
    """
    self.check_legend_request(request)
    layer = request.params.layer
    if not self.layers[layer].has_legend:
        raise RequestError('layer %s has no legend graphic' % layer, request=request)

    # collect all non-empty legend images of the layer
    # (was a list comprehension used only for its .append() side effect)
    legends = [img for img in self.layers[layer].legend(request)
               if img is not None]
    result = concat_legends(legends)

    if 'format' in request.params:
        mimetype = request.params.format_mime_type
    else:
        mimetype = 'image/png'
    img_opts = self.image_formats[request.params.format_mime_type]
    return Response(result.as_buffer(img_opts), mimetype=mimetype)
def _service_md(self, map_request):
md = dict(self.md)
md['url'] = map_request.url
md['has_legend'] = self.root_layer.has_legend
return md
def authorized_layers(self, feature, layers, env, query_extent):
    """Ask the 'mapproxy.authorize' middleware which layers are permitted.

    :param feature: the WMS feature to authorize ('map', 'featureinfo', ...)
    :param layers: requested layer names
    :param env: WSGI environ of the request
    :param query_extent: (srs_code, bbox) tuple of the request
    :returns: ``(PERMIT_ALL_LAYERS, None)`` when everything is allowed,
        otherwise a dict mapping each permitted layer name to an optional
        'limited_to' geometry, plus an optional global coverage.
    :raises RequestError: with status 401 for unauthenticated requests
    """
    if 'mapproxy.authorize' in env:
        # pass a copy of the layer list; the callback may modify it
        result = env['mapproxy.authorize']('wms.' + feature, layers[:],
            environ=env, query_extent=query_extent)
        if result['authorized'] == 'unauthenticated':
            raise RequestError('unauthorized', status=401)
        if result['authorized'] == 'full':
            return PERMIT_ALL_LAYERS, None
        layers = {}
        if result['authorized'] == 'partial':
            for layer_name, permissions in iteritems(result['layers']):
                if permissions.get(feature, False) == True:
                    layers[layer_name] = permissions.get('limited_to')
        limited_to = result.get('limited_to')
        if limited_to:
            coverage = load_limited_to(limited_to)
        else:
            coverage = None
        # an empty `layers` dict here denies every layer
        return layers, coverage
    else:
        # no authorization middleware configured: allow everything
        return PERMIT_ALL_LAYERS, None
def filter_actual_layers(self, actual_layers, requested_layers, authorized_layers):
    """Remove or limit entries of ``actual_layers`` based on authorization.

    Explicitly requested but unauthorized layers raise a 403; layers that
    were only included implicitly (via a group layer) are dropped silently.
    Layers with a 'limited_to' geometry are wrapped in LimitedLayer.
    """
    if authorized_layers is PERMIT_ALL_LAYERS:
        return

    requested_layer_names = set(requested_layers)
    # iterate over a copy of the keys: entries are deleted during the
    # loop and iterating the live dict view raises RuntimeError on Python 3
    for layer_name in list(actual_layers.keys()):
        if layer_name not in authorized_layers:
            # check whether layer was requested explicit...
            if layer_name in requested_layer_names:
                raise RequestError('forbidden', status=403)
            # or implicit (part of group layer)
            else:
                del actual_layers[layer_name]
        elif authorized_layers[layer_name] is not None:
            # authorization is limited to a geometry
            limited_to = load_limited_to(authorized_layers[layer_name])
            actual_layers[layer_name] = [LimitedLayer(lyr, limited_to)
                                         for lyr in actual_layers[layer_name]]
def authorized_capability_layers(self, env):
    """Return the root layer for capabilities, filtered by the
    'mapproxy.authorize' middleware when one is configured.

    :raises RequestError: 401 for unauthenticated requests, 403 when the
        middleware grants neither full nor partial access
    """
    if 'mapproxy.authorize' in env:
        result = env['mapproxy.authorize']('wms.capabilities', self.layers.keys(), environ=env)
        if result['authorized'] == 'unauthenticated':
            raise RequestError('unauthorized', status=401)
        if result['authorized'] == 'full':
            return self.root_layer
        if result['authorized'] == 'partial':
            limited_to = result.get('limited_to')
            if limited_to:
                coverage = load_limited_to(limited_to)
            else:
                coverage = None
            # wrap the tree so only permitted layers are advertised
            return FilteredRootLayer(self.root_layer, result['layers'], coverage=coverage)
        # neither 'full' nor 'partial': deny
        raise RequestError('forbidden', status=403)
    else:
        return self.root_layer
class FilteredRootLayer(object):
    """Proxy around a (root) layer that filters the layer tree based on the
    permissions dict returned by the authorization middleware.

    Unknown attributes are delegated to the wrapped layer; ``extent``,
    ``queryable`` and ``layers`` are recomputed from the permissions.
    """
    def __init__(self, root_layer, permissions, coverage=None):
        self.root_layer = root_layer
        self.permissions = permissions
        self.coverage = coverage

    def __getattr__(self, name):
        # delegate everything not overridden here to the wrapped layer
        return getattr(self.root_layer, name)

    @cached_property
    def extent(self):
        """Extent of the layer, clipped to the authorized coverage(s)."""
        layer_name = self.root_layer.name
        limited_to = self.permissions.get(layer_name, {}).get('limited_to')
        extent = self.root_layer.extent

        if limited_to:
            # clip by the per-layer coverage
            coverage = load_limited_to(limited_to)
            limited_coverage = coverage.intersection(extent.bbox, extent.srs)
            extent = limited_coverage.extent

        if self.coverage:
            # clip by the global coverage
            limited_coverage = self.coverage.intersection(extent.bbox, extent.srs)
            extent = limited_coverage.extent
        return extent

    @property
    def queryable(self):
        """True if the layer is queryable and 'featureinfo' is permitted."""
        if not self.root_layer.queryable: return False

        layer_name = self.root_layer.name
        # unnamed (grouping-only) layers have no permission entry of their own
        if not layer_name or self.permissions.get(layer_name, {}).get('featureinfo', False):
            return True
        return False

    def layer_permitted(self, layer):
        """True if 'map' is permitted for `layer` and its extent intersects
        the per-layer and global limiting coverages (when present)."""
        if not self.permissions.get(layer.name, {}).get('map', False):
            return False
        extent = layer.extent
        limited_to = self.permissions.get(layer.name, {}).get('limited_to')
        if limited_to:
            coverage = load_limited_to(limited_to)
            if not coverage.intersects(extent.bbox, extent.srs):
                return False

        if self.coverage:
            if not self.coverage.intersects(extent.bbox, extent.srs):
                return False
        return True

    @cached_property
    def layers(self):
        """Permitted sublayers, each wrapped in a FilteredRootLayer."""
        layers = []
        for layer in self.root_layer.layers:
            if not layer.name or self.layer_permitted(layer):
                filtered_layer = FilteredRootLayer(layer, self.permissions, self.coverage)
                if filtered_layer.is_active or filtered_layer.layers:
                    # add filtered_layer only if it is active (no grouping layer)
                    # or if it contains other active layers
                    layers.append(filtered_layer)
        return layers
# Fallback whole-world extents for the SRSes that are always advertised.
# Used by limit_srs_extents() when no explicit srs_extents are configured.
DEFAULT_EXTENTS = {
    'EPSG:3857': DefaultMapExtent(),
    'EPSG:4326': DefaultMapExtent(),
    'EPSG:900913': DefaultMapExtent(),
}
def limit_srs_extents(srs_extents, supported_srs):
    """
    Limit srs_extents to supported_srs.

    Returns a new mapping; falls back to DEFAULT_EXTENTS when no explicit
    extents are given. The input mapping is never modified.
    """
    if srs_extents:
        srs_extents = srs_extents.copy()
    else:
        srs_extents = DEFAULT_EXTENTS.copy()

    # drop every SRS that is not in the supported list
    unsupported = [srs for srs in srs_extents if srs not in supported_srs]
    for srs in unsupported:
        del srs_extents[srs]

    return srs_extents
class Capabilities(object):
    """
    Renders WMS capabilities documents.
    """
    def __init__(self, server_md, layers, tile_layers, image_formats, info_formats,
            srs, srs_extents=None, epsg_axis_order=False,
            inspire_md=None,
        ):
        # NOTE(review): `epsg_axis_order` is accepted but never stored;
        # layer_srs_bbox() takes its own epsg_axis_order argument — confirm
        # whether the constructor parameter is still needed by callers.
        self.service = server_md
        self.layers = layers
        self.tile_layers = tile_layers
        self.image_formats = image_formats
        self.info_formats = info_formats
        self.srs = srs
        # only keep extents for SRSes that are actually supported
        self.srs_extents = limit_srs_extents(srs_extents, srs)
        self.inspire_md = inspire_md

    def layer_srs_bbox(self, layer, epsg_axis_order=False):
        """Yield (srs_code, bbox) pairs for all supported SRSes of `layer`."""
        for srs, extent in iteritems(self.srs_extents):
            # is_default is True when no explicit bbox is defined for this srs
            # use layer extent
            if extent.is_default:
                bbox = layer.extent.bbox_for(SRS(srs))
            elif layer.extent.is_default:
                bbox = extent.bbox_for(SRS(srs))
            else:
                # Use intersection of srs_extent and layer.extent.
                bbox = extent.intersection(layer.extent).bbox_for(SRS(srs))

            if epsg_axis_order:
                # swap to lat/lon axis order where the EPSG code demands it
                bbox = switch_bbox_epsg_axis_order(bbox, srs)
            if srs in self.srs:
                yield srs, bbox

        # add native srs
        layer_srs_code = layer.extent.srs.srs_code
        if layer_srs_code not in self.srs_extents:
            bbox = layer.extent.bbox
            if epsg_axis_order:
                bbox = switch_bbox_epsg_axis_order(bbox, layer_srs_code)
            if layer_srs_code in self.srs:
                yield layer_srs_code, bbox

    def layer_llbbox(self, layer):
        """Return the lat/lon bbox of `layer`, clamped to valid lat/lon values."""
        if 'EPSG:4326' in self.srs_extents:
            llbbox = self.srs_extents['EPSG:4326'].intersection(layer.extent).llbbox
            return limit_llbbox(llbbox)
        return limit_llbbox(layer.extent.llbbox)

    def render(self, _map_request):
        """Render the capabilities document for the request's WMS version."""
        return self._render_template(_map_request.capabilities_template)

    def _render_template(self, template):
        # expand the version-specific template with all capability data
        template = get_template(template)
        inspire_md = None
        if self.inspire_md:
            inspire_md = recursive_bunch(default='', **self.inspire_md)
        doc = template.substitute(service=bunch(default='', **self.service),
                                  layers=self.layers,
                                  formats=self.image_formats,
                                  info_formats=self.info_formats,
                                  srs=self.srs,
                                  tile_layers=self.tile_layers,
                                  layer_srs_bbox=self.layer_srs_bbox,
                                  layer_llbbox=self.layer_llbbox,
                                  inspire_md=inspire_md,
                                  )
        # strip blank lines
        doc = '\n'.join(l for l in doc.split('\n') if l.rstrip())
        return doc
def limit_llbbox(bbox):
    """
    Limit the long/lat bounding box to +-180/89.99999999 degrees.

    Some clients can't handle +-90 north/south, so we subtract a tiny bit.

    >>> ', '.join('%.6f' % x for x in limit_llbbox((-200,-90.0, 180, 90)))
    '-180.000000, -89.999999, 180.000000, 89.999999'
    >>> ', '.join('%.6f' % x for x in limit_llbbox((-20,-9.0, 10, 10)))
    '-20.000000, -9.000000, 10.000000, 10.000000'
    """
    minx, miny, maxx, maxy = bbox
    return (
        max(-180, minx),
        max(-89.999999, miny),
        min(180, maxx),
        min(89.999999, maxy),
    )
class LayerRenderer(object):
    """Renders multiple layers into a LayerMerger, optionally concurrently.

    Adjacent layers are combined (see combined_layers()) before rendering.
    Source errors either abort the request ('raise') or are painted into
    the map as a message image.
    """
    def __init__(self, layers, query, request, raise_source_errors=True,
                 concurrent_rendering=1):
        self.layers = layers
        self.query = query
        self.request = request
        self.raise_source_errors = raise_source_errors
        self.concurrent_rendering = concurrent_rendering

    def render(self, layer_merger):
        """Render all layers and add the resulting images to `layer_merger`."""
        render_layers = combined_layers(self.layers, self.query)
        if not render_layers: return

        # NOTE: `async` is MapProxy's own compatibility module here, not the
        # Python >=3.7 keyword — this code predates the rename.
        async_pool = async.Pool(size=min(len(render_layers), self.concurrent_rendering))

        if self.raise_source_errors:
            return self._render_raise_exceptions(async_pool, render_layers,
                                                 layer_merger)
        else:
            return self._render_capture_source_errors(async_pool, render_layers,
                                                      layer_merger)

    def _render_raise_exceptions(self, async_pool, render_layers, layer_merger):
        # call _render_layer, raise all exceptions
        try:
            for layer_task in async_pool.imap(self._render_layer, render_layers,
                                              use_result_objects=True):
                if layer_task.exception is None:
                    layer, layer_img = layer_task.result
                    if layer_img is not None:
                        layer_merger.add(layer_img, layer.coverage)
                else:
                    # stop pending tasks and re-raise in the calling thread
                    ex = layer_task.exception
                    async_pool.shutdown(True)
                    reraise(ex)
        except SourceError as ex:
            raise RequestError(ex.args[0], request=self.request)

    def _render_capture_source_errors(self, async_pool, render_layers, layer_merger):
        # call _render_layer, capture SourceError exceptions
        errors = []
        rendered = 0

        for layer_task in async_pool.imap(self._render_layer, render_layers,
                                          use_result_objects=True):
            if layer_task.exception is None:
                layer, layer_img = layer_task.result
                if layer_img is not None:
                    layer_merger.add(layer_img, layer.coverage)
                rendered += 1
            else:
                # a failed source makes the merged result uncacheable
                layer_merger.cacheable = False
                ex = layer_task.exception
                # NOTE(review): `ex` appears to be an exc_info-style tuple
                # (type, value, traceback) — ex[1] is the exception value;
                # confirm against the async pool implementation.
                if isinstance(ex[1], SourceError):
                    errors.append(ex[1].args[0])
                else:
                    async_pool.shutdown(True)
                    reraise(ex)

        if render_layers and not rendered:
            # every source failed: report the collected errors as request error
            errors = '\n'.join(errors)
            raise RequestError('Could not get any sources:\n'+errors, request=self.request)

        if errors:
            # some sources failed: paint the error messages into the map
            layer_merger.add(message_image('\n'.join(errors), self.query.size,
                image_opts=ImageOptions(transparent=True)))

    def _render_layer(self, layer):
        """Render a single layer; returns (layer, image-or-None)."""
        try:
            layer_img = layer.get_map(self.query)
            if layer_img is not None:
                layer_img.opacity = layer.opacity
            return layer, layer_img
        except SourceError:
            # handled by the two callers above
            raise
        except MapBBOXError:
            raise RequestError('Request too large or invalid BBOX.', request=self.request)
        except MapError as e:
            raise RequestError('Invalid request: %s' % e.args[0], request=self.request)
        except TransformationError:
            raise RequestError('Could not transform BBOX: Invalid result.',
                request=self.request)
        except BlankImage:
            # layer has nothing to render for this query
            return layer, None
class WMSLayerBase(object):
    """
    Base class for WMS layer (layer groups and leaf layers).
    """

    # True if layer is an actual layer (not a group only)
    is_active = True
    # list of sublayers
    layers = []
    # metadata dictionary with title, name, etc.
    md = {}
    # True if .info() is supported
    queryable = False
    # True if .legend() is supported
    has_legend = False
    legend_url = None
    legend_size = None
    # resolution range (i.e. ScaleHint) of the layer
    res_range = None
    # MapExtent of the layer
    extent = None

    def map_layers_for_query(self, query):
        raise NotImplementedError()

    def legend(self, query):
        raise NotImplementedError()

    def info(self, query):
        raise NotImplementedError()

class WMSLayer(WMSLayerBase):
    """
    Class for WMS layers.

    Combines map, info and legend sources with metadata.
    """
    is_active = True
    layers = []

    def __init__(self, name, title, map_layers, info_layers=None, legend_layers=None,
                 res_range=None, md=None):
        """
        :param map_layers: sources used to render this layer
        :param info_layers: sources queried for GetFeatureInfo
        :param legend_layers: sources queried for GetLegendGraphic
        :param res_range: resolution range; merged from map_layers if None
        :param md: metadata dict (title, abstract, ...)
        """
        self.name = name
        self.title = title
        self.md = md or {}
        self.map_layers = map_layers
        # was `info_layers=[], legend_layers=[]`: mutable default arguments
        # are shared between calls — use None sentinels instead
        self.info_layers = info_layers if info_layers is not None else []
        self.legend_layers = legend_layers if legend_layers is not None else []
        self.extent = merge_layer_extents(map_layers)
        if res_range is None:
            res_range = merge_layer_res_ranges(map_layers)
        self.res_range = res_range
        self.queryable = bool(self.info_layers)
        self.has_legend = bool(self.legend_layers)

    def is_opaque(self, query):
        """True if any map source fully covers the query."""
        return any(l.is_opaque(query) for l in self.map_layers)

    def renders_query(self, query):
        """False if the query is outside of the layer's resolution range."""
        if self.res_range and not self.res_range.contains(query.bbox, query.size, query.srs):
            return False
        return True

    def map_layers_for_query(self, query):
        if not self.map_layers:
            return []
        return [(self.name, self.map_layers)]

    def info_layers_for_query(self, query):
        if not self.info_layers:
            return []
        return [(self.name, self.info_layers)]

    def legend(self, request):
        """Yield the legend image of each legend source."""
        p = request.params
        query = LegendQuery(p.format, p.scale)
        for lyr in self.legend_layers:
            yield lyr.get_legend(query)

    @property
    def legend_size(self):
        # combined size: widest legend x sum of heights (stacked vertically)
        width = 0
        height = 0
        for layer in self.legend_layers:
            width = max(layer.size[0], width)
            height += layer.size[1]
        return (width, height)

    @property
    def legend_url(self):
        """URL of a GetLegendGraphic request for this layer (or None)."""
        if self.has_legend:
            req = WMS111LegendGraphicRequest(url='?',
                param=dict(format='image/png', layer=self.name, sld_version='1.1.0'))
            return req.complete_url
        else:
            return None

    def child_layers(self):
        return {self.name: self}
class WMSGroupLayer(WMSLayerBase):
    """
    Class for WMS group layers.
    Groups multiple wms layers, but can also contain a single layer (``this``)
    that represents this layer.
    """
    def __init__(self, name, title, this, layers, md=None):
        self.name = name
        self.title = title
        self.this = this
        self.md = md or {}
        # the group is directly renderable only when it has an own layer
        self.is_active = True if this is not None else False
        self.layers = layers
        # operator precedence: `this and this.has_legend or any(...)` is
        # `(this and this.has_legend) or any(...)`
        self.has_legend = True if this and this.has_legend or any(l.has_legend for l in layers) else False
        self.queryable = True if this and this.queryable or any(l.queryable for l in layers) else False
        all_layers = layers + ([self.this] if self.this else [])
        self.extent = merge_layer_extents(all_layers)
        self.res_range = merge_layer_res_ranges(all_layers)

    def is_opaque(self, query):
        """True if any sublayer fully covers the query."""
        return any(l.is_opaque(query) for l in self.layers)

    @property
    def legend_size(self):
        # delegated to the group's own layer (None when is_active is False)
        return self.this.legend_size

    @property
    def legend_url(self):
        # delegated to the group's own layer (None when is_active is False)
        return self.this.legend_url

    def renders_query(self, query):
        """False if the query is outside of the group's resolution range."""
        if self.res_range and not self.res_range.contains(query.bbox, query.size, query.srs):
            return False
        return True

    def map_layers_for_query(self, query):
        """Return [(name, map-sources)] from the own layer or all sublayers."""
        if self.this:
            return self.this.map_layers_for_query(query)
        else:
            layers = []
            for layer in self.layers:
                layers.extend(layer.map_layers_for_query(query))
            return layers

    def info_layers_for_query(self, query):
        """Return [(name, info-sources)] from the own layer or all sublayers."""
        if self.this:
            return self.this.info_layers_for_query(query)
        else:
            layers = []
            for layer in self.layers:
                layers.extend(layer.info_layers_for_query(query))
            return layers

    def child_layers(self):
        """Return an odict of all named layers in this subtree (incl. self)."""
        layers = odict()
        if self.name:
            layers[self.name] = self
        for lyr in self.layers:
            if hasattr(lyr, 'child_layers'):
                layers.update(lyr.child_layers())
            elif lyr.name:
                layers[lyr.name] = lyr
        return layers
def combined_layers(layers, query):
    """
    Returns a new list of the layers where all adjacent layers are combined
    if possible.
    """
    if len(layers) <= 1:
        return layers

    result = []
    for layer in layers:
        if result:
            # try to merge with the previously accepted layer
            combined = result[-1].combined_layer(layer, query)
            if combined:
                # replace the last layer with the combined one
                result[-1] = combined
                continue
        result.append(layer)
    return result
| apache-2.0 |
Ahmad31/Web_Flask_Cassandra | flask/lib/python2.7/site-packages/pip/_vendor/cachecontrol/controller.py | 327 | 13024 | """
The httplib2 algorithms ported for use with requests.
"""
import logging
import re
import calendar
import time
from email.utils import parsedate_tz
from pip._vendor.requests.structures import CaseInsensitiveDict
from .cache import DictCache
from .serialize import Serializer
logger = logging.getLogger(__name__)
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")


def parse_uri(uri):
    """Parses a URI using the regex given in Appendix B of RFC 3986.

        (scheme, authority, path, query, fragment) = parse_uri(uri)
    """
    match = URI.match(uri)
    # groups 2/4/5/7/9 hold scheme, authority, path, query and fragment;
    # Match.group() is 1-based and returns a tuple for multiple arguments
    return match.group(2, 4, 5, 7, 9)
class CacheController(object):
    """An interface to see if a request should be cached or not.
    """
    def __init__(self, cache=None, cache_etags=True, serializer=None):
        # default to a simple in-memory dict cache and the standard serializer
        self.cache = cache or DictCache()
        self.cache_etags = cache_etags
        self.serializer = serializer or Serializer()

    @classmethod
    def _urlnorm(cls, uri):
        """Normalize the URL to create a safe key for the cache"""
        (scheme, authority, path, query, fragment) = parse_uri(uri)
        if not scheme or not authority:
            raise Exception("Only absolute URIs are allowed. uri = %s" % uri)
        scheme = scheme.lower()
        authority = authority.lower()

        if not path:
            path = "/"

        # Could do syntax based normalization of the URI before
        # computing the digest. See Section 6.2.2 of Std 66.
        request_uri = query and "?".join([path, query]) or path
        defrag_uri = scheme + "://" + authority + request_uri

        return defrag_uri

    @classmethod
    def cache_url(cls, uri):
        # the cache key is the normalized absolute URI (fragment dropped)
        return cls._urlnorm(uri)

    def parse_cache_control(self, headers):
        """
        Parse the cache control headers returning a dictionary with values
        for the different directives.
        """
        retval = {}

        cc_header = 'cache-control'
        if 'Cache-Control' in headers:
            cc_header = 'Cache-Control'

        if cc_header in headers:
            parts = headers[cc_header].split(',')
            # directives with a value, e.g. max-age=3600 (values stay strings)
            parts_with_args = [
                tuple([x.strip().lower() for x in part.split("=", 1)])
                for part in parts if -1 != part.find("=")
            ]
            # bare directives, e.g. no-cache; mapped to 1
            parts_wo_args = [
                (name.strip().lower(), 1)
                for name in parts if -1 == name.find("=")
            ]
            retval = dict(parts_with_args + parts_wo_args)

        return retval

    def cached_request(self, request):
        """
        Return a cached response if it exists in the cache, otherwise
        return False.
        """
        cache_url = self.cache_url(request.url)
        logger.debug('Looking up "%s" in the cache', cache_url)
        cc = self.parse_cache_control(request.headers)

        # Bail out if the request insists on fresh data
        if 'no-cache' in cc:
            logger.debug('Request header has "no-cache", cache bypassed')
            return False

        # NOTE(review): parse_cache_control() stores directive values as
        # strings, so this == 0 comparison appears to never match
        # ('0' != 0) — confirm against the upstream fix.
        if 'max-age' in cc and cc['max-age'] == 0:
            logger.debug('Request header has "max_age" as 0, cache bypassed')
            return False

        # Request allows serving from the cache, let's see if we find something
        cache_data = self.cache.get(cache_url)
        if cache_data is None:
            logger.debug('No cache entry available')
            return False

        # Check whether it can be deserialized
        resp = self.serializer.loads(request, cache_data)
        if not resp:
            logger.warning('Cache entry deserialization failed, entry ignored')
            return False

        # If we have a cached 301, return it immediately. We don't
        # need to test our response for other headers b/c it is
        # intrinsically "cacheable" as it is Permanent.
        # See:
        #   https://tools.ietf.org/html/rfc7231#section-6.4.2
        #
        # Client can try to refresh the value by repeating the request
        # with cache busting headers as usual (ie no-cache).
        if resp.status == 301:
            msg = ('Returning cached "301 Moved Permanently" response '
                   '(ignoring date and etag information)')
            logger.debug(msg)
            return resp

        headers = CaseInsensitiveDict(resp.headers)
        if not headers or 'date' not in headers:
            if 'etag' not in headers:
                # Without date or etag, the cached response can never be used
                # and should be deleted.
                logger.debug('Purging cached response: no date or etag')
                self.cache.delete(cache_url)
            logger.debug('Ignoring cached response: no date')
            return False

        now = time.time()
        date = calendar.timegm(
            parsedate_tz(headers['date'])
        )
        current_age = max(0, now - date)
        logger.debug('Current age based on date: %i', current_age)

        # TODO: There is an assumption that the result will be a
        #       urllib3 response object. This may not be best since we
        #       could probably avoid instantiating or constructing the
        #       response until we know we need it.
        resp_cc = self.parse_cache_control(headers)

        # determine freshness
        freshness_lifetime = 0

        # Check the max-age pragma in the cache control header
        if 'max-age' in resp_cc and resp_cc['max-age'].isdigit():
            freshness_lifetime = int(resp_cc['max-age'])
            logger.debug('Freshness lifetime from max-age: %i',
                         freshness_lifetime)

        # If there isn't a max-age, check for an expires header
        elif 'expires' in headers:
            expires = parsedate_tz(headers['expires'])
            if expires is not None:
                expire_time = calendar.timegm(expires) - date
                freshness_lifetime = max(0, expire_time)
                logger.debug("Freshness lifetime from expires: %i",
                             freshness_lifetime)

        # Determine if we are setting freshness limit in the
        # request. Note, this overrides what was in the response.
        if 'max-age' in cc:
            try:
                freshness_lifetime = int(cc['max-age'])
                logger.debug('Freshness lifetime from request max-age: %i',
                             freshness_lifetime)
            except ValueError:
                freshness_lifetime = 0

        if 'min-fresh' in cc:
            try:
                min_fresh = int(cc['min-fresh'])
            except ValueError:
                min_fresh = 0
            # adjust our current age by our min fresh
            current_age += min_fresh
            logger.debug('Adjusted current age from min-fresh: %i',
                         current_age)

        # Return entry if it is fresh enough
        if freshness_lifetime > current_age:
            logger.debug('The response is "fresh", returning cached response')
            logger.debug('%i > %i', freshness_lifetime, current_age)
            return resp

        # we're not fresh. If we don't have an Etag, clear it out
        if 'etag' not in headers:
            logger.debug(
                'The cached response is "stale" with no etag, purging'
            )
            self.cache.delete(cache_url)

        # return the original handler
        return False

    def conditional_headers(self, request):
        """Return If-None-Match / If-Modified-Since headers for revalidating
        a stale cache entry of `request` (empty dict when none apply)."""
        cache_url = self.cache_url(request.url)
        resp = self.serializer.loads(request, self.cache.get(cache_url))
        new_headers = {}

        if resp:
            headers = CaseInsensitiveDict(resp.headers)

            if 'etag' in headers:
                new_headers['If-None-Match'] = headers['ETag']

            if 'last-modified' in headers:
                new_headers['If-Modified-Since'] = headers['Last-Modified']

        return new_headers

    def cache_response(self, request, response, body=None):
        """
        Algorithm for caching requests.

        This assumes a requests Response object.
        """
        # From httplib2: Don't cache 206's since we aren't going to
        #                handle byte range requests
        cacheable_status_codes = [200, 203, 300, 301]
        if response.status not in cacheable_status_codes:
            logger.debug(
                'Status code %s not in %s',
                response.status,
                cacheable_status_codes
            )
            return

        response_headers = CaseInsensitiveDict(response.headers)

        # If we've been given a body, our response has a Content-Length, that
        # Content-Length is valid then we can check to see if the body we've
        # been given matches the expected size, and if it doesn't we'll just
        # skip trying to cache it.
        if (body is not None and
                "content-length" in response_headers and
                response_headers["content-length"].isdigit() and
                int(response_headers["content-length"]) != len(body)):
            return

        cc_req = self.parse_cache_control(request.headers)
        cc = self.parse_cache_control(response_headers)

        cache_url = self.cache_url(request.url)
        logger.debug('Updating cache with response from "%s"', cache_url)

        # Delete it from the cache if we happen to have it stored there
        no_store = False
        if cc.get('no-store'):
            no_store = True
            logger.debug('Response header has "no-store"')
        if cc_req.get('no-store'):
            no_store = True
            logger.debug('Request header has "no-store"')
        if no_store and self.cache.get(cache_url):
            logger.debug('Purging existing cache entry to honor "no-store"')
            self.cache.delete(cache_url)

        # If we've been given an etag, then keep the response
        if self.cache_etags and 'etag' in response_headers:
            logger.debug('Caching due to etag')
            self.cache.set(
                cache_url,
                self.serializer.dumps(request, response, body=body),
            )

        # Add to the cache any 301s. We do this before looking that
        # the Date headers.
        elif response.status == 301:
            logger.debug('Caching permanant redirect')
            self.cache.set(
                cache_url,
                self.serializer.dumps(request, response)
            )

        # Add to the cache if the response headers demand it. If there
        # is no date header then we can't do anything about expiring
        # the cache.
        elif 'date' in response_headers:
            # cache when there is a max-age > 0
            if cc and cc.get('max-age'):
                if cc['max-age'].isdigit() and int(cc['max-age']) > 0:
                    logger.debug('Caching b/c date exists and max-age > 0')
                    self.cache.set(
                        cache_url,
                        self.serializer.dumps(request, response, body=body),
                    )

            # If the request can expire, it means we should cache it
            # in the meantime.
            elif 'expires' in response_headers:
                if response_headers['expires']:
                    logger.debug('Caching b/c of expires header')
                    self.cache.set(
                        cache_url,
                        self.serializer.dumps(request, response, body=body),
                    )

    def update_cached_response(self, request, response):
        """On a 304 we will get a new set of headers that we want to
        update our cached value with, assuming we have one.

        This should only ever be called when we've sent an ETag and
        gotten a 304 as the response.
        """
        cache_url = self.cache_url(request.url)

        cached_response = self.serializer.loads(
            request,
            self.cache.get(cache_url)
        )

        if not cached_response:
            # we didn't have a cached response
            return response

        # Lets update our headers with the headers from the new request:
        # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
        #
        # The server isn't supposed to send headers that would make
        # the cached body invalid. But... just in case, we'll be sure
        # to strip out ones we know that might be problematic due to
        # typical assumptions.
        excluded_headers = [
            "content-length",
        ]

        cached_response.headers.update(
            dict((k, v) for k, v in response.headers.items()
                 if k.lower() not in excluded_headers)
        )

        # we want a 200 b/c we have content via the cache
        cached_response.status = 200

        # update our cache
        self.cache.set(
            cache_url,
            self.serializer.dumps(request, cached_response),
        )

        return cached_response
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.